├── .dockerignore
├── .github
│   ├── codecov.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGES.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── docker-compose.yml
├── dockerfiles
│   ├── Dockerfile.gunicorn
│   └── Dockerfile.uvicorn
├── docs
│   └── notebooks
│       └── titiler_image_GCPSReader.ipynb
├── pyproject.toml
├── tests
│   ├── __init__.py
│   ├── conftest.py
│   ├── fixtures
│   │   ├── 67352ccc-d1b0-11e1-89ae-279075081939
│   │   ├── boston.geojson
│   │   ├── boston.jpg
│   │   ├── boston.png
│   │   ├── boston.tif
│   │   ├── boston_small.jpg
│   │   ├── cog_gcps.tif
│   │   ├── cog_no_gcps.geojson
│   │   ├── cog_no_gcps.tif
│   │   └── image.jpg
│   ├── test_health.py
│   ├── test_iiif.py
│   ├── test_local.py
│   ├── test_metadata.py
│   ├── test_reader.py
│   └── test_utils.py
└── titiler
    └── image
        ├── __init__.py
        ├── dependencies.py
        ├── factory.py
        ├── main.py
        ├── models.py
        ├── reader.py
        ├── resources
        │   └── enums.py
        ├── settings.py
        ├── templates
        │   ├── iiif.html
        │   └── local.html
        └── utils.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Ignore cdk folder
2 | cdk.out
3 | .history
4 | .tox
5 | .git
6 | .vscode
7 |
--------------------------------------------------------------------------------
/.github/codecov.yml:
--------------------------------------------------------------------------------
1 | comment: off
2 |
3 | coverage:
4 | status:
5 | project:
6 | default:
7 | target: auto
8 | threshold: 5
9 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | # On every pull request, and on push to main or tags
4 | on:
5 | push:
6 | branches:
7 | - main
8 | tags:
9 | - '*'
10 | pull_request:
11 | env:
12 | LATEST_PY_VERSION: '3.10'
13 |
14 | jobs:
15 | tests:
16 | runs-on: ubuntu-latest
17 | strategy:
18 | matrix:
19 | python-version:
20 | - '3.8'
21 | - '3.9'
22 | - '3.10'
23 | - '3.11'
24 |
25 | steps:
26 | - uses: actions/checkout@v3
27 | - name: Set up Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v4
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 |
32 | - name: Install dependencies
33 | run: |
34 | python -m pip install --upgrade pip
35 | python -m pip install -e .["test"]
36 |
37 | - name: Run pre-commit
38 | if: ${{ matrix.python-version == env.LATEST_PY_VERSION }}
39 | run: |
40 | python -m pip install pre-commit
41 | pre-commit run --all-files
42 |
43 | - name: Run tests
44 | run: python -m pytest --cov titiler.image --cov-report xml --cov-report term-missing --asyncio-mode=strict -s -vv
45 |
46 | - name: Upload Results
47 | if: ${{ matrix.python-version == env.LATEST_PY_VERSION }}
48 | uses: codecov/codecov-action@v1
49 | with:
50 | file: ./coverage.xml
51 | flags: unittests
52 | name: ${{ matrix.python-version }}
53 | fail_ci_if_error: false
54 |
55 | publish:
56 | needs: [tests]
57 | runs-on: ubuntu-latest
58 | if: startsWith(github.event.ref, 'refs/tags') || github.event_name == 'release'
59 | steps:
60 | - uses: actions/checkout@v3
61 | - name: Set up Python
62 | uses: actions/setup-python@v4
63 | with:
64 | python-version: ${{ env.LATEST_PY_VERSION }}
65 |
66 | - name: Install dependencies
67 | run: |
68 | python -m pip install --upgrade pip
69 | python -m pip install wheel twine build
70 | python -m pip install .
71 |
72 | - name: Set tag version
73 | id: tag
74 | run: |
75 | echo "version=${GITHUB_REF#refs/*/}"
76 | echo "version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
77 |
78 | - name: Set module version
79 | id: module
80 | run: |
81 | echo version=$(python -c'import titiler.image; print(titiler.image.__version__)') >> $GITHUB_OUTPUT
82 |
83 |
84 | - name: Build and publish
85 | if: ${{ steps.tag.outputs.version == steps.module.outputs.version }}
86 | env:
87 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
88 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
89 | run: |
90 | rm -rf dist
91 | python -m build
92 | twine upload dist/*
93 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
103 | log-config.yml
104 |
105 | .pgdata/
106 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/abravalheri/validate-pyproject
3 | rev: v0.12.1
4 | hooks:
5 | - id: validate-pyproject
6 |
7 | - repo: https://github.com/psf/black
8 | rev: 22.12.0
9 | hooks:
10 | - id: black
11 | language_version: python
12 |
13 | - repo: https://github.com/PyCQA/isort
14 | rev: 5.12.0
15 | hooks:
16 | - id: isort
17 | language_version: python
18 |
19 | - repo: https://github.com/charliermarsh/ruff-pre-commit
20 | rev: v0.0.238
21 | hooks:
22 | - id: ruff
23 | args: ["--fix"]
24 |
25 | - repo: https://github.com/pre-commit/mirrors-mypy
26 | rev: v1.3.0
27 | hooks:
28 | - id: mypy
29 | language_version: python
30 | exclude: tests/.*
31 | additional_dependencies:
32 | - types-simplejson
33 | - types-attrs
34 | - types-cachetools
35 | - pydantic~=2.0
36 |
--------------------------------------------------------------------------------
/CHANGES.md:
--------------------------------------------------------------------------------
1 | # Release Notes
2 |
3 | ## 0.1.0 (TBD)
4 |
5 | Initial release
6 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Development - Contributing
2 |
3 | Issues and pull requests are more than welcome: https://github.com/developmentseed/titiler-image/issues
4 |
5 | **dev install**
6 |
7 | ```bash
8 | $ git clone https://github.com/developmentseed/titiler-image.git
9 | $ cd titiler-image
10 | $ pip install pre-commit -e .["dev,test"]
11 | ```
12 |
13 | You can then run the tests with the following command:
14 |
15 | ```sh
16 | python -m pytest --cov titiler.image --cov-report term-missing
17 | ```
18 |
19 | **pre-commit**
20 |
21 | This repo is set to use `pre-commit` to run *isort*, *ruff* (flake8 and pydocstyle rules), *black* ("uncompromising Python code formatter") and *mypy* when committing new code.
22 |
23 | ```bash
24 | $ pre-commit install
25 | ```
26 |
27 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Development Seed
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | TiTiler Image.
2 |
21 | ---
22 |
23 | **Source Code**: https://github.com/developmentseed/titiler-image
24 |
25 | ---
26 |
27 | `TiTiler.image` is a [titiler](https://github.com/developmentseed/titiler) extension to work with non-geo images.
28 |
29 | ## Installation
30 |
31 | To install from PyPI and run:
32 |
33 | ```bash
34 | # Make sure to have pip up to date
35 | python -m pip install -U pip
36 |
37 | python -m pip install titiler.image
38 | ```
39 |
40 | To install from sources and run for development:
41 |
42 | ```bash
43 | python -m pip install -e .
44 | ```
45 |
46 | ## Launch
47 |
48 | ```bash
49 | python -m pip install uvicorn
50 | python -m uvicorn titiler.image.main:app --reload
51 | ```
52 |
53 | ### Using Docker
54 |
55 | ```bash
56 | git clone https://github.com/developmentseed/titiler-image.git
57 | cd titiler-image
58 | docker-compose up --build tiler
59 | ```
60 |
61 | This runs `titiler.image` using the Gunicorn web server. To run the Uvicorn-based version:
62 |
63 | ```bash
64 | docker-compose up --build tiler-uvicorn
65 | ```
66 |
67 | ## Factories
68 |
69 | `titiler-image` provides multiple endpoint factories (see https://developmentseed.org/titiler/advanced/tiler_factories/)
70 |
71 | ### MetadataFactory
72 |
73 | #### Endpoints
74 |
75 | - `/info?url={...}`
76 |
77 | - `/metadata?url={...}`
78 |
79 | ```python
80 | from fastapi import FastAPI
81 | from titiler.image.factory import MetadataFactory
82 |
83 | app = FastAPI()
84 | meta = MetadataFactory()
85 | app.include_router(meta.router)
86 | ```
87 |
88 | ### IIIFFactory
89 |
90 | Specification: https://iiif.io/api/image/3.0/
91 |
92 | #### Endpoints
93 |
94 | - `/{identifier}/info.json`: IIIF Image Information Request
95 |
96 | - `/{identifier}/{region}/{size}/{rotation}/{quality}.{format}`: IIIF Image Request
97 |
98 | - `/{identifier}`: Redirect to the Image Information Request endpoint or return a simple IIIF viewer (based on the `Accept` header value)
99 |
100 |
101 | ```python
102 | from fastapi import FastAPI
103 | from titiler.image.factory import IIIFFactory
104 |
105 | app = FastAPI()
106 | iiif = IIIFFactory()
107 | app.include_router(iiif.router)
108 | ```
109 |
110 | ### LocalTilerFactory
111 |
112 | #### Endpoints
113 |
114 | - `/tilejson.json?url={...}`: TileJSON document
115 |
116 | - `/tiles/{z}/{x}/{y}[@{scale}x.{format}]?url={...}`: Tiles endpoint
117 |
118 | - `/viewer?url={...}`: Simple local tiles viewer
119 |
120 | ```python
121 | from fastapi import FastAPI
122 | from titiler.image.factory import LocalTilerFactory
123 |
124 | app = FastAPI()
125 | local_tiles = LocalTilerFactory()
126 | app.include_router(local_tiles.router)
127 | ```
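
A request sketch for these endpoints, assuming the factory above is mounted at the application root and runs locally on port 8000 (host, port and file path are placeholders):

```python
import httpx

params = {"url": "tests/fixtures/boston.jpg"}

# TileJSON document describing the local (non-geo) tile grid
tilejson = httpx.get("http://127.0.0.1:8000/tilejson.json", params=params).json()

# a single tile at retina scale (@2x), encoded as PNG
tile = httpx.get("http://127.0.0.1:8000/tiles/0/0/0@2x.png", params=params)
```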
128 |
129 | ### GeoTilerFactory
130 |
131 | This is a lightweight version of `titiler.core.factory.TilerFactory`.
132 |
133 | #### Endpoints
134 |
135 | - `[/{TileMatrixSetId}]/tilejson.json?url={...}`: TileJSON document
136 |
137 | - `/tiles[/{TileMatrixSetId}]/{z}/{x}/{y}[@{scale}x.{format}]?url={...}`: Tiles endpoint
138 |
139 | - `[/{TileMatrixSetId}]/map?url={...}`: Simple dataset viewer
140 |
141 | ```python
142 | from fastapi import FastAPI
143 | from titiler.image.factory import GeoTilerFactory
144 |
145 | app = FastAPI()
146 | geo = GeoTilerFactory()
147 | app.include_router(geo.router)
148 | ```
149 |
150 | ### All together
151 |
152 | ```python
153 | from fastapi import FastAPI
154 |
155 | from titiler.image.dependencies import GCPSParams
156 | from titiler.image.factory import GeoTilerFactory, IIIFFactory, LocalTilerFactory, MetadataFactory
157 | from titiler.image.reader import GCPSReader  # assumed import path for the GCPS-aware reader
158 |
159 | app = FastAPI()
160 |
161 | meta = MetadataFactory()
162 | app.include_router(meta.router, tags=["Metadata"])
163 |
164 | iiif = IIIFFactory(router_prefix="/iiif")
165 | app.include_router(iiif.router, tags=["IIIF"], prefix="/iiif")
166 |
167 | image_tiles = LocalTilerFactory(router_prefix="/image")
168 | app.include_router(image_tiles.router, tags=["Local Tiles"], prefix="/image")
169 |
170 | geo_tiles = GeoTilerFactory(
171 | reader=GCPSReader, reader_dependency=GCPSParams, router_prefix="/geo"
172 | )
173 | app.include_router(geo_tiles.router, tags=["Geo Tiles"], prefix="/geo")
174 | ```
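
With `GCPSReader` and `GCPSParams` wired in, the `/geo` endpoints accept georeferencing at request time through the `gcps_file` (or inline `gcps`) and `cutline`/`cutline_file` query parameters defined in `titiler.image.dependencies`. A hedged request sketch (host, port and file paths are placeholders):

```python
import httpx

params = {
    "url": "tests/fixtures/cog_no_gcps.tif",            # image without embedded georeferencing
    "gcps_file": "tests/fixtures/cog_no_gcps.geojson",   # GeoJSON Features carrying `resourceCoords`
}

# TileJSON document for the dataset, georeferenced on the fly from the GCPs
tilejson = httpx.get("http://127.0.0.1:8000/geo/tilejson.json", params=params).json()
```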
169 |
170 | 
171 |
172 |
173 | ## Contribution & Development
174 |
175 | See [CONTRIBUTING.md](https://github.com/developmentseed/titiler-image/blob/main/CONTRIBUTING.md)
176 |
177 | ## License
178 |
179 | See [LICENSE](https://github.com/developmentseed/titiler-image/blob/main/LICENSE)
180 |
181 | ## Authors
182 |
183 | See [contributors](https://github.com/developmentseed/titiler-image/graphs/contributors) for a listing of individual contributors.
184 |
185 | ## Changes
186 |
187 | See [CHANGES.md](https://github.com/developmentseed/titiler-image/blob/main/CHANGES.md).
188 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | tiler:
5 | container_name: tiler-image
6 | # At the time of writing, rasterio wheels are not available for arm64 arch
7 | # so we force the image to be built with linux/amd64
8 | platform: linux/amd64
9 | build:
10 | context: .
11 | dockerfile: dockerfiles/Dockerfile.gunicorn
12 | ports:
13 | - "8081:8081"
14 | environment:
15 | # Application
16 | - HOST=0.0.0.0
17 | - PORT=8081
18 | - WEB_CONCURRENCY=1
19 | - WORKERS_PER_CORE=1
20 | # GDAL Config
21 | # This option controls the default GDAL raster block cache size.
22 | # If its value is small (less than 100000), it is assumed to be measured in megabytes, otherwise in bytes.
23 | - GDAL_CACHEMAX=200
24 | - GDAL_DISABLE_READDIR_ON_OPEN=EMPTY_DIR
25 | - GDAL_INGESTED_BYTES_AT_OPEN=32768
26 | - GDAL_HTTP_MERGE_CONSECUTIVE_RANGES=YES
27 | - GDAL_HTTP_MULTIPLEX=YES
28 | - GDAL_HTTP_VERSION=2
29 | # The file can be cached in RAM by setting the configuration option VSI_CACHE to TRUE.
30 | # The cache size defaults to 25 MB, but can be modified by setting the configuration option VSI_CACHE_SIZE (in bytes).
31 | # Content in that cache is discarded when the file handle is closed.
32 | - VSI_CACHE=TRUE
33 | - VSI_CACHE_SIZE=536870912
34 | # In addition, a global least-recently-used cache of 16 MB shared among all downloaded content is enabled by default,
35 | # and content in it may be reused after a file handle has been closed and reopened,
36 | # during the life-time of the process or until VSICurlClearCache() is called.
37 | # Starting with GDAL 2.3, the size of this global LRU cache can be modified by
38 | # setting the configuration option CPL_VSIL_CURL_CACHE_SIZE (in bytes).
39 | - CPL_VSIL_CURL_CACHE_SIZE=200000000
40 | # AWS S3 endpoint config
41 | # - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
42 | # - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
43 |
44 | tiler-uvicorn:
45 | container_name: tiler-image-uvicorn
46 | # At the time of writing, rasterio wheels are not available for arm64 arch
47 | # so we force the image to be built with linux/amd64
48 | platform: linux/amd64
49 | build:
50 | context: .
51 | dockerfile: dockerfiles/Dockerfile.uvicorn
52 | ports:
53 | - "8081:8081"
54 | environment:
55 | # Application
56 | - HOST=0.0.0.0
57 | - PORT=8081
58 | - WEB_CONCURRENCY=1
59 | # This option controls the default GDAL raster block cache size.
60 | # If its value is small (less than 100000), it is assumed to be measured in megabytes, otherwise in bytes.
61 | - GDAL_CACHEMAX=200
62 | - GDAL_DISABLE_READDIR_ON_OPEN=EMPTY_DIR
63 | - GDAL_INGESTED_BYTES_AT_OPEN=32768
64 | - GDAL_HTTP_MERGE_CONSECUTIVE_RANGES=YES
65 | - GDAL_HTTP_MULTIPLEX=YES
66 | - GDAL_HTTP_VERSION=2
67 | # The file can be cached in RAM by setting the configuration option VSI_CACHE to TRUE.
68 | # The cache size defaults to 25 MB, but can be modified by setting the configuration option VSI_CACHE_SIZE (in bytes).
69 | # Content in that cache is discarded when the file handle is closed.
70 | - VSI_CACHE=TRUE
71 | - VSI_CACHE_SIZE=536870912
72 | # In addition, a global least-recently-used cache of 16 MB shared among all downloaded content is enabled by default,
73 | # and content in it may be reused after a file handle has been closed and reopened,
74 | # during the life-time of the process or until VSICurlClearCache() is called.
75 | # Starting with GDAL 2.3, the size of this global LRU cache can be modified by
76 | # setting the configuration option CPL_VSIL_CURL_CACHE_SIZE (in bytes).
77 | - CPL_VSIL_CURL_CACHE_SIZE=200000000
78 | # TiTiler Config
79 | # - RIO_TILER_MAX_THREADS=2
80 | # AWS S3 endpoint config
81 | # - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
82 | # - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
83 |
--------------------------------------------------------------------------------
/dockerfiles/Dockerfile.gunicorn:
--------------------------------------------------------------------------------
1 | ARG PYTHON_VERSION=3.11
2 |
3 | FROM ghcr.io/vincentsarago/uvicorn-gunicorn:${PYTHON_VERSION}
4 |
5 |
6 | ENV CURL_CA_BUNDLE /etc/ssl/certs/ca-certificates.crt
7 |
8 | WORKDIR /tmp
9 |
10 | COPY titiler/ titiler/
11 | COPY pyproject.toml pyproject.toml
12 | COPY LICENSE LICENSE
13 | COPY README.md README.md
14 |
15 | RUN pip install --no-cache-dir --upgrade .
16 | RUN rm -rf titiler/ pyproject.toml README.md LICENSE
17 |
18 | ENV MODULE_NAME titiler.image.main
19 | ENV VARIABLE_NAME app
20 |
--------------------------------------------------------------------------------
/dockerfiles/Dockerfile.uvicorn:
--------------------------------------------------------------------------------
1 | ARG PYTHON_VERSION=3.11
2 |
3 | FROM python:${PYTHON_VERSION}-slim
4 |
5 | WORKDIR /tmp
6 |
7 | COPY titiler/ titiler/
8 | COPY pyproject.toml pyproject.toml
9 | COPY LICENSE LICENSE
10 | COPY README.md README.md
11 |
12 | RUN pip install --no-cache-dir --upgrade . uvicorn
13 | RUN rm -rf titiler/ pyproject.toml README.md LICENSE
14 |
15 | # http://www.uvicorn.org/settings/
16 | ENV HOST 0.0.0.0
17 | ENV PORT 80
18 | CMD uvicorn titiler.image.main:app --host ${HOST} --port ${PORT}
19 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "titiler.image"
3 | description = "TiTiler extension to work with non-geo images."
4 | readme = "README.md"
5 | requires-python = ">=3.8"
6 | authors = [
7 | {name = "Vincent Sarago", email = "vincent@developmentseed.com"},
8 | ]
9 | license = {text = "MIT"}
10 | keywords = [
11 | "TiTiler",
12 | "IIIF",
13 | "DeepZoom",
14 | "Fastapi",
15 | ]
16 | classifiers = [
17 | "Intended Audience :: Information Technology",
18 | "Intended Audience :: Science/Research",
19 | "License :: OSI Approved :: MIT License",
20 | "Programming Language :: Python :: 3.8",
21 | "Programming Language :: Python :: 3.9",
22 | "Programming Language :: Python :: 3.10",
23 | "Programming Language :: Python :: 3.11",
24 | "Topic :: Scientific/Engineering :: GIS",
25 | ]
26 | dynamic = ["version"]
27 | dependencies = [
28 | "titiler.core>=0.13.0,<0.14",
29 | "starlette>=0.27.0,<0.28",
30 | "starlette-cramjam>=0.3,<0.4",
31 | "pydantic_settings~=2.0",
32 | ]
33 |
34 | [project.optional-dependencies]
35 | test = [
36 | "pytest",
37 | "pytest-cov",
38 | "pytest-asyncio",
39 | "httpx",
40 | # "iiif-validator",
41 | ]
42 | dev = [
43 | "pre-commit"
44 | ]
45 |
46 | [project.urls]
47 | Homepage = "https://github.com/developmentseed/titiler-image"
48 | Issues = "https://github.com/developmentseed/titiler-image/issues"
49 | Source = "https://github.com/developmentseed/titiler-image"
50 |
51 | [tool.coverage.run]
52 | branch = true
53 | parallel = true
54 |
55 | [tool.coverage.report]
56 | exclude_lines = [
57 | "no cov",
58 | "if __name__ == .__main__.:",
59 | "if TYPE_CHECKING:",
60 | ]
61 |
62 | [tool.isort]
63 | profile = "black"
64 | known_first_party = ["titiler"]
65 | default_section = "THIRDPARTY"
66 |
67 | [tool.ruff]
68 | select = [
69 | "D1", # pydocstyle errors
70 | "E", # pycodestyle errors
71 | "W", # pycodestyle warnings
72 | "F", # flake8
73 | "C", # flake8-comprehensions
74 | "B", # flake8-bugbear
75 | ]
76 | ignore = [
77 | "E501", # line too long, handled by black
78 | "B008", # do not perform function calls in argument defaults
79 | "B905", # ignore zip() without an explicit strict= parameter, only support with python >3.10
80 | ]
81 |
82 | [tool.mypy]
83 | no_implicit_optional = true
84 | strict_optional = true
85 | namespace_packages = true
86 | explicit_package_bases = true
87 |
88 | [build-system]
89 | requires = ["pdm-pep517"]
90 | build-backend = "pdm.pep517.api"
91 |
92 | [tool.pdm.version]
93 | source = "file"
94 | path = "titiler/image/__init__.py"
95 |
96 | [tool.pdm.build]
97 | includes = ["titiler/image"]
98 | excludes = ["tests/", "**/.mypy_cache", "**/.DS_Store"]
99 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """titiler.pgstac tests."""
2 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | """``pytest`` configuration."""
2 |
3 | import warnings
4 | from typing import Any, Dict
5 |
6 | import pytest
7 | from rasterio.errors import NotGeoreferencedWarning
8 | from rasterio.io import MemoryFile
9 | from starlette.testclient import TestClient
10 |
11 |
12 | def parse_img(content: bytes) -> Dict[Any, Any]:
13 | """Read tile image and return metadata."""
14 | with warnings.catch_warnings():
15 | warnings.filterwarnings(
16 | "ignore",
17 | category=NotGeoreferencedWarning,
18 | module="rasterio",
19 | )
20 | with MemoryFile(content) as mem:
21 | with mem.open() as dst:
22 | return dst.profile
23 |
24 |
25 | @pytest.fixture(autouse=True)
26 | def app(monkeypatch):
27 | """Create app."""
28 | monkeypatch.setenv("TITILER_IMAGE_IIIF_MAX_WIDTH", "2000")
29 |
30 | from titiler.image.main import app
31 |
32 | with TestClient(app) as app:
33 | yield app
34 |
--------------------------------------------------------------------------------
/tests/fixtures/67352ccc-d1b0-11e1-89ae-279075081939:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/67352ccc-d1b0-11e1-89ae-279075081939
--------------------------------------------------------------------------------
/tests/fixtures/boston.geojson:
--------------------------------------------------------------------------------
1 | {
2 | "type": "FeatureCollection",
3 | "features": [
4 | {
5 | "type": "Feature",
6 | "properties": {
7 | "resourceCoords": [
8 | 3011,
9 | 1187
10 | ]
11 | },
12 | "geometry": {
13 | "type": "Point",
14 | "coordinates": [
15 | -71.11313846819577,
16 | 42.31750853403672
17 | ]
18 | }
19 | },
20 | {
21 | "type": "Feature",
22 | "properties": {
23 | "resourceCoords": [
24 | 5241,
25 | 5053
26 | ]
27 | },
28 | "geometry": {
29 | "type": "Point",
30 | "coordinates": [
31 | -71.11414794574848,
32 | 42.31229871703916
33 | ]
34 | }
35 | },
36 | {
37 | "type": "Feature",
38 | "properties": {
39 | "resourceCoords": [
40 | 5615,
41 | 351
42 | ]
43 | },
44 | "geometry": {
45 | "type": "Point",
46 | "coordinates": [
47 | -71.10904954715107,
48 | 42.31648965975142
49 | ]
50 | }
51 | },
52 | {
53 | "type": "Feature",
54 | "properties": {
55 | "resourceCoords": [
56 | 7012,
57 | 3894
58 | ]
59 | },
60 | "geometry": {
61 | "type": "Point",
62 | "coordinates": [
63 | -71.11068726339353,
64 | 42.312086619361835
65 | ]
66 | }
67 | }
68 | ]
69 | }
70 |
--------------------------------------------------------------------------------
/tests/fixtures/boston.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/boston.jpg
--------------------------------------------------------------------------------
/tests/fixtures/boston.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/boston.png
--------------------------------------------------------------------------------
/tests/fixtures/boston.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/boston.tif
--------------------------------------------------------------------------------
/tests/fixtures/boston_small.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/boston_small.jpg
--------------------------------------------------------------------------------
/tests/fixtures/cog_gcps.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/cog_gcps.tif
--------------------------------------------------------------------------------
/tests/fixtures/cog_no_gcps.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/cog_no_gcps.tif
--------------------------------------------------------------------------------
/tests/fixtures/image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developmentseed/titiler-image/f2139c5f8d02ade9acfc8a29f5909251e2a7ab5c/tests/fixtures/image.jpg
--------------------------------------------------------------------------------
/tests/test_health.py:
--------------------------------------------------------------------------------
1 | """Test healtz endpoint."""
2 |
3 |
4 | def test_health(app):
5 | """test health endpoint."""
6 | response = app.get("/healthz")
7 | assert response.status_code == 200
8 |
9 |
10 | def test_docs(app):
11 | """test docs endpoints."""
12 | response = app.get("/api")
13 | assert response.status_code == 200
14 |
15 | response = app.get("/api.html")
16 | assert response.status_code == 200
17 |
--------------------------------------------------------------------------------
/tests/test_iiif.py:
--------------------------------------------------------------------------------
1 | """Test titiler.image IIIF endpoints."""
2 |
3 | import os
4 | import urllib
5 |
6 | from .conftest import parse_img
7 |
8 | PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
9 |
10 | boston_jpeg = os.path.join(PREFIX, "boston_small.jpg")
11 |
12 |
13 | def test_iiif_information_endpoints(app):
14 | """Test info endpoints."""
15 | identifier = urllib.parse.quote_plus(boston_jpeg, safe="")
16 |
17 | # # Make sure we got redirected
18 | # response = app.get(
19 | # f"/iiif/{identifier}",
20 | # follow_redirects=True,
21 | # headers={"accept": "application/json"},
22 | # )
23 | # assert response.history
24 | # assert response.status_code == 200
25 | # assert response.headers["content-type"] == "application/json"
26 | # bodyr = response.json()
27 | # assert bodyr["id"] == response.history[0].url
28 |
29 | response = app.get(f"/iiif/{identifier}/info.json")
30 | assert response.status_code == 200
31 | assert response.headers["content-type"] == "application/json"
32 | body = response.json()
33 | assert body["type"] == "ImageService3"
34 | assert body["width"] == 1000
35 | assert body["height"] == 695
36 |
37 | response = app.get(
38 | f"/iiif/{identifier}/info.json",
39 | headers={"accept": "application/ld+json"},
40 | )
41 | assert response.status_code == 200
42 | assert (
43 | response.headers["content-type"]
44 | == 'application/ld+json;profile="http://iiif.io/api/image/3/context.json"'
45 | )
46 | body = response.json()
47 | assert body["type"] == "ImageService3"
48 | assert body["width"] == 1000
49 | assert body["height"] == 695
50 |
51 |
52 | def test_iiif_image_endpoint(app):
53 | """Test image endpoints."""
54 | identifier = urllib.parse.quote_plus(boston_jpeg, safe="")
55 |
56 | ###########################################################################
57 | # REGION
58 | # region=full
59 | response = app.get(f"/iiif/{identifier}/full/max/0/default.jpg")
60 | assert response.status_code == 200
61 | assert response.headers["content-type"] == "image/jpg"
62 | meta = parse_img(response.content)
63 | assert meta["width"] == 1000
64 | assert meta["height"] == 695
65 | assert meta["count"] == 3
66 | assert meta["driver"] == "JPEG"
67 |
68 | # region=square
69 | response = app.get(f"/iiif/{identifier}/square/max/0/default.jpg")
70 | assert response.status_code == 200
71 | assert response.headers["content-type"] == "image/jpg"
72 | meta = parse_img(response.content)
73 | assert meta["width"] == meta["height"]
74 |
75 | # region=x,y,w,h
76 | response = app.get(f"/iiif/{identifier}/0,0,10,20/max/0/default.jpg")
77 | assert response.status_code == 200
78 | assert response.headers["content-type"] == "image/jpg"
79 | meta = parse_img(response.content)
80 | assert meta["width"] == 10
81 | assert meta["height"] == 20
82 |
83 | # region=extends beyond
84 | response = app.get(f"/iiif/{identifier}/0,0,1005,700/max/0/default.jpg")
85 | assert response.status_code == 200
86 | assert response.headers["content-type"] == "image/jpg"
87 | meta = parse_img(response.content)
88 | assert meta["width"] == 1000
89 | assert meta["height"] == 695
90 | assert meta["count"] == 3
91 | assert meta["driver"] == "JPEG"
92 |
93 | # region=pct:x,y,w,h
94 | response = app.get(f"/iiif/{identifier}/pct:10,10,10,10/max/0/default.jpg")
95 | assert response.status_code == 200
96 | assert response.headers["content-type"] == "image/jpg"
97 | meta = parse_img(response.content)
98 | assert meta["width"] == 100
99 | assert meta["height"] == 70
100 |
101 | # region=Invalid
102 | response = app.get(f"/iiif/{identifier}/yo/max/0/default.jpg")
103 | assert response.status_code == 400
104 |
105 | # region=Invalid
106 | response = app.get(f"/iiif/{identifier}/pct:105,100,100,100/max/0/default.jpg")
107 | assert response.status_code == 400
108 |
109 | # invalid region
110 | response = app.get(f"/iiif/{identifier}/0,1000,100,100/max/0/default.jpg")
111 | assert response.status_code == 400
112 |
113 | ###########################################################################
114 | # FORMAT
115 | # format=png
116 | response = app.get(f"/iiif/{identifier}/full/max/0/default.png")
117 | assert response.status_code == 200
118 | assert response.headers["content-type"] == "image/png"
119 | meta = parse_img(response.content)
120 | assert meta["count"] == 4
121 | assert meta["driver"] == "PNG"
122 |
123 | ###########################################################################
124 | # ROTATION
125 | # rotation=90
126 | response = app.get(f"/iiif/{identifier}/full/max/90/default.jpg")
127 | assert response.status_code == 200
128 | assert response.headers["content-type"] == "image/jpg"
129 | meta = parse_img(response.content)
130 | assert meta["width"] == 695
131 | assert meta["height"] == 1000
132 |
133 | # rotation=180
134 | response = app.get(f"/iiif/{identifier}/full/max/180/default.jpg")
135 | assert response.status_code == 200
136 | assert response.headers["content-type"] == "image/jpg"
137 | meta = parse_img(response.content)
138 | assert meta["width"] == 1000
139 | assert meta["height"] == 695
140 |
141 | # rotation=-90
142 | response = app.get(f"/iiif/{identifier}/full/max/!90/default.jpg")
143 | assert response.status_code == 200
144 | assert response.headers["content-type"] == "image/jpg"
145 | meta = parse_img(response.content)
146 | assert meta["width"] == 695
147 | assert meta["height"] == 1000
148 |
149 | # rotation=invalid
150 | response = app.get(f"/iiif/{identifier}/full/max/!900/default.jpg")
151 | assert response.status_code == 400
152 |
153 | ###########################################################################
154 | # QUALITY
155 | response = app.get(f"/iiif/{identifier}/full/max/0/gray.jpg")
156 | assert response.status_code == 200
157 | assert response.headers["content-type"] == "image/jpg"
158 | meta = parse_img(response.content)
159 | assert meta["width"] == 1000
160 | assert meta["height"] == 695
161 | assert meta["count"] == 1
162 |
163 | response = app.get(f"/iiif/{identifier}/full/max/0/bitonal.jpg")
164 | assert response.status_code == 200
165 | assert response.headers["content-type"] == "image/jpg"
166 | meta = parse_img(response.content)
167 | assert meta["width"] == 1000
168 | assert meta["height"] == 695
169 | assert meta["count"] == 1
170 |
171 | response = app.get(f"/iiif/{identifier}/full/max/0/color.jpg")
172 | assert response.status_code == 200
173 | assert response.headers["content-type"] == "image/jpg"
174 | meta = parse_img(response.content)
175 | assert meta["width"] == 1000
176 | assert meta["height"] == 695
177 | assert meta["count"] == 3
178 |
179 | ###########################################################################
180 | # SIZE
181 | # size: ^max (upscale to server maxwidth: 2000)
182 | response = app.get(f"/iiif/{identifier}/full/^max/0/default.jpg")
183 | assert response.status_code == 200
184 | assert response.headers["content-type"] == "image/jpg"
185 | meta = parse_img(response.content)
186 | assert meta["width"] == 2000
187 | assert meta["height"] == 1390
188 |
189 | # size: pct:n
190 | response = app.get(f"/iiif/{identifier}/full/pct:50/0/default.jpg")
191 | assert response.status_code == 200
192 | assert response.headers["content-type"] == "image/jpg"
193 | meta = parse_img(response.content)
194 | assert meta["width"] == 500
195 | assert meta["height"] == 348
196 |
197 | # size: pct invalid
198 | response = app.get(f"/iiif/{identifier}/full/pct:-50/0/default.jpg")
199 | assert response.status_code == 400
200 |
201 | response = app.get(f"/iiif/{identifier}/full/^pct:-50/0/default.jpg")
202 | assert response.status_code == 400
203 |
204 | # do not allow upscale without ^
205 | response = app.get(f"/iiif/{identifier}/full/pct:150/0/default.jpg")
206 | assert response.status_code == 400
207 |
208 | # size: ^pct:n
209 | response = app.get(f"/iiif/{identifier}/full/^pct:150/0/default.jpg")
210 | assert response.status_code == 200
211 | assert response.headers["content-type"] == "image/jpg"
212 | meta = parse_img(response.content)
213 | assert meta["width"] == 1500
214 | assert meta["height"] == 1042
215 |
216 | # size: ^pct:n but limit to server limit
217 | response = app.get(f"/iiif/{identifier}/full/^pct:300/0/default.jpg")
218 | assert response.status_code == 200
219 | assert response.headers["content-type"] == "image/jpg"
220 | meta = parse_img(response.content)
221 | assert meta["width"] == 2000
222 | assert meta["height"] == 1390
223 |
224 | # size: w,
225 | response = app.get(f"/iiif/{identifier}/full/500,/0/default.jpg")
226 | assert response.status_code == 200
227 | assert response.headers["content-type"] == "image/jpg"
228 | meta = parse_img(response.content)
229 | assert meta["width"] == 500
230 | assert meta["height"] == 348
231 |
232 | # Do not allow upscale
233 | response = app.get(f"/iiif/{identifier}/full/1500,/0/default.jpg")
234 | assert response.status_code == 400
235 |
236 | # size: ^w,
237 | response = app.get(f"/iiif/{identifier}/full/^1500,/0/default.jpg")
238 | assert response.status_code == 200
239 | assert response.headers["content-type"] == "image/jpg"
240 | meta = parse_img(response.content)
241 | assert meta["width"] == 1500
242 | assert meta["height"] == 1042
243 |
244 | # size: ,h
245 | response = app.get(f"/iiif/{identifier}/full/,348/0/default.jpg")
246 | assert response.status_code == 200
247 | assert response.headers["content-type"] == "image/jpg"
248 | meta = parse_img(response.content)
249 | assert meta["width"] == 501
250 | assert meta["height"] == 348
251 |
252 | # Do not allow upscale
253 | response = app.get(f"/iiif/{identifier}/full/,1042/0/default.jpg")
254 | assert response.status_code == 400
255 |
256 | # size: ^,h
257 | response = app.get(f"/iiif/{identifier}/full/^,1042/0/default.jpg")
258 | assert response.status_code == 200
259 | assert response.headers["content-type"] == "image/jpg"
260 | meta = parse_img(response.content)
261 | assert meta["width"] == 1499
262 | assert meta["height"] == 1042
263 |
264 | # size: w,h
265 | response = app.get(f"/iiif/{identifier}/full/100,50/0/default.jpg")
266 | assert response.status_code == 200
267 | assert response.headers["content-type"] == "image/jpg"
268 | meta = parse_img(response.content)
269 | assert meta["width"] == 100
270 | assert meta["height"] == 50
271 |
272 | # Do not allow upscale
273 | response = app.get(f"/iiif/{identifier}/full/1500,1000/0/default.jpg")
274 | assert response.status_code == 400
275 |
276 | # size: ^w,h
277 | response = app.get(f"/iiif/{identifier}/full/^1500,1000/0/default.jpg")
278 | assert response.status_code == 200
279 | assert response.headers["content-type"] == "image/jpg"
280 | meta = parse_img(response.content)
281 | assert meta["width"] == 1500
282 | assert meta["height"] == 1000
283 |
284 | # size: !w,h (maintain aspect ratio)
285 | response = app.get(f"/iiif/{identifier}/full/!750,800/0/default.jpg")
286 | assert response.status_code == 200
287 | assert response.headers["content-type"] == "image/jpg"
288 | meta = parse_img(response.content)
289 | assert meta["width"] == 750
290 | assert meta["height"] == 521
291 |
292 | # Do not allow upscale
293 | response = app.get(f"/iiif/{identifier}/full/!1500,800/0/default.jpg")
294 | assert response.status_code == 400
295 |
296 | # size: ^!w,h (maintain aspect ratio)
297 | response = app.get(f"/iiif/{identifier}/full/^!1500,800/0/default.jpg")
298 | assert response.status_code == 200
299 | assert response.headers["content-type"] == "image/jpg"
300 | meta = parse_img(response.content)
301 | assert meta["width"] == 1500
302 | assert meta["height"] == 1042
303 |
304 | # size: invalid
305 | response = app.get(f"/iiif/{identifier}/full/^!0,0/0/default.jpg")
306 | assert response.status_code == 400
307 |
308 | response = app.get(f"/iiif/{identifier}/full/0,0/0/default.jpg")
309 | assert response.status_code == 400
310 |
311 | response = app.get(f"/iiif/{identifier}/full/^0,0/0/default.jpg")
312 | assert response.status_code == 400
313 |
314 | response = app.get(f"/iiif/{identifier}/full/0,/0/default.jpg")
315 | assert response.status_code == 400
316 |
317 | response = app.get(f"/iiif/{identifier}/full/^0,/0/default.jpg")
318 | assert response.status_code == 400
319 |
320 | response = app.get(f"/iiif/{identifier}/full/^,0/0/default.jpg")
321 | assert response.status_code == 400
322 |
323 | response = app.get(f"/iiif/{identifier}/full/,0/0/default.jpg")
324 | assert response.status_code == 400
325 |
--------------------------------------------------------------------------------
/tests/test_local.py:
--------------------------------------------------------------------------------
1 | """test Local Tiler Factory endpoints."""
2 |
3 | import os
4 |
5 | from .conftest import parse_img
6 |
7 | PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
8 |
9 | boston_jpeg = os.path.join(PREFIX, "boston.jpg")
10 | cog_gcps = os.path.join(PREFIX, "cog_gcps.tif")
11 |
12 |
13 | def test_tilejson(app):
14 | """test tilejson endpoint."""
15 | response = app.get("/image/tilejson.json", params={"url": boston_jpeg})
16 | assert response.status_code == 200
17 | assert response.headers["content-type"] == "application/json"
18 | body = response.json()
19 | tiles = body.pop("tiles")
20 | expected = {
21 | "tilejson": "2.2.0",
22 | "version": "1.0.0",
23 | "scheme": "xyz",
24 | "minzoom": 0,
25 | "maxzoom": 5,
26 | "bounds": [0.0, 5352.0, 7696.0, 0.0],
27 | "center": [3848.0, 2676.0, 0],
28 | }
29 | assert body == expected
30 | assert tiles[0].startswith("http://testserver/image/tiles/{z}/{x}/{y}?")
31 |
32 | response = app.get("/image/tilejson.json", params={"url": cog_gcps})
33 | assert response.status_code == 200
34 | assert response.headers["content-type"] == "application/json"
35 | body = response.json()
36 | tiles = body.pop("tiles")
37 | expected = {
38 | "tilejson": "2.2.0",
39 | "version": "1.0.0",
40 | "scheme": "xyz",
41 | "minzoom": 0,
42 | "maxzoom": 2,
43 | "bounds": [0.0, 837.0, 1280.0, 0.0],
44 | "center": [640.0, 418.5, 0],
45 | }
46 | assert body == expected
47 | assert tiles[0].startswith("http://testserver/image/tiles/{z}/{x}/{y}?")
48 |
49 | response = app.get(
50 | "/image/tilejson.json",
51 | params={
52 | "url": cog_gcps,
53 | "minzoom": 2,
54 | "maxzoom": 4,
55 | "rescale": "0,700",
56 | "tile_format": "png",
57 | "tile_scale": 2,
58 | },
59 | )
60 | assert response.status_code == 200
61 | assert response.headers["content-type"] == "application/json"
62 | body = response.json()
63 | tiles = body.pop("tiles")
64 | expected = {
65 | "tilejson": "2.2.0",
66 | "version": "1.0.0",
67 | "scheme": "xyz",
68 | "minzoom": 2,
69 | "maxzoom": 4,
70 | "bounds": [0.0, 837.0, 1280.0, 0.0],
71 | "center": [640.0, 418.5, 2],
72 | }
73 | assert body == expected
74 | assert tiles[0].startswith("http://testserver/image/tiles/{z}/{x}/{y}@2x.png?")
75 | assert "rescale=0%2C700" in tiles[0]
76 |
77 |
78 | def test_tiles(app):
79 | """test local tiles endpoint."""
80 | response = app.get("/image/tiles/0/0/0", params={"url": boston_jpeg})
81 | assert response.status_code == 200
82 | assert response.headers["content-type"] == "image/png"
83 | meta = parse_img(response.content)
84 | assert meta["width"] == 256
85 | assert meta["height"] == 256
86 |
87 | response = app.get(
88 | "/image/tiles/0/0/0@2x.jpg", params={"url": cog_gcps, "rescale": "0,700"}
89 | )
90 | assert response.status_code == 200
91 | assert response.headers["content-type"] == "image/jpg"
92 | meta = parse_img(response.content)
93 | assert meta["width"] == 512
94 | assert meta["height"] == 512
95 |
--------------------------------------------------------------------------------
/tests/test_metadata.py:
--------------------------------------------------------------------------------
1 | """test metadata Factory endpoints."""
2 |
3 | import os
4 |
5 | PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
6 |
7 | boston_jpeg = os.path.join(PREFIX, "boston.jpg")
8 | cog_gcps = os.path.join(PREFIX, "cog_gcps.tif")
9 |
10 |
11 | def test_info(app):
12 | """test /info endpoint."""
13 | response = app.get("/info", params={"url": boston_jpeg})
14 | assert response.status_code == 200
15 | assert response.headers["content-type"] == "application/json"
16 | body = response.json()
17 | expected = {
18 | "bounds": [0, 5352, 7696, 0],
19 | "minzoom": 0,
20 | "maxzoom": 5,
21 | "band_metadata": [["b1", {}], ["b2", {}], ["b3", {}]],
22 | "band_descriptions": [["b1", ""], ["b2", ""], ["b3", ""]],
23 | "dtype": "uint8",
24 | "nodata_type": "None",
25 | "colorinterp": ["red", "green", "blue"],
26 | "driver": "JPEG",
27 | "count": 3,
28 | "width": 7696,
29 | "height": 5352,
30 | "overviews": [2, 4, 8],
31 | }
32 | assert body == expected
33 |
34 | response = app.get("/info", params={"url": cog_gcps})
35 | assert response.status_code == 200
36 | assert response.headers["content-type"] == "application/json"
37 | body = response.json()
38 | expected = {
39 | "bounds": [0, 837, 1280, 0],
40 | "minzoom": 0,
41 | "maxzoom": 2,
42 | "band_metadata": [["b1", {}]],
43 | "band_descriptions": [["b1", ""]],
44 | "dtype": "uint16",
45 | "nodata_type": "None",
46 | "colorinterp": ["gray"],
47 | "driver": "GTiff",
48 | "count": 1,
49 | "width": 1280,
50 | "height": 837,
51 | "overviews": [2, 4, 8],
52 | }
53 | assert body == expected
54 |
55 |
56 | def test_statistics(app):
57 | """test /statistics endpoint."""
58 | response = app.get("/statistics", params={"url": boston_jpeg})
59 | assert response.status_code == 200
60 | assert response.headers["content-type"] == "application/json"
61 | body = response.json()
62 | expected = {
63 | "b1": {
64 | "min": 0.0,
65 | "max": 255.0,
66 | "mean": 220.9768487574509,
67 | "count": 730112.0,
68 | "sum": 161337849.0,
69 | "std": 40.5618108485455,
70 | "median": 234.0,
71 | "majority": 237.0,
72 | "minority": 248.0,
73 | "unique": 256.0,
74 | "histogram": [
75 | [16434, 1237, 1655, 2135, 3594, 6181, 16975, 44742, 143524, 493635],
76 | [
77 | 0.0,
78 | 25.5,
79 | 51.0,
80 | 76.5,
81 | 102.0,
82 | 127.5,
83 | 153.0,
84 | 178.5,
85 | 204.0,
86 | 229.5,
87 | 255.0,
88 | ],
89 | ],
90 | "valid_percent": 100.0,
91 | "masked_pixels": 0.0,
92 | "valid_pixels": 730112.0,
93 | "percentile_2": 6.0,
94 | "percentile_98": 241.0,
95 | },
96 | "b2": {
97 | "min": 0.0,
98 | "max": 236.0,
99 | "mean": 208.6166697712132,
100 | "count": 730112.0,
101 | "sum": 152313534.0,
102 | "std": 38.8684106948778,
103 | "median": 221.0,
104 | "majority": 227.0,
105 | "minority": 236.0,
106 | "unique": 237.0,
107 | "histogram": [
108 | [16567, 1221, 1418, 1916, 2693, 5897, 17909, 45820, 117856, 518815],
109 | [
110 | 0.0,
111 | 23.6,
112 | 47.2,
113 | 70.80000000000001,
114 | 94.4,
115 | 118.0,
116 | 141.60000000000002,
117 | 165.20000000000002,
118 | 188.8,
119 | 212.4,
120 | 236.0,
121 | ],
122 | ],
123 | "valid_percent": 100.0,
124 | "masked_pixels": 0.0,
125 | "valid_pixels": 730112.0,
126 | "percentile_2": 4.0,
127 | "percentile_98": 231.0,
128 | },
129 | "b3": {
130 | "min": 0.0,
131 | "max": 226.0,
132 | "mean": 190.8284715221774,
133 | "count": 730112.0,
134 | "sum": 139326157.0,
135 | "std": 38.205110269611204,
136 | "median": 205.0,
137 | "majority": 211.0,
138 | "minority": 226.0,
139 | "unique": 227.0,
140 | "histogram": [
141 | [16882, 1275, 1427, 2638, 4872, 13681, 40667, 71072, 193921, 383677],
142 | [
143 | 0.0,
144 | 22.6,
145 | 45.2,
146 | 67.80000000000001,
147 | 90.4,
148 | 113.0,
149 | 135.60000000000002,
150 | 158.20000000000002,
151 | 180.8,
152 | 203.4,
153 | 226.0,
154 | ],
155 | ],
156 | "valid_percent": 100.0,
157 | "masked_pixels": 0.0,
158 | "valid_pixels": 730112.0,
159 | "percentile_2": 5.0,
160 | "percentile_98": 218.0,
161 | },
162 | }
163 | assert body == expected
164 |
165 | response = app.get(
166 | "/statistics",
167 | params={
168 | "url": boston_jpeg,
169 | "bidx": 1,
170 | "histogram_bins": 5,
171 | "histogram_range": "0,100",
172 | },
173 | )
174 | assert response.status_code == 200
175 | assert response.headers["content-type"] == "application/json"
176 | body = response.json()
177 | expected = {
178 | "b1": {
179 | "min": 0.0,
180 | "max": 255.0,
181 | "mean": 220.9768487574509,
182 | "count": 730112.0,
183 | "sum": 161337849.0,
184 | "std": 40.5618108485455,
185 | "median": 234.0,
186 | "majority": 237.0,
187 | "minority": 248.0,
188 | "unique": 256.0,
189 | "histogram": [
190 | [16216, 864, 1247, 1163, 1875],
191 | [0.0, 20.0, 40.0, 60.0, 80.0, 100.0],
192 | ],
193 | "valid_percent": 100.0,
194 | "masked_pixels": 0.0,
195 | "valid_pixels": 730112.0,
196 | "percentile_2": 6.0,
197 | "percentile_98": 241.0,
198 | }
199 | }
200 | assert body == expected
201 |
--------------------------------------------------------------------------------
/tests/test_reader.py:
--------------------------------------------------------------------------------
1 | """test titiler.image custom Reader."""
2 |
3 | import json
4 | import os
5 | from typing import List
6 |
7 | import numpy
8 | import rasterio
9 | from rasterio.control import GroundControlPoint
10 |
11 | from titiler.image.reader import Reader
12 |
13 | PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
14 |
15 | boston_jpeg = os.path.join(PREFIX, "boston.jpg")
16 | boston_png = os.path.join(PREFIX, "boston.png")
17 | boston_tif = os.path.join(PREFIX, "boston.tif")
18 | boston_geojson = os.path.join(PREFIX, "boston.geojson")
19 | cog_geojson = os.path.join(PREFIX, "cog_no_gcps.geojson")
20 | cog_no_gcps = os.path.join(PREFIX, "cog_no_gcps.tif")
21 | cog_gcps = os.path.join(PREFIX, "cog_gcps.tif")
22 |
23 |
24 | def get_gcps(path: str) -> List[GroundControlPoint]:
25 | """read GCPS geojson."""
26 | with open(path, "r") as f:
27 | geojson = json.loads(f.read())
28 | return [
29 | # GroundControlPoint(row, col, x, y, z)
30 | # https://github.com/allmaps/iiif-api/blob/georef/source/extension/georef/index.md#35-the-resourcecoords-property
31 | GroundControlPoint(
32 | f["properties"]["resourceCoords"][1], # row = y
33 | f["properties"]["resourceCoords"][0], # col = x
34 | *f["geometry"]["coordinates"], # lon, lat, z
35 | )
36 | for f in geojson["features"]
37 | ]
38 |
39 |
40 | def test_reader_gcps():
41 | """Make sure that Reader can use COG with internal GCPS (as rio_tiler.io.Reader)."""
42 | with rasterio.open(cog_gcps) as dst:
43 | with Reader(cog_gcps) as src:
44 | assert dst.meta != src.dataset.meta
45 | assert src.crs == "epsg:4326"
46 |
47 | info = src.info()
48 | assert info.nodata_type == "Alpha"
49 | assert len(info.band_metadata) == 2
50 | assert info.band_descriptions == [("b1", ""), ("b2", "")]
51 | assert info.colorinterp == ["gray", "alpha"]
52 | assert info.count == 2
53 | assert info.width == 1417
54 | assert info.height == 1071
55 |
56 | # The topleft corner should be masked
57 | assert src.preview(indexes=1).array.mask[0, 0, 0]
58 |
59 |
60 | def test_reader_external_gcps():
61 | """Make sure that Reader can use COG with external GCPS."""
62 | with rasterio.open(cog_gcps) as dst:
63 | with Reader(cog_no_gcps, gcps=get_gcps(cog_geojson)) as src:
64 | assert dst.meta != src.dataset.meta
65 | assert src.crs == "epsg:4326"
66 | info = src.info()
67 | assert info.nodata_type == "Alpha"
68 | assert len(info.band_metadata) == 2
69 | assert info.band_descriptions == [("b1", ""), ("b2", "")]
70 | assert info.colorinterp == ["gray", "alpha"]
71 | assert info.count == 2
72 | assert info.width == 1417
73 | assert info.height == 1071
74 |
75 | # The topleft corner should be masked
76 | img = src.preview(indexes=1)
77 | assert img.array.mask[0, 0, 0]
78 |
79 | # Validate that gcps_order has an influence
80 | with Reader(cog_no_gcps, gcps=get_gcps(cog_geojson), gcps_order=-1) as src:
81 | img2 = src.preview(indexes=1)
82 |
83 | with numpy.testing.assert_raises(AssertionError):
84 | numpy.testing.assert_array_equal(img2.array.data, img.array.data)
85 |
86 |
87 | def test_reader_internal_external_gcps():
88 | """Make sure we get the same result with internal/external GCPS."""
89 | with Reader(cog_gcps) as src_gcps:
90 | with Reader(cog_no_gcps, gcps=get_gcps(cog_geojson)) as src_no_gcps:
91 | assert src_gcps.dataset.meta == src_no_gcps.dataset.meta
92 |
93 |
94 | def test_reader_cutline():
95 | """Make sure cutline is applied."""
96 | with Reader(boston_tif, gcps=get_gcps(boston_geojson)) as src:
97 | im = src.tile(79285, 97003, 18)
98 | assert not im.array.mask[0, 190, 68]
99 |
100 | with rasterio.open(boston_tif) as src:
101 | w = src.width
102 | h = src.height
103 |
104 | # buffer
105 | b = 100
106 | cutline = (
107 | f"POLYGON (({b} {b}, {w - b} {b}, {w - b} {h - b}, {b} {h - b}, {b} {b}))"
108 | )
109 |
110 | with Reader(
111 | boston_tif,
112 | gcps=get_gcps(boston_geojson),
113 | cutline=cutline,
114 | ) as src:
115 | im_cut = src.tile(79285, 97003, 18)
116 | assert im_cut.array.mask[0, 190, 68]
117 |
118 | with numpy.testing.assert_raises(AssertionError):
119 | numpy.testing.assert_array_equal(im_cut.array.mask, im.array.mask)
120 |
121 | with numpy.testing.assert_raises(AssertionError):
122 | numpy.testing.assert_array_equal(im_cut.array.data, im.array.data)
123 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | """Test titiler-image utils."""
2 |
3 | import os
4 |
5 | import numpy
6 | import pytest
7 | from fastapi import HTTPException
8 | from rio_tiler.io import ImageReader
9 |
10 | from titiler.image.utils import image_to_bitonal, image_to_grayscale, rotate
11 |
12 | PREFIX = os.path.join(os.path.dirname(__file__), "fixtures")
13 | boston_jpeg = os.path.join(PREFIX, "boston_small.jpg")
14 |
15 |
16 | def test_rotate():
17 | """test rotation."""
18 | with ImageReader(boston_jpeg) as src:
19 | # read part of the image with mask area
20 | img = src.part((-100, 100, 100, 0))
21 | assert img.array.mask[0, 0, 0] # Masked
22 | assert not img.array.mask[0, 0, 100] # Not Masked
23 | assert img.array.shape == (3, 100, 200)
24 |
25 | img0 = rotate(img, 0, expand=True)
26 | numpy.testing.assert_array_equal(img.data, img0.data)
27 |
28 | imgm = rotate(img, 0, mirrored=True)
29 | with numpy.testing.assert_raises(AssertionError):
30 | numpy.testing.assert_array_equal(imgm.data, img.data)
31 |
32 | img180 = rotate(img, 180, expand=True)
33 | with numpy.testing.assert_raises(AssertionError):
34 | numpy.testing.assert_array_equal(img180.data, img.data)
35 |
36 | assert img.data[0, 0, 100] == img180.data[0, 99, 99]
37 | assert not img180.array.mask[0, 0, 0] # Not Masked
38 | assert img180.array.mask[0, 0, 100] # Masked
39 |
40 | img90 = rotate(img, 90, expand=True)
41 | assert img90.array.shape == (3, 200, 100)
42 | assert img90.array.mask[0, 0, 0] # Masked
43 | assert not img90.array.mask[0, 100, 0] # Not Masked
44 |
45 | img90 = rotate(img, 90, expand=False)
46 | assert img90.array.shape == (3, 100, 200)
47 | assert img90.array.mask[0, 0, 0] # Masked
48 | assert img90.array.mask[0, 0, 100] # Masked
49 | assert img90.array.mask[0, 99, 0] # Masked
50 | assert not img90.array.mask[0, 99, 100] # not Masked
51 |
52 | img125 = rotate(img, 125, expand=True)
53 | assert img125.array.shape == (3, 222, 198)
54 | assert img125.array.mask[0, 0, 0] # Masked
55 | assert not img125.array.mask[0, 150, 50] # Not Masked
56 |
57 |
58 | def test_gray():
59 | """test to_grayscale."""
60 | with ImageReader(boston_jpeg) as src:
61 | img = src.preview()
62 | assert img.array.shape == (3, 695, 1000)
63 | assert img.array.dtype == "uint8"
64 |
65 | grey = image_to_grayscale(img)
66 | assert grey.array.shape == (1, 695, 1000)
67 | assert grey.array.dtype == "uint8"
68 |
69 | img = src.preview(indexes=1)
70 | assert img.array.shape == (1, 695, 1000)
71 | grey = image_to_grayscale(img)
72 | numpy.testing.assert_array_equal(img.data, grey.data)
73 |
74 | with pytest.raises(HTTPException):
75 | img = src.preview(indexes=(1, 1, 1, 1))
76 | image_to_grayscale(img)
77 |
78 |
79 | def test_bitonal():
80 | """test to_bitonal."""
81 | with ImageReader(boston_jpeg) as src:
82 | img = src.preview()
83 | assert img.array.shape == (3, 695, 1000)
84 | assert img.array.dtype == "uint8"
85 |
86 | grey = image_to_bitonal(img)
87 | assert grey.array.shape == (1, 695, 1000)
88 | assert grey.array.dtype == "uint8"
89 | assert numpy.unique(grey.array).tolist() == [0, 255]
90 |
--------------------------------------------------------------------------------
/titiler/image/__init__.py:
--------------------------------------------------------------------------------
1 | """titiler.image"""
2 |
3 | __version__ = "0.1.0"
4 |
--------------------------------------------------------------------------------
/titiler/image/dependencies.py:
--------------------------------------------------------------------------------
1 | """titiler-image dependencies."""
2 |
3 | import json
4 | from dataclasses import dataclass
5 | from typing import List, Optional
6 |
7 | import httpx
8 | from cachetools import TTLCache, cached
9 | from fastapi import HTTPException, Query
10 | from geojson_pydantic import MultiPolygon, Polygon
11 | from rasterio.control import GroundControlPoint
12 | from rio_tiler.types import RIOResampling
13 | from typing_extensions import Annotated
14 |
15 | from titiler.core.dependencies import DefaultDependency
16 |
17 |
18 | @dataclass
19 | class DatasetParams(DefaultDependency):
20 | """Dataset Optional parameters."""
21 |
22 | unscale: Annotated[
23 | Optional[bool],
24 | Query(
25 | title="Apply internal Scale/Offset",
26 | description="Apply internal Scale/Offset",
27 | ),
28 | ] = False
29 | resampling_method: Annotated[
30 | RIOResampling,
31 | Query(
32 | alias="resampling",
33 | description="Resampling method.",
34 | ),
35 | ] = "nearest"
36 |
37 |
38 | @cached(TTLCache(maxsize=512, ttl=3600))
39 | def get_gcps(gcps_file: str) -> List[GroundControlPoint]:
40 | """Fetch and parse GCPS file."""
41 | if gcps_file.startswith("http"):
42 | body = httpx.get(gcps_file).json()
43 |
44 | else:
45 | with open(gcps_file, "r") as f:
46 | body = json.load(f)
47 |
48 | return [
49 | # GroundControlPoint(row, col, x, y, z)
50 | # https://github.com/allmaps/iiif-api/blob/georef/source/extension/georef/index.md#35-the-resourcecoords-property
51 | GroundControlPoint(
52 | f["properties"]["resourceCoords"][1],
53 | f["properties"]["resourceCoords"][0],
54 | *f["geometry"]["coordinates"], # x, y, z
55 | id=f.get("id"),
56 | )
57 | for f in body["features"]
58 | ]
59 |
60 |
61 | @cached(TTLCache(maxsize=512, ttl=3600))
62 | def get_cutline(cutline_file: str) -> str:
63 | """Fetch and parse Cutline file."""
64 | if cutline_file.startswith("http"):
65 | body = httpx.get(cutline_file).json()
66 |
67 | else:
68 | with open(cutline_file, "r") as f:
69 | body = json.load(f)
70 |
71 |     # We assume the geojson is a Feature (not a FeatureCollection)
72 | if "geometry" in body:
73 | body = body["geometry"]
74 |
75 | geom_type = body["type"]
76 | if geom_type == "Polygon":
77 | return Polygon.parse_obj(body).wkt
78 |
79 | elif geom_type == "MultiPolygon":
80 | return MultiPolygon.parse_obj(body).wkt
81 |
82 | else:
83 | raise HTTPException(
84 | status_code=400, detail=f"Invalid GeoJSON type: {geom_type}."
85 | )
86 |
87 |
88 | @dataclass
89 | class GCPSParams(DefaultDependency):
90 | """GCPS parameters."""
91 |
92 | gcps: Optional[List[GroundControlPoint]] = None
93 | cutline: Optional[str] = None
94 | gcps_order: Optional[int] = None
95 |
96 | def __init__(
97 | self,
98 | gcps: Annotated[
99 | Optional[List[str]],
100 | Query(
101 | title="Ground Control Points",
102 | description="Ground Control Points in form of `row (y), col (x), lon, lat, alt`",
103 | ),
104 | ] = None,
105 | gcps_file: Annotated[
106 | Optional[str],
107 | Query(title="Ground Control Points GeoJSON path"),
108 | ] = None,
109 | gcps_order: Annotated[
110 | Optional[int],
111 | Query(
112 | title="The maximum order to use for GCP derived polynomials if possible. The default is to autoselect based on the number of GCPs. A value of -1 triggers use of Thin Plate Spline instead of polynomials."
113 | ),
114 | ] = None,
115 | cutline: Annotated[
116 | Optional[str],
117 | Query(
118 | title="WKT Image Cutline (equivalent of the SVG Selector)",
119 | description="WKT Polygon or MultiPolygon.",
120 | ),
121 | ] = None,
122 | cutline_file: Annotated[
123 | Optional[str],
124 | Query(title="GeoJSON file for cutline"),
125 | ] = None,
126 | ):
127 | """Initialize GCPSParams and Cutline
128 |
129 |         Note: we only want `gcps` and `cutline` to be forwarded to the reader, so we use a custom `__init__` method (which FastAPI uses to parse the query parameters).
130 | """
131 | if gcps:
132 | self.gcps: List[GroundControlPoint] = [ # type: ignore
133 |                 # WARNING: each GCP should be in the form `row (y), col (x), lon, lat, alt`
134 |                 GroundControlPoint(*list(map(float, gcp.split(","))))
135 |                 for gcp in gcps
136 | ]
137 | elif gcps_file:
138 | self.gcps = get_gcps(gcps_file)
139 |
140 | if gcps_order is not None:
141 | self.gcps_order = gcps_order
142 |
143 | if self.gcps and len(self.gcps) < 3:
144 | raise HTTPException(
145 |                 status_code=400, detail="Need at least 3 gcps to warp an image."
146 | )
147 |
148 | if cutline:
149 | self.cutline = cutline
150 |
151 | elif cutline_file:
152 | self.cutline = get_cutline(cutline_file)
153 |
--------------------------------------------------------------------------------
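
A minimal sketch of the mapping the `get_gcps` dependency above performs, using a made-up georeference annotation feature: the annotation stores image coordinates as `[x, y]` in "resourceCoords", while rasterio's GroundControlPoint takes `(row, col, x, y[, z])`:

    from rasterio.control import GroundControlPoint

    feature = {
        "id": "gcp-1",
        "properties": {"resourceCoords": [120, 45]},  # [col (x), row (y)] in image space
        "geometry": {"type": "Point", "coordinates": [-71.06, 42.36]},  # lon, lat
    }

    gcp = GroundControlPoint(
        feature["properties"]["resourceCoords"][1],  # row
        feature["properties"]["resourceCoords"][0],  # col
        *feature["geometry"]["coordinates"],         # x (lon), y (lat)[, z]
        id=feature.get("id"),
    )
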
/titiler/image/factory.py:
--------------------------------------------------------------------------------
1 | """titiler.image factories."""
2 |
3 | import abc
4 | import urllib.parse
5 | from dataclasses import dataclass, field
6 | from typing import Any, Dict, List, Literal, Optional, Tuple, Type
7 |
8 | import jinja2
9 | from fastapi import APIRouter, Depends, HTTPException, Path, Query, params
10 | from fastapi.dependencies.utils import get_parameterless_sub_dependant
11 | from pydantic import conint
12 | from rasterio import windows
13 | from rio_tiler.io import BaseReader, ImageReader
14 | from rio_tiler.models import Info
15 | from starlette.requests import Request
16 | from starlette.responses import (
17 | HTMLResponse,
18 | RedirectResponse,
19 | Response,
20 | StreamingResponse,
21 | )
22 | from starlette.routing import Match, compile_path, replace_params
23 | from starlette.templating import Jinja2Templates
24 | from typing_extensions import Annotated
25 |
26 | from titiler.core.dependencies import (
27 | BidxExprParams,
28 | ColorMapParams,
29 | DefaultDependency,
30 | HistogramParams,
31 | ImageParams,
32 | RescalingParams,
33 | StatisticsParams,
34 | )
35 | from titiler.core.factory import TilerFactory, img_endpoint_params
36 | from titiler.core.models.mapbox import TileJSON
37 | from titiler.core.models.responses import Statistics
38 | from titiler.core.resources.enums import ImageType, MediaType
39 | from titiler.core.resources.responses import JSONResponse
40 | from titiler.core.routing import EndpointScope
41 | from titiler.image.dependencies import DatasetParams, GCPSParams
42 | from titiler.image.models import iiifInfo
43 | from titiler.image.reader import Reader
44 | from titiler.image.resources.enums import IIIFImageFormat
45 | from titiler.image.settings import iiif_settings
46 | from titiler.image.utils import (
47 | _get_sizes,
48 | _percent,
49 | accept_media_type,
50 | image_to_bitonal,
51 | image_to_grayscale,
52 | rotate,
53 | )
54 |
55 | DEFAULT_TEMPLATES = Jinja2Templates(
56 | directory="",
57 | loader=jinja2.ChoiceLoader([jinja2.PackageLoader(__package__, "templates")]),
58 | ) # type:ignore
59 |
60 |
61 | @dataclass # type: ignore
62 | class BaseFactory(metaclass=abc.ABCMeta):
63 | """BaseFactory.
64 |
65 | Abstract Base Class for endpoints factories.
66 |
67 |     Note: This is a custom version of titiler.core.factory.BaseTilerFactory (stripped of most options)
68 |
69 | """
70 |
71 | # FastAPI router
72 | router: APIRouter = field(default_factory=APIRouter)
73 |
74 | # Router Prefix is needed to find the path for /tile if the TilerFactory.router is mounted
75 | # with other router (multiple `.../tile` routes).
76 |     # e.g. if you mount the router with a `/cog` prefix, set router_prefix to "cog".
77 | router_prefix: str = ""
78 |
79 | # add dependencies to specific routes
80 | route_dependencies: List[Tuple[List[EndpointScope], List[params.Depends]]] = field(
81 | default_factory=list
82 | )
83 |
84 | templates: Jinja2Templates = DEFAULT_TEMPLATES
85 |
86 | def __post_init__(self):
87 | """Post Init: register route and configure specific options."""
88 | self.register_routes()
89 |
90 | for scopes, dependencies in self.route_dependencies:
91 | self.add_route_dependencies(scopes=scopes, dependencies=dependencies)
92 |
93 | @abc.abstractmethod
94 | def register_routes(self):
95 | """Register Routes."""
96 | ...
97 |
98 | def url_for(self, request: Request, name: str, **path_params: Any) -> str:
99 | """Return full url (with prefix) for a specific endpoint."""
100 | url_path = self.router.url_path_for(name, **path_params)
101 | base_url = str(request.base_url)
102 | if self.router_prefix:
103 | prefix = self.router_prefix.lstrip("/")
104 | # If we have prefix with custom path param we check and replace them with
105 | # the path params provided
106 | if "{" in prefix:
107 | _, path_format, param_convertors = compile_path(prefix)
108 | prefix, _ = replace_params(
109 | path_format, param_convertors, request.path_params
110 | )
111 | base_url += prefix
112 |
113 | return str(url_path.make_absolute_url(base_url=base_url))
114 |
115 | def add_route_dependencies(
116 | self,
117 | *,
118 | scopes: List[EndpointScope],
119 |         dependencies: List[params.Depends],
120 | ):
121 | """Add dependencies to routes.
122 |
123 | Allows a developer to add dependencies to a route after the route has been defined.
124 |
125 | """
126 | for route in self.router.routes:
127 | for scope in scopes:
128 | match, _ = route.matches({"type": "http", **scope})
129 | if match != Match.FULL:
130 | continue
131 |
132 | # Mimicking how APIRoute handles dependencies:
133 | # https://github.com/tiangolo/fastapi/blob/1760da0efa55585c19835d81afa8ca386036c325/fastapi/routing.py#L408-L412
134 | for depends in dependencies[::-1]:
135 | route.dependant.dependencies.insert( # type: ignore
136 | 0,
137 | get_parameterless_sub_dependant(
138 | depends=depends, path=route.path_format # type: ignore
139 | ),
140 | )
141 |
142 | # Register dependencies directly on route so that they aren't ignored if
143 | # the routes are later associated with an app (e.g. app.include_router(router))
144 | # https://github.com/tiangolo/fastapi/blob/58ab733f19846b4875c5b79bfb1f4d1cb7f4823f/fastapi/applications.py#L337-L360
145 | # https://github.com/tiangolo/fastapi/blob/58ab733f19846b4875c5b79bfb1f4d1cb7f4823f/fastapi/routing.py#L677-L678
146 | route.dependencies.extend(dependencies) # type: ignore
147 |
148 |
149 | ###############################################################################
150 | # Metadata Endpoints Factory
151 | ###############################################################################
152 | @dataclass
153 | class MetadataFactory(BaseFactory):
154 | """Metadata Factory."""
155 |
156 | def register_routes(self):
157 | """Register Routes."""
158 |
159 | @self.router.get(
160 | "/info",
161 | response_model=Info,
162 | response_model_exclude_none=True,
163 | response_class=JSONResponse,
164 | responses={200: {"description": "Return dataset's basic info."}},
165 | )
166 | def info(
167 | src_path: Annotated[
168 | str,
169 | Query(description="Dataset URL", alias="url"),
170 | ],
171 | ):
172 | """Return Image metadata."""
173 | with ImageReader(src_path) as dst:
174 | return dst.info()
175 |
176 | @self.router.get(
177 | "/statistics",
178 | response_class=JSONResponse,
179 | response_model=Statistics,
180 | responses={
181 | 200: {
182 | "content": {"application/json": {}},
183 | "description": "Return dataset's statistics.",
184 | }
185 | },
186 | )
187 | def statistics(
188 | src_path: Annotated[
189 | str,
190 | Query(description="Dataset URL", alias="url"),
191 | ],
192 | layer_params: BidxExprParams = Depends(),
193 | dataset_params: DatasetParams = Depends(),
194 | image_params: ImageParams = Depends(),
195 | stats_params: StatisticsParams = Depends(),
196 | histogram_params: HistogramParams = Depends(),
197 | ):
198 | """Get Dataset statistics."""
199 | with ImageReader(src_path) as dst:
200 | return dst.statistics(
201 | **layer_params,
202 | **image_params,
203 | **dataset_params,
204 | **stats_params,
205 | hist_options={**histogram_params},
206 | )
207 |
208 |
209 | ###############################################################################
210 | # Local Tiles Endpoints Factory
211 | ###############################################################################
212 | @dataclass
213 | class LocalTilerFactory(BaseFactory):
214 | """Local Tiler Factory."""
215 |
216 | add_viewer: bool = True
217 |
218 | def register_routes(self):
219 | """Register Routes."""
220 | self.register_tiles()
221 |
222 | if self.add_viewer:
223 | self.register_viewer()
224 |
225 | def register_tiles(self):
226 | """Register Tile routes."""
227 |
228 | @self.router.get(
229 | "/tilejson.json",
230 | response_model=TileJSON,
231 | responses={200: {"description": "Return a TileJSON document"}},
232 | response_model_exclude_none=True,
233 | )
234 | def tilejson(
235 | request: Request,
236 | src_path: Annotated[
237 | str,
238 | Query(description="Dataset URL", alias="url"),
239 | ],
240 | tile_format: Annotated[
241 | Optional[ImageType],
242 | Query(description="Output image type. Default is auto."),
243 | ] = None,
244 | tile_scale: Annotated[
245 | Optional[int],
246 | Query(
247 | gt=0,
248 | lt=4,
249 | description="Tile size scale. 1=256x256, 2=512x512...",
250 | ),
251 | ] = None,
252 | minzoom: Annotated[
253 | Optional[int],
254 | Query(description="Overwrite default minzoom."),
255 | ] = None,
256 | maxzoom: Annotated[
257 | Optional[int],
258 | Query(description="Overwrite default maxzoom."),
259 | ] = None,
260 | layer_params: BidxExprParams = Depends(),
261 | dataset_params: DatasetParams = Depends(),
262 | rescale: RescalingParams = Depends(),
263 | color_formula: Annotated[
264 | Optional[str],
265 | Query(
266 | title="Color Formula",
267 | description="rio-color formula (info: https://github.com/mapbox/rio-color)",
268 | ),
269 | ] = None,
270 | colormap: ColorMapParams = Depends(),
271 | add_mask: Annotated[
272 | Optional[bool],
273 | Query(
274 | alias="return-mask",
275 | description="Add mask to the output data.",
276 | ),
277 | ] = None,
278 | ):
279 |             """Return TileJSON document."""
280 | route_params: Dict[str, Any] = {
281 | "z": "{z}",
282 | "x": "{x}",
283 | "y": "{y}",
284 | }
285 |
286 | if tile_scale:
287 | route_params["scale"] = tile_scale
288 |
289 | if tile_format:
290 | route_params["format"] = tile_format.value
291 |
292 | tiles_url = self.url_for(request, "tile", **route_params)
293 |
294 | qs_key_to_remove = [
295 | "tile_format",
296 | "tile_scale",
297 | "minzoom",
298 | "maxzoom",
299 | ]
300 | qs = [
301 | (key, value)
302 | for (key, value) in request.query_params._list
303 | if key.lower() not in qs_key_to_remove
304 | ]
305 | if qs:
306 | tiles_url += f"?{urllib.parse.urlencode(qs)}"
307 |
308 | with ImageReader(src_path) as dst:
309 | return {
310 | "bounds": dst.geographic_bounds,
311 | "minzoom": minzoom if minzoom is not None else dst.minzoom,
312 | "maxzoom": maxzoom if maxzoom is not None else dst.maxzoom,
313 | "tiles": [tiles_url],
314 | }
315 |
316 | @self.router.get("/tiles/{z}/{x}/{y}", **img_endpoint_params)
317 | @self.router.get("/tiles/{z}/{x}/{y}.{format}", **img_endpoint_params)
318 | @self.router.get("/tiles/{z}/{x}/{y}@{scale}x", **img_endpoint_params)
319 | @self.router.get("/tiles/{z}/{x}/{y}@{scale}x.{format}", **img_endpoint_params)
320 | def tile(
321 | z: Annotated[
322 | int,
323 | Path(description="Identifier (Z) selecting one of the scales."),
324 | ],
325 | x: Annotated[
326 | int,
327 | Path(description="Column (X) index of the tile."),
328 | ],
329 | y: Annotated[
330 | int,
331 | Path(description="Row (Y) index of the tile."),
332 | ],
333 | src_path: Annotated[str, Query(description="Dataset URL", alias="url")],
334 | scale: Annotated[
335 | Optional[conint(gt=0, le=4)], "Tile size scale. 1=256x256, 2=512x512..."
336 | ] = None,
337 | format: Annotated[
338 | ImageType,
339 | "Default will be automatically defined if the output image needs a mask (png) or not (jpeg).",
340 | ] = None,
341 | layer_params: BidxExprParams = Depends(),
342 | dataset_params: DatasetParams = Depends(),
343 | rescale: RescalingParams = Depends(),
344 | color_formula: Annotated[
345 | Optional[str],
346 | Query(
347 | title="Color Formula",
348 | description="rio-color formula (info: https://github.com/mapbox/rio-color)",
349 | ),
350 | ] = None,
351 | colormap: ColorMapParams = Depends(),
352 | add_mask: Annotated[
353 | Optional[bool],
354 | Query(
355 | alias="return-mask",
356 | description="Add mask to the output data.",
357 | ),
358 | ] = None,
359 | ):
360 | """Tile in Local TMS."""
361 | tilesize = scale * 256 if scale is not None else 256
362 |
363 | with ImageReader(src_path) as dst:
364 | image = dst.tile(
365 | x,
366 | y,
367 | z,
368 | tilesize=tilesize,
369 | **layer_params,
370 | **dataset_params,
371 | )
372 | dst_colormap = getattr(dst, "colormap", None)
373 |
374 | if rescale:
375 | image.rescale(rescale)
376 |
377 | if color_formula:
378 | image.apply_color_formula(color_formula)
379 |
380 | if cmap := colormap or dst_colormap:
381 | image = image.apply_colormap(cmap)
382 |
383 | if not format:
384 | format = ImageType.jpeg if image.mask.all() else ImageType.png
385 |
386 | content = image.render(
387 | add_mask=add_mask if add_mask is not None else True,
388 | img_format=format.driver,
389 | **format.profile,
390 | )
391 |
392 | return Response(content, media_type=format.mediatype)
393 |
394 | def register_viewer(self):
395 | """Register Viewer route."""
396 |
397 | @self.router.get("/viewer", response_class=HTMLResponse)
398 | def image_viewer(
399 | request: Request,
400 | src_path: Annotated[str, Query(description="Dataset URL", alias="url")],
401 | tile_format: Annotated[
402 | Optional[ImageType],
403 | Query(description="Output image type. Default is auto."),
404 | ] = None,
405 | tile_scale: Annotated[
406 | Optional[int],
407 | Query(
408 | gt=0,
409 | lt=4,
410 | description="Tile size scale. 1=256x256, 2=512x512...",
411 | ),
412 | ] = None,
413 | minzoom: Annotated[
414 | Optional[int],
415 | Query(description="Overwrite default minzoom."),
416 | ] = None,
417 | maxzoom: Annotated[
418 | Optional[int],
419 | Query(description="Overwrite default maxzoom."),
420 | ] = None,
421 | layer_params: BidxExprParams = Depends(),
422 | dataset_params: DatasetParams = Depends(),
423 | rescale: RescalingParams = Depends(),
424 | color_formula: Annotated[
425 | Optional[str],
426 | Query(
427 | title="Color Formula",
428 | description="rio-color formula (info: https://github.com/mapbox/rio-color)",
429 | ),
430 | ] = None,
431 | colormap: ColorMapParams = Depends(),
432 | add_mask: Annotated[
433 | Optional[bool],
434 | Query(
435 | alias="return-mask",
436 | description="Add mask to the output data.",
437 | ),
438 | ] = None,
439 | ):
440 | """Return Simple Image viewer."""
441 | tilejson_url = self.url_for(request, "tilejson")
442 | if request.query_params._list:
443 | tilejson_url += f"?{urllib.parse.urlencode(request.query_params._list)}"
444 |
445 | return self.templates.TemplateResponse(
446 | name="local.html",
447 | context={
448 | "request": request,
449 | "tilejson_endpoint": tilejson_url,
450 | },
451 | media_type=MediaType.html.value,
452 | )
453 |
454 |
455 | ###############################################################################
456 | # IIIF Endpoints Factory
457 | ###############################################################################
458 | @dataclass
459 | class IIIFFactory(BaseFactory):
460 | """IIIF Factory.
461 |
462 | Specification: https://iiif.io/api/image/3.0/
463 | """
464 |
465 | def register_routes(self):
466 | """Register Routes."""
467 | self.register_image_api()
468 |
469 | def register_image_api(self): # noqa: C901
470 | """Register IIIF Image API routes."""
471 |
472 | @self.router.get(
473 | "/{identifier:path}/info.json",
474 | response_model=iiifInfo,
475 | response_model_exclude_none=True,
476 | responses={
477 | 200: {
478 | "description": "Image Information Request",
479 | "content": {
480 | "application/json": {},
481 | "application/ld+json": {},
482 | },
483 | },
484 | },
485 | )
486 | def iiif_info(
487 | request: Request,
488 | identifier: Annotated[
489 | str,
490 | Path(description="The identifier of the requested image."),
491 | ],
492 | ):
493 | """Image Information Request."""
494 | output_type = accept_media_type(
495 | request.headers.get("accept", ""),
496 | ["application/json", "application/ld+json"],
497 | )
498 | url_path = self.url_for(
499 | request,
500 | "iiif_baseuri",
501 | identifier=urllib.parse.quote_plus(identifier, safe=""),
502 | )
503 |
504 | identifier = urllib.parse.unquote(identifier)
505 | with ImageReader(identifier) as dst:
506 | # TODO: If overviews:
507 | # Set Sizes
508 | # Set Tiles (using min/max zooms)
509 | info = iiifInfo(
510 | id=url_path,
511 | width=dst.dataset.width,
512 | height=dst.dataset.height,
513 | )
514 |
515 | if output_type == "application/ld+json":
516 | return StreamingResponse(
517 | iter((info.model_dump_json(exclude_none=True) + "\n",)),
518 | media_type='application/ld+json;profile="http://iiif.io/api/image/3/context.json"',
519 | )
520 |
521 | return info
522 |
523 | @self.router.get(
524 | "/{identifier:path}/{region}/{size}/{rotation}/{quality}.{format}",
525 | **img_endpoint_params,
526 | )
527 | def iiif_image( # noqa: C901
528 | identifier: Annotated[
529 | str,
530 | Path(description="The identifier of the requested image."),
531 | ],
532 | region: Annotated[
533 | str,
534 | Path(
535 | description="The region parameter defines the rectangular portion of the underlying image content to be returned."
536 | ),
537 | ],
538 | size: Annotated[
539 | str,
540 | Path(
541 | description="The size parameter specifies the dimensions to which the extracted region, which might be the full image, is to be scaled."
542 | ),
543 | ],
544 | rotation: Annotated[
545 | str,
546 | Path(
547 | description="The rotation parameter specifies mirroring and rotation",
548 | ),
549 | ],
550 | quality: Annotated[
551 | Literal["color", "gray", "bitonal", "default"],
552 | Path(
553 | description="The quality parameter determines whether the image is delivered in color, grayscale or black and white.",
554 | ),
555 | ],
556 | format: Annotated[
557 | IIIFImageFormat,
558 | Path(
559 | description="The format of the returned image is expressed as a suffix, mirroring common filename extensions, at the end of the URI."
560 | ),
561 | ],
562 | # TiTiler Extension
563 | layer_params: BidxExprParams = Depends(),
564 | dataset_params: DatasetParams = Depends(),
565 | rescale: RescalingParams = Depends(),
566 | color_formula: Annotated[
567 | Optional[str],
568 | Query(
569 | title="Color Formula",
570 | description="rio-color formula (info: https://github.com/mapbox/rio-color)",
571 | ),
572 | ] = None,
573 | colormap: ColorMapParams = Depends(),
574 | add_mask: Annotated[
575 | Optional[bool],
576 | Query(
577 | alias="return-mask",
578 | description="Add mask to the output data.",
579 | ),
580 | ] = None,
581 | ):
582 | """IIIF Image Request.
583 |
584 | ref: https://iiif.io/api/image/3.0
585 |
586 | """
587 | identifier = urllib.parse.unquote(identifier)
588 | with ImageReader(identifier) as dst:
589 | dst_width = dst.dataset.width
590 | dst_height = dst.dataset.height
591 |
592 | #################################################################################
593 | # REGION
594 | # full, square, x,y,w,h, pct:x,y,w,h
595 | #################################################################################
596 | window = windows.Window(
597 | col_off=0, row_off=0, width=dst_width, height=dst_height
598 | )
599 | if region == "full":
600 | # The full image is returned, without any cropping.
601 | pass
602 |
603 | elif region == "square":
604 | # The region is defined as an area where the width and height are both equal to the length of the shorter dimension of the full image.
605 | # The region may be positioned anywhere in the longer dimension of the full image at the server’s discretion, and centered is often a reasonable default.
606 | min_size = min(dst_width, dst_height)
607 | x_off = (dst_width - min_size) // 2
608 | y_off = (dst_height - min_size) // 2
609 | window = windows.Window(
610 | col_off=x_off, row_off=y_off, width=min_size, height=min_size
611 | )
612 |
613 | elif region.startswith("pct:"):
614 | # The region to be returned is specified as a sequence of percentages of the full image’s dimensions,
615 | # as reported in the image information document.
616 | # Thus, x represents the number of pixels from the 0 position on the horizontal axis, calculated as a percentage of the reported width.
617 | # w represents the width of the region, also calculated as a percentage of the reported width.
618 | # The same applies to y and h respectively.
619 | x, y, w, h = list(map(float, region.replace("pct:", "").split(",")))
620 | if max(x, y, w, h) > 100 or min(x, y, w, h) < 0:
621 | raise HTTPException(
622 | status_code=400,
623 | detail=f"Invalid Region parameter: {region}.",
624 | )
625 |
626 | x = round(_percent(dst_width, x))
627 | y = round(_percent(dst_height, y))
628 | w = round(_percent(dst_width, w))
629 | h = round(_percent(dst_height, h))
630 |
631 | # Service should return an image cropped at the image’s edge, rather than adding empty space.
632 | w = dst_width - x if w + x > dst_width else w
633 | h = dst_height - y if h + y > dst_height else h
634 |
635 | window = windows.Window(col_off=x, row_off=y, width=w, height=h)
636 |
637 | elif len(region.split(",")) == 4:
638 | # The region of the full image to be returned is specified in terms of absolute pixel values.
639 | # The value of x represents the number of pixels from the 0 position on the horizontal axis.
640 | # The value of y represents the number of pixels from the 0 position on the vertical axis.
641 | # Thus the x,y position 0,0 is the upper left-most pixel of the image. w represents
642 | # the width of the region and h represents the height of the region in pixels.
643 | x, y, w, h = list(map(float, region.split(",")))
644 |
645 | # Service should return an image cropped at the image’s edge, rather than adding empty space.
646 | w = dst_width - x if w + x > dst_width else w
647 | h = dst_height - y if h + y > dst_height else h
648 |
649 | try:
650 | window = windows.Window(col_off=x, row_off=y, width=w, height=h)
651 | except ValueError as e:
652 | raise HTTPException(
653 | status_code=400,
654 | detail=f"Invalid Region parameter: {region}.",
655 | ) from e
656 |
657 | else:
658 | raise HTTPException(
659 | status_code=400, detail=f"Invalid Region parameter: {region}."
660 | )
661 |
662 | if (
663 | window.width <= 0
664 | or window.height <= 0
665 | or window.col_off > dst_width
666 | or window.row_off > dst_height
667 | ):
668 | raise HTTPException(
669 | status_code=400, detail=f"Invalid Region parameter: {region}."
670 | )
671 |
672 | #################################################################################
673 | # SIZE
674 |             # Formats are: max ^max pct:n ^pct:n w, ,h w,h !w,h ^w, ^,h ^w,h ^!w,h
675 | #################################################################################
676 | out_width, out_height = window.width, window.height
677 | aspect_ratio = out_width / out_height
678 |
679 | if size == "max":
680 | # max: The extracted region is returned at the maximum size available, but will not be upscaled.
681 | # The resulting image will have the pixel dimensions of the extracted region,
682 | # unless it is constrained to a smaller size by maxWidth, maxHeight, or maxArea
683 | pass
684 |
685 | elif size == "^max":
686 | # ^max: The extracted region is scaled to the maximum size permitted by maxWidth, maxHeight, or maxArea.
687 | # If the resulting dimensions are greater than the pixel width and height of the extracted region, the extracted region is upscaled.
688 | if aspect_ratio > 1:
689 | out_width = (
690 | max(out_width, iiif_settings.max_width)
691 | if iiif_settings.max_width
692 | else out_width
693 | )
694 | out_height = round(out_width / aspect_ratio)
695 | else:
696 | out_height = (
697 | max(out_height, iiif_settings.max_height)
698 | if iiif_settings.max_height
699 | else out_height
700 | )
701 | out_width = round(aspect_ratio * out_height)
702 |
703 | elif size.startswith("pct:"):
704 | # pct:n: The width and height of the returned image is scaled to n percent of the width and height of the extracted region.
705 | # The value of n must not be greater than 100.
706 | pct_size = float(size.replace("pct:", ""))
707 | if pct_size > 100 or pct_size <= 0:
708 | raise HTTPException(
709 | status_code=400,
710 | detail=f"Invalid Size parameter: {size} (must be between 0 and 100).",
711 | )
712 |
713 | out_width = round(_percent(out_width, pct_size))
714 | out_height = round(_percent(out_height, pct_size))
715 |
716 | elif size.startswith("^pct:"):
717 | # ^pct:n: The width and height of the returned image is scaled to n percent of the width and height of the extracted region.
718 | # For values of n greater than 100, the extracted region is upscaled.
719 | pct_size = float(size.replace("^pct:", ""))
720 | if pct_size <= 0:
721 | raise HTTPException(
722 | status_code=400,
723 | detail=f"Invalid Size parameter: {size} (must be greater than 0).",
724 | )
725 |
726 | out_width = round(_percent(out_width, pct_size))
727 | out_height = round(_percent(out_height, pct_size))
728 |
729 | elif "," in size:
730 | sizes = size.split(",")
731 |
732 | if size.startswith("^!"):
733 | # ^!w,h The extracted region is scaled so that the width and height of the returned image are not greater than w and h, while maintaining the aspect ratio.
734 | # The returned image must be as large as possible but not larger than w, h, or server-imposed limits.
735 | max_width, max_height = list(
736 | map(int, size.replace("^!", "").split(","))
737 | )
738 | if aspect_ratio > 1:
739 | out_width = max_width
740 | out_height = round(out_width / aspect_ratio)
741 | else:
742 | out_height = max_height
743 | out_width = round(aspect_ratio * out_height)
744 |
745 | elif size.startswith("!"):
746 | # !w,h The extracted region is scaled so that the width and height of the returned image are not greater than w and h, while maintaining the aspect ratio.
747 | # The returned image must be as large as possible but not larger than the extracted region, w or h, or server-imposed limits.
748 | max_width, max_height = list(
749 | map(int, size.replace("!", "").split(","))
750 | )
751 |
752 | if aspect_ratio > 1:
753 | out_width = max_width
754 | out_height = round(out_width / aspect_ratio)
755 | else:
756 | out_height = max_height
757 | out_width = round(aspect_ratio * out_height)
758 |
759 | if out_width > window.width or out_height > window.height:
760 | raise HTTPException(
761 | status_code=400,
762 |                             detail=f"Invalid '!w,h' parameter: {size} (greater than region size {window.width},{window.height}).",
763 | )
764 |
765 | elif size.startswith("^"):
766 | sizes = size.replace("^", "").split(",")
767 |
768 | if size.endswith(","):
769 | # ^w,: The extracted region should be scaled so that the width of the returned image is exactly equal to w.
770 | # If w is greater than the pixel width of the extracted region, the extracted region is upscaled.
771 | out_width = int(sizes[0])
772 | out_height = round(out_width / aspect_ratio)
773 |
774 | elif size.startswith("^,"):
775 | # ^,h: The extracted region should be scaled so that the height of the returned image is exactly equal to h. If h is greater than the pixel height of the extracted region, the extracted region is upscaled.
776 | out_height = int(sizes[1])
777 | out_width = round(aspect_ratio * out_height)
778 |
779 | elif sizes[0] and sizes[1]:
780 | # ^w,h: The width and height of the returned image are exactly w and h.
781 | # The aspect ratio of the returned image may be significantly different than the extracted region, resulting in a distorted image.
782 | # If w and/or h are greater than the corresponding pixel dimensions of the extracted region, the extracted region is upscaled.
783 | out_width, out_height = list(map(int, sizes))
784 |
785 | elif size.endswith(","):
786 | # w,: The extracted region should be scaled so that the width of the returned image is exactly equal to w.
787 | # The value of w must not be greater than the width of the extracted region.
788 | out_width = int(sizes[0])
789 | if out_width > window.width:
790 | raise HTTPException(
791 | status_code=400,
792 | detail=f"Invalid 'w' parameter: {out_width} (greater than region width {window.width}).",
793 | )
794 | out_height = round(out_width / aspect_ratio)
795 |
796 | elif size.startswith(","):
797 | # ,h: The extracted region should be scaled so that the height of the returned image is exactly equal to h.
798 | # The value of h must not be greater than the height of the extracted region.
799 | out_height = int(sizes[1])
800 | if out_height > window.height:
801 | raise HTTPException(
802 | status_code=400,
803 | detail=f"Invalid 'h' parameter: {out_height} (greater than region height {window.height}).",
804 | )
805 | out_width = round(aspect_ratio * out_height)
806 |
807 | elif sizes[0] and sizes[1]:
808 | # w,h: The width and height of the returned image are exactly w and h.
809 | # The aspect ratio of the returned image may be significantly different than the extracted region, resulting in a distorted image.
810 | # The values of w and h must not be greater than the corresponding pixel dimensions of the extracted region.
811 | out_width, out_height = list(map(int, sizes))
812 | if out_width > window.width or out_height > window.height:
813 | raise HTTPException(
814 | status_code=400,
815 |                             detail=f"Invalid 'w,h' parameter: {size} (greater than region size {window.width},{window.height}).",
816 | )
817 |
818 | else:
819 | raise HTTPException(
820 | status_code=400, detail=f"Invalid Size parameter: {size}."
821 | )
822 |
823 | else:
824 | raise HTTPException(
825 | status_code=400, detail=f"Invalid Size parameter: {size}."
826 | )
827 |
828 | out_width, out_height = _get_sizes(
829 | out_width,
830 | out_height,
831 | max_width=iiif_settings.max_width,
832 | max_height=iiif_settings.max_height,
833 | max_area=iiif_settings.max_area,
834 | )
835 |
836 | if out_width <= 1 or out_height <= 1:
837 | raise HTTPException(
838 | status_code=400,
839 | detail=f"Invalid Size parameter: {size} resulting in size <=1 ({out_width},{out_height}).",
840 | )
841 |
842 | image = dst.read(
843 | window=window,
844 | width=int(out_width),
845 | height=int(out_height),
846 | **layer_params,
847 | **dataset_params,
848 | )
849 | dst_colormap = getattr(dst, "colormap", None)
850 |
851 | #################################################################################
852 | # ROTATION
853 | # Formats are: n, !n
854 | #################################################################################
855 | try:
856 | rot = float(rotation.replace("!", ""))
857 | if rot < 0 or rot > 360:
858 | raise ValueError("Invalid rotation value")
859 |
860 |             except ValueError as e:
861 | raise HTTPException(
862 | status_code=400,
863 | detail=f"Invalid rotation parameter: {rotation}.",
864 | ) from e
865 |
866 | image = rotate(image, rot, expand=True, mirrored=rotation.startswith("!"))
867 |
868 | if rescale:
869 | image.rescale(rescale)
870 |
871 | if color_formula:
872 | image.apply_color_formula(color_formula)
873 |
874 | #################################################################################
875 | # QUALITY
876 | # one of default, color, gray or bitonal
877 | #################################################################################
878 | if quality == "gray":
879 | colormap = dst_colormap = None
880 | image = image_to_grayscale(image)
881 |
882 | elif quality == "bitonal":
883 | colormap = dst_colormap = None
884 | image = image_to_bitonal(image)
885 |
886 | if cmap := colormap or dst_colormap:
887 | image = image.apply_colormap(cmap)
888 |
889 | content = image.render(
890 | add_mask=add_mask if add_mask is not None else True,
891 | img_format=format.driver,
892 | **format.profile,
893 | )
894 | return Response(content, media_type=format.mediatype)
895 |
896 | @self.router.get(
897 | "/{identifier:path}",
898 | response_class=RedirectResponse,
899 | include_in_schema=False,
900 | )
901 | def iiif_baseuri(
902 | request: Request,
903 | identifier: Annotated[
904 | str, Path(description="The identifier of the requested image.")
905 | ],
906 | ):
907 | """Return Simple IIIF viewer.
908 |
909 | ref: https://iiif.io/api/image/3.0/#2-uri-syntax
910 | When the base URI is dereferenced, the interaction should result in the image information document.
911 | It is recommended that the response be a 303 status redirection to the image information document’s URI.
912 | Implementations may also exhibit other behavior for the base URI beyond the scope of this specification
913 | in response to HTTP request headers and methods.
914 |
915 | """
916 | url = self.url_for(request, "iiif_info", identifier=identifier)
917 | output_type = accept_media_type(
918 | request.headers.get("accept", ""),
919 | ["text/html"],
920 | )
921 | if output_type:
922 | return self.templates.TemplateResponse(
923 | name="iiif.html",
924 | context={
925 | "request": request,
926 | "info_endpoint": url,
927 | },
928 | media_type=MediaType.html.value,
929 | )
930 |
931 | return RedirectResponse(url)
932 |
933 |
934 | ###############################################################################
935 | # Geo Tiler Factory
936 | ###############################################################################
937 | @dataclass
938 | class GeoTilerFactory(TilerFactory):
939 |     """Like TilerFactory but with fewer endpoints."""
940 |
941 | reader: Type[BaseReader] = Reader
942 |
943 | reader_dependency: Type[DefaultDependency] = GCPSParams
944 |
945 | # Rasterio Dataset Options (nodata, unscale, resampling)
946 | dataset_dependency: Type[DefaultDependency] = DatasetParams
947 |
948 | def register_routes(self):
949 |         """This method registers routes to the router."""
950 | self.tile()
951 | self.tilejson()
952 |
953 | if self.add_viewer:
954 | self.map_viewer()
955 |
--------------------------------------------------------------------------------
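
As a worked example of the "pct:" region handling in `iiif_image` above (the 1000x800 image dimensions are hypothetical): the percentages are converted into an absolute pixel window and cropped at the image edge rather than padded.

    from rasterio import windows

    dst_width, dst_height = 1000, 800
    x, y, w, h = 10.0, 10.0, 50.0, 50.0        # region = "pct:10,10,50,50"

    x = round(dst_width * x / 100)             # 100
    y = round(dst_height * y / 100)            # 80
    w = round(dst_width * w / 100)             # 500
    h = round(dst_height * h / 100)            # 400

    w = dst_width - x if w + x > dst_width else w
    h = dst_height - y if h + y > dst_height else h

    window = windows.Window(col_off=x, row_off=y, width=w, height=h)
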
/titiler/image/main.py:
--------------------------------------------------------------------------------
1 | """TiTiler-Image FastAPI application."""
2 | import warnings
3 |
4 | from fastapi import FastAPI
5 | from fastapi.exceptions import RequestValidationError
6 | from rasterio.errors import NotGeoreferencedWarning, RasterioIOError
7 | from starlette import status
8 | from starlette.middleware.cors import CORSMiddleware
9 | from starlette_cramjam.middleware import CompressionMiddleware
10 |
11 | from titiler.core.errors import DEFAULT_STATUS_CODES, add_exception_handlers
12 | from titiler.core.middleware import CacheControlMiddleware
13 | from titiler.image import __version__ as titiler_image_version
14 | from titiler.image.factory import (
15 | GeoTilerFactory,
16 | IIIFFactory,
17 | LocalTilerFactory,
18 | MetadataFactory,
19 | )
20 | from titiler.image.settings import api_settings
21 |
22 | app = FastAPI(
23 | title=api_settings.name,
24 | openapi_url="/api",
25 | docs_url="/api.html",
26 | description="""titiler application to work with non-geo images.
27 |
28 | ---
29 |
30 | **Source Code**: https://github.com/developmentseed/titiler-image
31 |
32 | ---
33 | """,
34 | version=titiler_image_version,
35 | root_path=api_settings.root_path,
36 | )
37 |
38 | warnings.filterwarnings("ignore", category=NotGeoreferencedWarning)
39 |
40 | DEFAULT_STATUS_CODES.update(
41 | {
42 | RasterioIOError: status.HTTP_404_NOT_FOUND,
43 | RequestValidationError: status.HTTP_400_BAD_REQUEST,
44 | }
45 | )
46 |
47 | add_exception_handlers(app, DEFAULT_STATUS_CODES)
48 |
49 | # Set all CORS enabled origins
50 | if api_settings.cors_origins:
51 | app.add_middleware(
52 | CORSMiddleware,
53 | allow_origins=api_settings.cors_origins,
54 | allow_credentials=True,
55 | allow_methods=["GET"],
56 | allow_headers=["*"],
57 | )
58 |
59 | app.add_middleware(
60 | CompressionMiddleware,
61 | minimum_size=0,
62 | exclude_mediatype={
63 | "image/jpeg",
64 | "image/jpg",
65 | "image/png",
66 | "image/jp2",
67 | "image/webp",
68 | },
69 | )
70 |
71 | app.add_middleware(
72 | CacheControlMiddleware,
73 | cachecontrol=api_settings.cachecontrol,
74 | exclude_path={r"/healthz"},
75 | )
76 |
77 | meta = MetadataFactory()
78 | app.include_router(meta.router, tags=["Metadata"])
79 |
80 | iiif = IIIFFactory(router_prefix="/iiif")
81 | app.include_router(iiif.router, tags=["IIIF"], prefix="/iiif")
82 |
83 | image_tiles = LocalTilerFactory(router_prefix="/image")
84 | app.include_router(image_tiles.router, tags=["Local Tiles"], prefix="/image")
85 |
86 | geo_tiles = GeoTilerFactory(router_prefix="/geo")
87 | app.include_router(geo_tiles.router, tags=["Geo Tiles"], prefix="/geo")
88 |
89 | ###############################################################################
90 | # Health Check Endpoint
91 | @app.get(
92 | "/healthz",
93 | description="Health Check.",
94 | summary="Health Check.",
95 | operation_id="healthCheck",
96 | tags=["Health Check"],
97 | )
98 | def ping():
99 | """Health check."""
100 | return {"ping": "pong!"}
101 |
--------------------------------------------------------------------------------
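
One way to serve the application above during development, assuming uvicorn is installed alongside the package:

    uvicorn titiler.image.main:app --reload

The interactive documentation is then available at /api.html and the OpenAPI document at /api, matching the docs_url and openapi_url configured above.
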
/titiler/image/models.py:
--------------------------------------------------------------------------------
1 | """Common response models."""
2 |
3 | from typing import Dict, List, Literal, Optional
4 |
5 | from pydantic import BaseModel, Field, model_validator
6 | from typing_extensions import Annotated
7 |
8 | from titiler.image.settings import iiif_settings
9 |
10 |
11 | class IIIFSize(BaseModel):
12 | """
13 | IIIF Size info
14 |
15 | e.g. `{ "width": 150, "height": 100 }`
16 |
17 | Ref: https://iiif.io/api/image/3.0/#53-sizes
18 | """
19 |
20 | type: Literal["Size"] = "Size"
21 | width: int
22 | height: int
23 |
24 |
25 | class IIIFTile(BaseModel):
26 | """
27 | IIIF Tile info
28 |
29 | e.g `{ "width": 512, "height": 512, "scaleFactors": [ 1, 2, 4, 8, 16 ] }`
30 |
31 | Ref: https://iiif.io/api/image/3.0/#54-tiles
32 | """
33 |
34 | type: Literal["Tile"] = "Tile"
35 | scaleFactors: List[int]
36 | width: int
37 | height: Optional[int] = None
38 |
39 | @model_validator(mode="before")
40 | def check_height(cls, values):
41 | """Check height configuration."""
42 |         if not values.get("height"):  # height defaults to width when not provided
43 | values["height"] = values["width"]
44 |
45 | return values
46 |
47 |
48 | class iiifInfo(BaseModel):
49 | """
50 | IIIF Info model.
51 |
52 | Based on https://iiif.io/api/image/3.0/#5-image-information
53 |
54 | """
55 |
56 | # The @context tells Linked Data processors how to interpret the image information. If extensions are used then their context definitions should be included in this top-level @context property.
57 | context: Annotated[
58 | Literal["http://iiif.io/api/image/3/context.json"],
59 | Field(alias="@context"),
60 | ] = "http://iiif.io/api/image/3/context.json"
61 |
62 | # The base URI of the image as defined in URI Syntax, including scheme, server, prefix and identifier without a trailing slash.
63 | id: str
64 |
65 | # The type for the Image API. The value must be the string ImageService3.
66 | type: Literal["ImageService3"] = "ImageService3"
67 |
68 | # The URI http://iiif.io/api/image which can be used to determine that the document describes an image service which is a version of the IIIF Image API.
69 | protocol: Literal["http://iiif.io/api/image"] = "http://iiif.io/api/image"
70 | # A string indicating the highest compliance level which is fully supported by the service. The value must be one of level0, level1, or level2.
71 | profile: Literal["level0", "level1", "level2"] = "level2"
72 | # The width in pixels of the full image, given as an integer.
73 | width: int
74 | # The height in pixels of the full image, given as an integer.
75 | height: int
76 | # The maximum width in pixels supported for this image.
77 | maxWidth: Optional[int] = iiif_settings.max_width
78 | # The maximum height in pixels supported for this image.
79 | maxHeight: Optional[int] = iiif_settings.max_height
80 | # The maximum area in pixels supported for this image.
81 | maxArea: Optional[int] = iiif_settings.max_area
82 |
83 | preferredFormats: Optional[List[str]] = ["png", "jpeg", "webp", "tif", "jp2"]
84 |
85 | # sizes property, which is used to describe preferred height and width combinations for representations of the full image.
86 | sizes: Optional[List[IIIFSize]] = None
87 |
88 | # tiles property which describes a set of image regions that have a consistent height and width, over a series of resolutions, that can be stitched together visually.
89 | tiles: Optional[List[IIIFTile]] = None
90 |
91 | # The rights property has the same semantics and requirements as it does in the Presentation API.
92 | rights: Optional[str] = None
93 |
94 | # https://iiif.io/api/image/3.0/#57-extra-functionality
95 | # An array of strings that can be used as the quality parameter, in addition to default.
96 | extraQualities: Optional[List[str]] = None
97 | # An array of strings that can be used as the format parameter, in addition to the ones specified in the referenced profile.
98 | extraFormats: Optional[List[str]] = None
99 | # An array of strings identifying features supported by the service, in addition to the ones specified in the referenced profile.
100 | extraFeatures: Optional[List[str]] = None
101 |
102 | # https://iiif.io/api/image/3.0/#58-linking-properties
103 | # TODO: Define models
104 | partOf: Optional[Dict] = None
105 | seeAlso: Optional[Dict] = None
106 | service: Optional[Dict] = None
107 |
--------------------------------------------------------------------------------
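
A minimal sketch of the document the `iiifInfo` model above produces; the identifier and dimensions are made up, no IIIF size limits are assumed to be configured, and fields left at a None default are dropped by `exclude_none`:

    from titiler.image.models import iiifInfo

    info = iiifInfo(id="https://example.com/iiif/my-image.tif", width=4000, height=3000)
    print(info.model_dump_json(exclude_none=True))
    # {"context": "http://iiif.io/api/image/3/context.json",
    #  "id": "https://example.com/iiif/my-image.tif", "type": "ImageService3",
    #  "protocol": "http://iiif.io/api/image", "profile": "level2",
    #  "width": 4000, "height": 3000, "preferredFormats": ["png", "jpeg", "webp", "tif", "jp2"]}
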
/titiler/image/reader.py:
--------------------------------------------------------------------------------
1 | """Reader with GCPS support."""
2 |
3 | import warnings
4 | import xml.etree.ElementTree as ET
5 | from typing import List, Optional, Union
6 |
7 | import attr
8 | import rasterio
9 | from rasterio._path import _parse_path
10 | from rasterio.control import GroundControlPoint
11 | from rasterio.crs import CRS
12 | from rasterio.dtypes import _gdal_typename
13 | from rasterio.enums import MaskFlags
14 | from rasterio.io import DatasetReader
15 | from rasterio.transform import from_gcps
16 | from rasterio.vrt import WarpedVRT
17 | from rio_tiler import io
18 | from rio_tiler.constants import WGS84_CRS
19 | from rio_tiler.errors import NoOverviewWarning
20 | from rio_tiler.utils import has_alpha_band
21 |
22 |
23 | @attr.s
24 | class Reader(io.Reader):
25 | """GCPS + Image Reader"""
26 |
27 | gcps: Optional[List[GroundControlPoint]] = attr.ib(default=None)
28 | gcps_crs: Optional[CRS] = attr.ib(default=WGS84_CRS)
29 | gcps_order: Optional[int] = attr.ib(default=None)
30 |
31 | cutline: Optional[str] = attr.ib(default=None)
32 |
33 | dataset: Union[DatasetReader, WarpedVRT] = attr.ib(init=False)
34 |
35 | def __attrs_post_init__(self):
36 | """Define _kwargs, open dataset and get info."""
37 | dataset = self._ctx_stack.enter_context(rasterio.open(self.input))
38 |
39 |         # when external GCPs are provided we create a VRT
40 | if self.gcps:
41 | vrt_xml = vrt_doc(dataset, gcps=self.gcps, gcps_crs=self.gcps_crs)
42 | dataset = self._ctx_stack.enter_context(rasterio.open(vrt_xml))
43 |
44 | vrt_options = {}
45 |
46 |         # Option 1: GCPs (internal or external)
47 | if dataset.gcps[0]:
48 | vrt_options.update(
49 | {
50 | "src_crs": dataset.gcps[1],
51 | "src_transform": from_gcps(dataset.gcps[0]),
52 | }
53 | )
54 | if self.gcps_order is not None:
55 | vrt_options.update({"max_gcp_order": self.gcps_order})
56 |
57 | # Option 2: Cutline
58 | if self.cutline:
59 | vrt_options.update({"cutline": self.cutline})
60 |
61 | if vrt_options:
62 | vrt_options.update({"add_alpha": True})
63 | if dataset.nodata is not None:
64 | vrt_options.update(
65 | {
66 | "nodata": dataset.nodata,
67 | "add_alpha": False,
68 | "src_nodata": dataset.nodata,
69 | }
70 | )
71 |
72 | elif has_alpha_band(dataset):
73 | vrt_options.update({"add_alpha": False})
74 |
75 | self.dataset = self._ctx_stack.enter_context(
76 | WarpedVRT(dataset, **vrt_options)
77 | )
78 |
79 | else:
80 | self.dataset = dataset
81 |
82 | self.bounds = tuple(self.dataset.bounds)
83 | self.crs = self.dataset.crs
84 |
85 | if self.colormap is None:
86 | self._get_colormap()
87 |
88 | if min(
89 | self.dataset.width, self.dataset.height
90 | ) > 512 and not self.dataset.overviews(1):
91 | warnings.warn(
92 | "The dataset has no Overviews. rio-tiler performances might be impacted.",
93 | NoOverviewWarning,
94 | )
95 |
96 |
97 | def vrt_doc( # noqa: C901
98 | src_dataset,
99 | gcps: Optional[List[GroundControlPoint]] = None,
100 | gcps_crs: Optional[CRS] = WGS84_CRS,
101 | ):
102 | """Make a VRT XML document.
103 |
104 | Adapted from rasterio.vrt._boundless_vrt_doc function
105 | """
106 | vrtdataset = ET.Element("VRTDataset")
107 | vrtdataset.attrib["rasterYSize"] = str(src_dataset.height)
108 | vrtdataset.attrib["rasterXSize"] = str(src_dataset.width)
109 |
110 | tags = src_dataset.tags()
111 | if tags:
112 | metadata = ET.SubElement(vrtdataset, "Metadata")
113 | for key, value in tags.items():
114 | v = ET.SubElement(metadata, "MDI")
115 | v.attrib["Key"] = key
116 | v.text = str(value)
117 |
118 | im_tags = src_dataset.tags(ns="IMAGE_STRUCTURE")
119 | if im_tags:
120 | metadata = ET.SubElement(vrtdataset, "Metadata")
121 | for key, value in im_tags.items():
122 | if key == "LAYOUT" and value == "COG":
123 | continue
124 | v = ET.SubElement(metadata, "MDI")
125 | v.attrib["Key"] = key
126 | v.text = str(value)
127 |
128 | if src_dataset.crs:
129 | srs = ET.SubElement(vrtdataset, "SRS")
130 | srs.text = src_dataset.crs.wkt if src_dataset.crs else ""
131 | geotransform = ET.SubElement(vrtdataset, "GeoTransform")
132 | geotransform.text = ",".join([str(v) for v in src_dataset.transform.to_gdal()])
133 |
134 | nodata_value = src_dataset.nodata
135 |
136 | if gcps:
137 | gcp_list = ET.SubElement(vrtdataset, "GCPList")
138 | gcp_list.attrib["Projection"] = str(gcps_crs)
139 | for gcp in gcps:
140 | g = ET.SubElement(gcp_list, "GCP")
141 | g.attrib["Id"] = gcp.id
142 | g.attrib["Pixel"] = str(gcp.col)
143 | g.attrib["Line"] = str(gcp.row)
144 | g.attrib["X"] = str(gcp.x)
145 | g.attrib["Y"] = str(gcp.y)
146 |
147 | for bidx, ci, block_shape, dtype in zip(
148 | src_dataset.indexes,
149 | src_dataset.colorinterp,
150 | src_dataset.block_shapes,
151 | src_dataset.dtypes,
152 | ):
153 | vrtrasterband = ET.SubElement(vrtdataset, "VRTRasterBand")
154 | vrtrasterband.attrib["dataType"] = _gdal_typename(dtype)
155 | vrtrasterband.attrib["band"] = str(bidx)
156 |
157 | if nodata_value is not None:
158 | nodata = ET.SubElement(vrtrasterband, "NoDataValue")
159 | nodata.text = str(nodata_value)
160 |
161 | colorinterp = ET.SubElement(vrtrasterband, "ColorInterp")
162 | colorinterp.text = ci.name.capitalize()
163 |
164 | source = ET.SubElement(vrtrasterband, "SimpleSource")
165 | sourcefilename = ET.SubElement(source, "SourceFilename")
166 | sourcefilename.attrib["relativeToVRT"] = "0"
167 | sourcefilename.text = _parse_path(src_dataset.name).as_vsi()
168 | sourceband = ET.SubElement(source, "SourceBand")
169 | sourceband.text = str(bidx)
170 | sourceproperties = ET.SubElement(source, "SourceProperties")
171 | sourceproperties.attrib["RasterXSize"] = str(src_dataset.width)
172 | sourceproperties.attrib["RasterYSize"] = str(src_dataset.height)
173 | sourceproperties.attrib["dataType"] = _gdal_typename(dtype)
174 | sourceproperties.attrib["BlockYSize"] = str(block_shape[0])
175 | sourceproperties.attrib["BlockXSize"] = str(block_shape[1])
176 | srcrect = ET.SubElement(source, "SrcRect")
177 | srcrect.attrib["xOff"] = "0"
178 | srcrect.attrib["yOff"] = "0"
179 | srcrect.attrib["xSize"] = str(src_dataset.width)
180 | srcrect.attrib["ySize"] = str(src_dataset.height)
181 | dstrect = ET.SubElement(source, "DstRect")
182 | dstrect.attrib["xOff"] = "0"
183 | dstrect.attrib["yOff"] = "0"
184 | dstrect.attrib["xSize"] = str(src_dataset.width)
185 | dstrect.attrib["ySize"] = str(src_dataset.height)
186 |
187 | if src_dataset.options is not None:
188 | openoptions = ET.SubElement(source, "OpenOptions")
189 | for ookey, oovalue in src_dataset.options.items():
190 | ooi = ET.SubElement(openoptions, "OOI")
191 | ooi.attrib["key"] = str(ookey)
192 | ooi.text = str(oovalue)
193 |
194 | if nodata_value is not None:
195 | nodata = ET.SubElement(source, "NODATA")
196 | nodata.text = str(nodata_value)
197 |
198 | if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):
199 | maskband = ET.SubElement(vrtdataset, "MaskBand")
200 | vrtrasterband = ET.SubElement(maskband, "VRTRasterBand")
201 | vrtrasterband.attrib["dataType"] = "Byte"
202 |
203 | source = ET.SubElement(vrtrasterband, "SimpleSource")
204 | sourcefilename = ET.SubElement(source, "SourceFilename")
205 | sourcefilename.attrib["relativeToVRT"] = "0"
206 | sourcefilename.attrib["shared"] = "0"
207 | sourcefilename.text = _parse_path(src_dataset.name).as_vsi()
208 |
209 | sourceband = ET.SubElement(source, "SourceBand")
210 | sourceband.text = "mask,1"
211 | sourceproperties = ET.SubElement(source, "SourceProperties")
212 | sourceproperties.attrib["RasterXSize"] = str(src_dataset.width)
213 | sourceproperties.attrib["RasterYSize"] = str(src_dataset.height)
214 | sourceproperties.attrib["dataType"] = "Byte"
215 | sourceproperties.attrib["BlockYSize"] = str(block_shape[0])
216 | sourceproperties.attrib["BlockXSize"] = str(block_shape[1])
217 | srcrect = ET.SubElement(source, "SrcRect")
218 | srcrect.attrib["xOff"] = "0"
219 | srcrect.attrib["yOff"] = "0"
220 | srcrect.attrib["xSize"] = str(src_dataset.width)
221 | srcrect.attrib["ySize"] = str(src_dataset.height)
222 | dstrect = ET.SubElement(source, "DstRect")
223 | dstrect.attrib["xOff"] = "0"
224 | dstrect.attrib["yOff"] = "0"
225 | dstrect.attrib["xSize"] = str(src_dataset.width)
226 | dstrect.attrib["ySize"] = str(src_dataset.height)
227 |
228 | return ET.tostring(vrtdataset).decode("ascii")
229 |
--------------------------------------------------------------------------------
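
A rough usage sketch for the Reader above (file name and coordinates are made up): opening a non-georeferenced image with external GCPs builds the VRT document and warps it on the fly, so the regular rio-tiler read methods work in map coordinates.

    from rasterio.control import GroundControlPoint
    from titiler.image.reader import Reader

    gcps = [
        GroundControlPoint(row=100, col=100, x=-71.10, y=42.40),
        GroundControlPoint(row=100, col=900, x=-71.00, y=42.40),
        GroundControlPoint(row=700, col=900, x=-71.00, y=42.30),
    ]

    with Reader("scan.tif", gcps=gcps) as src:
        print(src.crs, src.bounds)       # georeferenced through the generated VRT
        img = src.preview(max_size=512)
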
/titiler/image/resources/enums.py:
--------------------------------------------------------------------------------
1 | """Titiler.image Enums."""
2 |
3 | from enum import Enum
4 | from types import DynamicClassAttribute
5 |
6 | from rio_tiler.profiles import img_profiles
7 |
8 | from titiler.core.resources.enums import ImageDriver, MediaType
9 |
10 |
11 | class IIIFImageFormat(str, Enum):
12 | """Available Output image type."""
13 |
14 | jpg = "jpg"
15 | tif = "tif"
16 | png = "png"
17 | gif = "gif"
18 | jp2 = "jp2"
19 | # pdf = "pdf" Not Available
20 |
21 | @DynamicClassAttribute
22 | def profile(self):
23 | """Return rio-tiler image default profile."""
24 | return img_profiles.get(self._name_, {})
25 |
26 | @DynamicClassAttribute
27 | def driver(self):
28 |         """Return GDAL driver name."""
29 | return ImageDriver[self._name_].value
30 |
31 | @DynamicClassAttribute
32 | def mediatype(self):
33 | """Return image media type."""
34 | return MediaType[self._name_].value
35 |
--------------------------------------------------------------------------------
/titiler/image/settings.py:
--------------------------------------------------------------------------------
1 | """API settings."""
2 |
3 | from typing import Optional
4 |
5 | from pydantic import field_validator, model_validator
6 | from pydantic_settings import BaseSettings
7 |
8 |
9 | class ApiSettings(BaseSettings):
10 | """API settings"""
11 |
12 | name: str = "titiler-image"
13 | cors_origins: str = "*"
14 | cachecontrol: str = "public, max-age=3600"
15 | root_path: str = ""
16 |
17 | model_config = {
18 | "env_prefix": "TITILER_IMAGE_API_",
19 | "env_file": ".env",
20 | "extra": "ignore",
21 | }
22 |
23 | @field_validator("cors_origins")
24 | def parse_cors_origin(cls, v):
25 | """Parse CORS origins."""
26 | return [origin.strip() for origin in v.split(",")]
27 |
28 |
29 | class IIIFSettings(BaseSettings):
30 | """IIIF settings"""
31 |
32 | # The maximum width in pixels supported for this image.
33 | max_width: Optional[int] = None
34 |
35 | # The maximum height in pixels supported for this image.
36 | max_height: Optional[int] = None
37 |
38 | # The maximum area in pixels supported for this image.
39 | max_area: Optional[int] = None
40 |
41 | model_config = {
42 | "env_prefix": "TITILER_IMAGE_IIIF_",
43 | "env_file": ".env",
44 | "extra": "ignore",
45 | }
46 |
47 | @model_validator(mode="before")
48 | def check_max(cls, values):
49 | """Check MaxWitdh and MaxHeight configuration."""
50 | # maxWidth must be specified if maxHeight is specified.
51 | keys = {"max_width", "max_height"}
52 | if keys.intersection(values):
53 | if "max_width" not in values:
54 | raise Exception("max_width has to be set if max_height is.")
55 | if "max_height" not in values:
56 | values["max_height"] = values["max_width"]
57 |
58 | return values
59 |
60 |
61 | iiif_settings = IIIFSettings()
62 | api_settings = ApiSettings()
63 |
--------------------------------------------------------------------------------
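Both settings classes read their values from prefixed environment variables, or a .env file, when they are instantiated. A minimal sketch with illustrative values; note that the module-level api_settings/iiif_settings objects are created at import time, so the example builds fresh instances after setting the variables:

import os

from titiler.image.settings import ApiSettings, IIIFSettings

os.environ["TITILER_IMAGE_API_CORS_ORIGINS"] = "https://example.com,https://viewer.example.com"
os.environ["TITILER_IMAGE_IIIF_MAX_WIDTH"] = "2048"

api = ApiSettings()
print(api.cors_origins)  # ['https://example.com', 'https://viewer.example.com']

iiif = IIIFSettings()
print(iiif.max_width, iiif.max_height)  # 2048 2048 (max_height falls back to max_width)

# Setting only TITILER_IMAGE_IIIF_MAX_HEIGHT would raise, because the
# model validator requires max_width whenever max_height is given.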
/titiler/image/templates/iiif.html:
--------------------------------------------------------------------------------
[HTML template: an "Image Viewer" page; the page markup is not preserved in this listing]
--------------------------------------------------------------------------------
/titiler/image/templates/local.html:
--------------------------------------------------------------------------------
[HTML template: an "Image Viewer" page; the page markup is not preserved in this listing]
--------------------------------------------------------------------------------
/titiler/image/utils.py:
--------------------------------------------------------------------------------
1 | """Titiler.image utility functions."""
2 |
3 | import math
4 | from typing import List, Optional, Tuple
5 |
6 | import numpy
7 | from affine import Affine
8 | from fastapi import HTTPException
9 | from rasterio.warp import reproject
10 | from rio_tiler.models import ImageData
11 |
12 |
13 | def _percent(x: float, y: float) -> float:
14 | return (x / 100) * y
15 |
16 |
17 | def _get_sizes(
18 | w: int,
19 | h: int,
20 | max_width: Optional[int] = None,
21 | max_height: Optional[int] = None,
22 | max_area: Optional[int] = None,
23 | ) -> Tuple[int, int]:
24 | """Return Output width/height constrained by environment."""
25 | # use size constraints if present, else full
26 | if max_area and max_area < (w * h):
27 | area_ratio = max_area / (w * h)
28 | w = int(w * area_ratio)
29 | h = int(h * area_ratio)
30 |
31 | elif max_width:
32 | max_height = max_height or max_width
33 | width, height = w, h
34 |
35 | if w > max_width:
36 | w = max_width
37 | h = int(height * max_width / width)
38 |
39 | if h > max_height:
40 | h = max_height
41 | w = int(width * max_height / height)
42 |
43 | return w, h
44 |
45 |
46 | def rotate(img: ImageData, angle: float, expand: bool = False, mirrored: bool = False):
47 | """Rotate Image.
48 |
49 | Args:
50 | img (rio_tiler.models.ImageData): ImageData to rotate.
51 | angle (float): The degrees of clockwise rotation from 0 up to 360.
52 | expand (bool): Optional expansion flag. If true, expands the output
53 | image to make it large enough to hold the entire rotated image.
54 | If false or omitted, make the output image the same size as the
55 | input image. Note that the expand flag assumes rotation around
56 | the center and no translation.
57 | mirrored (bool): Optional flag to apply mirroring and then rotation.
58 |
59 | Returns:
60 | ImageData
61 |
62 | """
63 |
64 | array = img.array
65 | if mirrored:
66 | array = numpy.flip(array, axis=2)
67 |
68 | if angle != 0:
69 | nband = img.count
70 | nw = img.width
71 | nh = img.height
72 |
73 | # rotation around image center
74 | rotated_affine = Affine.rotation(-angle, (nw // 2, nh // 2))
75 |
76 | # Adapted from https://github.com/python-pillow/Pillow/blob/acdb882aae391f29e551a09dc678b153c0c04e5b/src/PIL/Image.py#L2297-L2311
77 | if expand:
78 | xx = []
79 | yy = []
80 | for x, y in (
81 | (0, 0),
82 | (img.width, 0),
83 | (img.width, img.height),
84 | (0, img.height),
85 | ):
86 | x, y = rotated_affine * (x, y)
87 | xx.append(x)
88 | yy.append(y)
89 |
90 | nw = math.ceil(max(xx)) - math.floor(min(xx))
91 | nh = math.ceil(max(yy)) - math.floor(min(yy))
92 |
93 | rotated_affine = rotated_affine * Affine.translation(
94 | -(nw - img.width) / 2.0, -(nh - img.height) / 2.0
95 | )
96 |
97 | # Rotate the data
98 | data = numpy.zeros((nband, nh, nw), dtype=array.data.dtype)
99 | _ = reproject(
100 | array.data,
101 | data,
102 | src_crs="epsg:4326", # Fake CRS
103 | src_transform=Affine.identity(),
104 | dst_crs="epsg:4326", # Fake CRS
105 | dst_transform=rotated_affine,
106 | )
107 |
108 | # Rotate the mask
109 | mask = numpy.zeros((nband, nh, nw), dtype="uint8") + 1
110 | _ = reproject(
111 | array.mask * 1,
112 | mask,
113 | src_crs="epsg:4326", # Fake CRS
114 | src_transform=Affine.identity(),
115 | dst_crs="epsg:4326", # Fake CRS
116 | dst_transform=rotated_affine,
117 | dst_nodata=1, # 1=True -> means masked
118 | )
119 |
120 | array = numpy.ma.MaskedArray(data, mask=mask.astype("bool"))
121 |
122 | return ImageData(
123 | array,
124 | assets=img.assets,
125 | metadata=img.metadata,
126 | band_names=img.band_names,
127 | dataset_statistics=img.dataset_statistics,
128 | )
129 |
130 |
131 | def image_to_grayscale(img: ImageData) -> ImageData:
132 | """Convert Image to Grayscale using ITU-R 601-2 luma transform."""
133 | if img.count == 1:
134 | return img
135 |
136 | if img.count == 3:
137 | data = (
138 | img.data[0] * 299 / 1000
139 | + img.data[1] * 587 / 1000
140 | + img.data[2] * 114 / 1000
141 | )
142 |
143 | data = numpy.ma.MaskedArray(data.astype("uint8"))
144 | data.mask = ~img.mask.astype("bool")
145 |
146 | return ImageData(
147 | data,
148 | assets=img.assets,
149 | crs=img.crs,
150 | bounds=img.bounds,
151 | band_names=["b1"],
152 | metadata=img.metadata,
153 | )
154 |
155 | raise HTTPException(
156 | status_code=400,
157 |         detail=f"Invalid band count ({img.count}) for grayscale transformation; expected 1 or 3 bands.",
158 | )
159 |
160 |
161 | def image_to_bitonal(img: ImageData) -> ImageData:
162 | """Convert Image to Bitonal
163 |
164 | All values larger than 127 are set to 255 (white), all other values to 0 (black).
165 | """
166 | img = image_to_grayscale(img)
167 | arr = numpy.where(img.array > 127, 255, 0).astype("uint8")
168 |
169 | return ImageData(
170 | arr,
171 | assets=img.assets,
172 | crs=img.crs,
173 | bounds=img.bounds,
174 | band_names=["b1"],
175 | metadata=img.metadata,
176 | )
177 |
178 |
179 | def accept_media_type(accept: str, mediatypes: List[str]) -> Optional[str]:
180 | """Return MediaType based on accept header and available mediatype.
181 |
182 | Links:
183 | - https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
184 | - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept
185 |
186 | """
187 | accept_values = {}
188 | for m in accept.replace(" ", "").split(","):
189 | values = m.split(";")
190 | if len(values) == 1:
191 | name = values[0]
192 | quality = 1.0
193 | else:
194 | name = values[0]
195 | groups = dict([param.split("=") for param in values[1:]]) # type: ignore
196 | try:
197 | q = groups.get("q")
198 | quality = float(q) if q else 1.0
199 | except ValueError:
200 | quality = 0
201 |
202 |         # if quality is 0 we ignore the media type
203 | if quality:
204 | accept_values[name] = quality
205 |
206 | # Create Preference matrix
207 | media_preference = {
208 | v: [n for (n, q) in accept_values.items() if q == v]
209 | for v in sorted(set(accept_values.values()), reverse=True)
210 | }
211 |
212 |     # Loop through available media types in order of client preference
213 | for _, pref in media_preference.items():
214 | for media in mediatypes:
215 | if media in pref:
216 | return media
217 |
218 |     # If no listed media type is supported but "*" is accepted,
219 |     # take the first available media type.
220 | if "*" in accept_values and mediatypes:
221 | return mediatypes[0]
222 |
223 | return None
224 |
--------------------------------------------------------------------------------
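A short usage sketch for the helpers above, exercising them on a synthetic RGB image; it is not part of the repository and the values in the comments are indicative:

import numpy
from rio_tiler.models import ImageData

from titiler.image.utils import (
    accept_media_type,
    image_to_bitonal,
    image_to_grayscale,
    rotate,
)

# A random 3-band uint8 image, purely for illustration.
arr = numpy.random.randint(0, 256, (3, 256, 256), dtype="uint8")
img = ImageData(arr)

rotated = rotate(img, 45, expand=True)  # expand=True grows the canvas to fit the rotated frame
print(rotated.width, rotated.height)    # both larger than 256

gray = image_to_grayscale(img)          # single "b1" band via the ITU-R 601-2 luma transform
bitonal = image_to_bitonal(img)         # pixel values become 0 or 255 only
print(gray.count, numpy.unique(bitonal.array.data))

# Content negotiation: return the client's most preferred media type we can serve.
accept = "image/webp;q=0.8,image/png;q=0.9,*/*;q=0.1"
print(accept_media_type(accept, ["image/jpeg", "image/png"]))  # "image/png"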