├── .github
│   └── workflows
│       ├── notebook-test.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CITATION.cff
├── LICENSE
├── README.md
├── examples
│   ├── ConvertITKImage.ipynb
│   ├── ConvertImageioImageResource.ipynb
│   ├── ConvertPyImageJDataset.ipynb
│   ├── ConvertTiffFile.ipynb
│   └── HelloMultiscaleSpatialImageWorld.ipynb
├── multiscale_spatial_image
│   ├── __about__.py
│   ├── __init__.py
│   ├── _docs.py
│   ├── multiscale_spatial_image.py
│   ├── operations
│   │   ├── __init__.py
│   │   └── operations.py
│   ├── to_multiscale
│   │   ├── __init__.py
│   │   ├── _dask_image.py
│   │   ├── _itk.py
│   │   ├── _support.py
│   │   ├── _xarray.py
│   │   ├── itk_image_to_multiscale.py
│   │   └── to_multiscale.py
│   └── utils.py
├── pixi.lock
├── pyproject.toml
└── test
    ├── __init__.py
    ├── _data.py
    ├── conftest.py
    ├── test_ngff_validation.py
    ├── test_operations.py
    ├── test_to_multiscale.py
    ├── test_to_multiscale_dask_image.py
    ├── test_to_multiscale_itk.py
    ├── test_to_multiscale_xarray.py
    └── test_utils.py

--------------------------------------------------------------------------------
/.github/workflows/notebook-test.yml:
--------------------------------------------------------------------------------
name: Notebook tests

on: [push, pull_request]

jobs:
  run:
    runs-on: ubuntu-latest
    name: Test notebooks with nbmake
    steps:
      - uses: actions/checkout@v4

      - uses: prefix-dev/setup-pixi@v0.8.1

      - name: Test notebooks
        run: |
          pixi run test-notebooks

--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
name: Test

on: [push, pull_request]

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      max-parallel: 5
      matrix:
        os: [ubuntu-22.04, windows-2022, macos-12]
        python-version: ["3.10", "3.11", "3.12"]

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install -e ".[test]"
      - name: Test with pytest
        run: |
          pytest --junitxml=junit/test-results.xml
      - name: Publish Test Report
        uses: mikepenz/action-junit-report@v2
        with:
          report_paths: "junit/test-results*.xml"

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
dist/
examples/.ipynb_checkpoints
test/data.tar.gz
test/data
examples/monkey_brain.nrrd
examples/cthead1.zarr/
examples/chelsea.zarr/
.ipynb_checkpoints
examples/sub-I46_ses-SPIM_sample-BrocaAreaS01_stain-GAD67_chunk-00_SPIM*
examples/dask-worker-space/
.pixi/
examples/Cell_Colony.zarr/
.idea/

# Byte-compiled / optimized / DLL files
__pycache__/

--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
exclude: (^.pixi/|.snap|pixi.lock)

ci:
  autoupdate_commit_msg: "ENH: update pre-commit hooks"
  autofix_commit_msg: "STYLE: pre-commit fixes"

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: "v4.6.0"
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-symlinks
      - id: check-json
      - id: check-merge-conflict
      - id: check-toml
      - id: check-xml
      - id: check-yaml
      - id: debug-statements
      - id: detect-private-key
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: name-tests-test
        args: ["--pytest-test-first"]
      - id: requirements-txt-fixer
      - id: trailing-whitespace

  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: "v3.0.0"
    hooks:
      - id: prettier
        types_or: [yaml, markdown, html, css, scss, javascript, json]
        args: [--prose-wrap=always]

  - repo: https://github.com/codespell-project/codespell
    rev: "v2.2.5"
    hooks:
      - id: codespell
        exclude: examples/

  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: "v0.9.0.5"
    hooks:
      - id: shellcheck

  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.5.4
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
        exclude: test/conftest.py
      - id: ruff-format

--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!

cff-version: 1.2.0
title: >-
  multiscale-spatial-image: A multiscale, chunked,
  multi-dimensional Xarray spatial image data
  structure, serializable to OME-NGFF and netCDF.
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - given-names: Matthew
    family-names: McCormick
    email: matt.mccormick@kitware.com
    affiliation: 'Kitware, Inc'
    orcid: 'https://orcid.org/0000-0001-9475-3756'
  - given-names: Thomas
    family-names: Birdsong
    email: tom.birdsong@kitware.com
    affiliation: 'Kitware, Inc'
repository-code: >-
  https://github.com/spatial-image/multiscale-spatial-image
repository-artifact: 'https://pypi.org/project/multiscale_spatial_image/'
keywords:
  - scientific
  - imaging
  - ngff
  - zarr
  - netcdf
  - xarray
  - python
  - multiscale
  - scalespace
  - spatial pyramids
license: Apache-2.0

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# multiscale-spatial-image

[![Test](https://github.com/spatial-image/multiscale-spatial-image/actions/workflows/test.yml/badge.svg)](https://github.com/spatial-image/multiscale-spatial-image/actions/workflows/test.yml)
[![Notebook tests](https://github.com/spatial-image/multiscale-spatial-image/actions/workflows/notebook-test.yml/badge.svg)](https://github.com/spatial-image/multiscale-spatial-image/actions/workflows/notebook-test.yml)
[![image](https://img.shields.io/pypi/v/multiscale_spatial_image.svg)](https://pypi.python.org/pypi/multiscale_spatial_image/)
[![image](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/python/black)
[![DOI](https://zenodo.org/badge/379678181.svg)](https://zenodo.org/badge/latestdoi/379678181)

Generate a multiscale, chunked, multi-dimensional spatial image data structure
that can be serialized to [OME-NGFF].

Each scale is a scientific Python [Xarray] [spatial-image] [Dataset], organized
into nodes of an Xarray [Datatree].

## Installation

```sh
pip install multiscale_spatial_image
```

## Usage

```python
import numpy as np
from spatial_image import to_spatial_image
from multiscale_spatial_image import to_multiscale
import zarr

# Image pixels
array = np.random.randint(0, 256, size=(128,128), dtype=np.uint8)

image = to_spatial_image(array)
print(image)
```

An [Xarray] [spatial-image] [DataArray]. Spatial metadata can also be passed
during construction.

```
<xarray.DataArray 'image' (y: 128, x: 128)> Size: 16kB
array([[170,  79, 215, ...,  31, 151, 150],
       [ 77, 181,   1, ..., 217, 176, 228],
       [193,  91, 240, ..., 132, 152,  41],
       ...,
       [ 50, 140, 231, ...,  80, 236,  28],
       [ 89,  46, 180, ...,  84,  42, 140],
       [ 96, 148, 240, ...,  61,  43, 255]], dtype=uint8)
Coordinates:
  * y        (y) float64 1kB 0.0 1.0 2.0 3.0 4.0 ... 124.0 125.0 126.0 127.0
  * x        (x) float64 1kB 0.0 1.0 2.0 3.0 4.0 ... 124.0 125.0 126.0 127.0
```

```python
# Create multiscale pyramid, downscaling by a factor of 2, then 4
multiscale = to_multiscale(image, [2, 4])
print(multiscale)
```

A `MultiscaleSpatialImage`: an [Xarray] [Datatree] of chunked [Dask] arrays.

```
<xarray.DataTree>
Group: /
├── Group: /scale0
│       Dimensions:  (y: 128, x: 128)
│       Coordinates:
│         * y        (y) float64 1kB 0.0 1.0 2.0 3.0 4.0 ... 124.0 125.0 126.0 127.0
│         * x        (x) float64 1kB 0.0 1.0 2.0 3.0 4.0 ... 124.0 125.0 126.0 127.0
│       Data variables:
│           image    (y, x) uint8 16kB dask.array
├── Group: /scale1
│       Dimensions:  (y: 64, x: 64)
│       Coordinates:
│         * y        (y) float64 512B 0.5 2.5 4.5 6.5 8.5 ... 120.5 122.5 124.5 126.5
│         * x        (x) float64 512B 0.5 2.5 4.5 6.5 8.5 ... 120.5 122.5 124.5 126.5
│       Data variables:
│           image    (y, x) uint8 4kB dask.array
└── Group: /scale2
        Dimensions:  (y: 16, x: 16)
        Coordinates:
          * y        (y) float64 128B 3.5 11.5 19.5 27.5 35.5 ... 99.5 107.5 115.5 123.5
          * x        (x) float64 128B 3.5 11.5 19.5 27.5 35.5 ... 99.5 107.5 115.5 123.5
        Data variables:
            image    (y, x) uint8 256B dask.array
```
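
Each scale is a node in the tree, and a node's `Dataset` can be pulled out for
further processing. A minimal sketch, continuing the example above:

```python
# Grab the factor-2 downsampled Dataset and its image DataArray
scale1 = multiscale["scale1"].ds
downsampled = scale1["image"]
print(downsampled.shape)  # (64, 64)
```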

Map a function over datasets while skipping nodes that do not contain
dimensions:

```python
import numpy as np
from spatial_image import to_spatial_image
from multiscale_spatial_image import skip_non_dimension_nodes, to_multiscale

data = np.zeros((2, 200, 200))
dims = ("c", "y", "x")
scale_factors = [2, 2]
image = to_spatial_image(array_like=data, dims=dims)
multiscale = to_multiscale(image, scale_factors=scale_factors)

@skip_non_dimension_nodes
def transpose(ds, *args, **kwargs):
    return ds.transpose(*args, **kwargs)

multiscale = multiscale.map_over_datasets(transpose, "y", "x", "c")
print(multiscale)
```

A transposed MultiscaleSpatialImage.

```
<xarray.DataTree>
Group: /
├── Group: /scale0
│       Dimensions:  (c: 2, y: 200, x: 200)
│       Coordinates:
│         * c        (c) int32 8B 0 1
│         * y        (y) float64 2kB 0.0 1.0 2.0 3.0 4.0 ... 196.0 197.0 198.0 199.0
│         * x        (x) float64 2kB 0.0 1.0 2.0 3.0 4.0 ... 196.0 197.0 198.0 199.0
│       Data variables:
│           image    (y, x, c) float64 640kB dask.array
├── Group: /scale1
│       Dimensions:  (c: 2, y: 100, x: 100)
│       Coordinates:
│         * c        (c) int32 8B 0 1
│         * y        (y) float64 800B 0.5 2.5 4.5 6.5 8.5 ... 192.5 194.5 196.5 198.5
│         * x        (x) float64 800B 0.5 2.5 4.5 6.5 8.5 ... 192.5 194.5 196.5 198.5
│       Data variables:
│           image    (y, x, c) float64 160kB dask.array
└── Group: /scale2
        Dimensions:  (c: 2, y: 50, x: 50)
        Coordinates:
          * c        (c) int32 8B 0 1
          * y        (y) float64 400B 1.5 5.5 9.5 13.5 17.5 ... 185.5 189.5 193.5 197.5
          * x        (x) float64 400B 1.5 5.5 9.5 13.5 17.5 ... 185.5 189.5 193.5 197.5
        Data variables:
            image    (y, x, c) float64 40kB dask.array
```

While the decorator allows you to define your own methods to map over datasets
in the `DataTree` while ignoring those datasets not having dimensions, this
library also provides a few convenience methods. For example, the transpose
method we saw earlier can also be applied as follows:

```python
multiscale = multiscale.msi.transpose("y", "x", "c")
```

Other methods implemented this way are `reindex`, equivalent to the
`xr.DataArray`
[reindex](https://docs.xarray.dev/en/stable/generated/xarray.DataArray.reindex.html)
method, and `assign_coords`, equivalent to the `xr.Dataset` `assign_coords`
method.

Store the multiscale image as an Open Microscopy Environment Next-Generation
File Format ([OME-NGFF]) / [netCDF] [Zarr] store.

It is highly recommended to use `dimension_separator='/'` in the construction
of the Zarr stores.

```python
store = zarr.storage.DirectoryStore('multiscale.zarr', dimension_separator='/')
multiscale.to_zarr(store)
```
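
The store can be opened again as a `DataTree`. A minimal sketch, assuming a
recent `xarray` with the `zarr` engine installed:

```python
import xarray as xr

# Reopen the OME-NGFF / netCDF Zarr store written above
multiscale = xr.open_datatree("multiscale.zarr", engine="zarr")
print(multiscale)
```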

**Note**: The API is under development, and it may change until 1.0.0 is
released. We mean it :-).

## Examples

- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/spatial-image/multiscale-spatial-image/main?urlpath=lab/tree/examples%2FHelloMultiscaleSpatialImageWorld.ipynb)
  [Hello MultiscaleSpatialImage World!](./examples/HelloMultiscaleSpatialImageWorld.ipynb)
- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/spatial-image/multiscale-spatial-image/main?urlpath=lab/tree/examples%2FConvertITKImage.ipynb)
  [Convert itk.Image](./examples/ConvertITKImage.ipynb)
- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/spatial-image/multiscale-spatial-image/main?urlpath=lab/tree/examples%2FConvertImageioImageResource.ipynb)
  [Convert imageio ImageResource](./examples/ConvertImageioImageResource.ipynb)
- [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/spatial-image/multiscale-spatial-image/main?urlpath=lab/tree/examples%2FConvertPyImageJDataset.ipynb)
  [Convert pyimagej Dataset](./examples/ConvertPyImageJDataset.ipynb)

## Development

Contributions are welcome and appreciated.

### Get the source code

```shell
git clone https://github.com/spatial-image/multiscale-spatial-image
cd multiscale-spatial-image
```

### Install dependencies

First install [pixi]. Then, install project dependencies:

```shell
pixi install -a
pixi run pre-commit-install
```

### Run the test suite

The unit tests:

```shell
pixi run -e test test
```

The notebook tests:

```shell
pixi run test-notebooks
```

### Update test data

To add new testing data or update existing data, such as a new baseline for
this block:

```py
dataset_name = "cthead1"
image = input_images[dataset_name]
baseline_name = "2_4/XARRAY_COARSEN"
multiscale = to_multiscale(image, [2, 4], method=Methods.XARRAY_COARSEN)
verify_against_baseline(test_data_dir, dataset_name, baseline_name, multiscale)
```

add a `store_new_image` call in your test block:

```py
dataset_name = "cthead1"
image = input_images[dataset_name]
baseline_name = "2_4/XARRAY_COARSEN"
multiscale = to_multiscale(image, [2, 4], method=Methods.XARRAY_COARSEN)

store_new_image(dataset_name, baseline_name, multiscale)

verify_against_baseline(dataset_name, baseline_name, multiscale)
```

Run the tests to generate the output. Remove the `store_new_image` call.

Then, create a tarball of the current testing data:

```console
cd test/data
tar cvf ../data.tar *
gzip -9 ../data.tar
python3 -c 'import pooch; print(pooch.file_hash("../data.tar.gz"))'
```

Update the `test_data_sha256` variable in the _test/\_data.py_ file. Upload the
data to [web3.storage](https://web3.storage), and update the
`test_data_ipfs_cid`
[Content Identifier (CID)](https://proto.school/anatomy-of-a-cid/01) variable,
which is available in the web3.storage web page interface.
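
For reference, a rough sketch of how the tests can fetch and verify this
archive with `pooch` (the actual logic and download URL live in
_test/\_data.py_; the values below are placeholders):

```py
import pooch

# Placeholders -- use the values recorded in test/_data.py
test_data_sha256 = "<sha256 printed above>"
url = "<download URL derived from test_data_ipfs_cid>"

data_path = pooch.retrieve(
    url=url,
    known_hash=f"sha256:{test_data_sha256}",
    processor=pooch.Untar(),
)
```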

### Submit the patch

We use the standard [GitHub flow].

### Create a release

This section is relevant only for maintainers.

1. Pull `git`'s `main` branch.
2. `pixi install -a`
3. `pixi run pre-commit-install`
4. `pixi run -e test test`
5. `pixi shell`
6. `hatch version <version>`
7. `git add .`
8. `git commit -m "ENH: Bump version to <version>"`
9. `hatch build`
10. `hatch publish`
11. `git push upstream main`
12. Create a new tag and Release via the GitHub UI. Auto-generate release notes
    and add additional notes as needed.

[spatial-image]: https://github.com/spatial-image/spatial-image
[Xarray]: https://xarray.pydata.org/en/stable/
[OME-NGFF]: https://ngff.openmicroscopy.org/
[Dataset]: https://docs.xarray.dev/en/stable/generated/xarray.Dataset.html
[Datatree]: https://xarray-datatree.readthedocs.io/en/latest/
[DataArray]: https://xarray.pydata.org/en/stable/generated/xarray.DataArray.html
[Zarr]: https://zarr.readthedocs.io/en/stable/
[Dask]: https://docs.dask.org/en/stable/array.html
[netCDF]: https://www.unidata.ucar.edu/software/netcdf/
[pixi]: https://pixi.sh
[GitHub flow]: https://docs.github.com/en/get-started/using-github/github-flow

--------------------------------------------------------------------------------
/multiscale_spatial_image/__about__.py:
--------------------------------------------------------------------------------
# SPDX-FileCopyrightText: 2022-present NumFOCUS
#
# SPDX-License-Identifier: MIT
__version__ = "2.0.2"

--------------------------------------------------------------------------------
/multiscale_spatial_image/__init__.py:
--------------------------------------------------------------------------------
"""multiscale-spatial-image

Generate a multiscale spatial image."""

__all__ = [
    "MultiscaleSpatialImage",
    "Methods",
    "to_multiscale",
    "itk_image_to_multiscale",
    "skip_non_dimension_nodes",
    "__version__",
]

from .__about__ import __version__
from .multiscale_spatial_image import MultiscaleSpatialImage
from .to_multiscale import Methods, to_multiscale, itk_image_to_multiscale
from .utils import skip_non_dimension_nodes

--------------------------------------------------------------------------------
/multiscale_spatial_image/_docs.py:
--------------------------------------------------------------------------------
from textwrap import dedent
from typing import Any, Callable


def inject_docs(**kwargs: Any) -> Callable[..., Any]:
    # taken from scanpy
    def decorator(obj: Any) -> Any:
        obj.__doc__ = dedent(obj.__doc__).format(**kwargs)
        return obj

    return decorator
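
# Minimal usage sketch (the decorated function here is hypothetical, not part
# of this package):
#
#     @inject_docs(fmt="OME-NGFF")
#     def save(path):
#         """Serialize to {fmt}."""
#
# After decoration, ``save.__doc__`` reads "Serialize to OME-NGFF".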

--------------------------------------------------------------------------------
/multiscale_spatial_image/multiscale_spatial_image.py:
--------------------------------------------------------------------------------
from typing import Union, Iterable, Any

from xarray import DataTree, register_datatree_accessor
import numpy as np
from collections.abc import MutableMapping, Hashable
from pathlib import Path
from zarr.storage import BaseStore
from multiscale_spatial_image.operations import (
    transpose,
    reindex_data_arrays,
    assign_coords,
)


@register_datatree_accessor("msi")
class MultiscaleSpatialImage:
    """A multi-scale representation of a spatial image.

    This is an xarray DataTree, with content compatible with the Open Microscopy Environment-
    Next Generation File Format (OME-NGFF).

    The tree contains nodes in the form: `scale{scale}` where *scale* is the integer scale.
    Each node has the same-named `Dataset` that corresponds to the NGFF dataset name.
    For example, a three-scale representation of a *cells* dataset would have `Dataset` nodes:

      scale0
      scale1
      scale2
    """

    def __init__(self, xarray_obj: DataTree):
        self._dt = xarray_obj

    def to_zarr(
        self,
        store: Union[MutableMapping, str, Path, BaseStore],
        mode: str = "w",
        encoding=None,
        **kwargs,
    ):
        """
        Write multi-scale spatial image contents to a Zarr store.

        Metadata is added according to the OME-NGFF standard.

        Parameters
        ----------
        store : MutableMapping, str or Path, or zarr.storage.BaseStore
            Store or path to directory in file system
        mode : {"w", "w-", "a", "r+", None}, default: "w"
            Persistence mode: “w” means create (overwrite if exists); “w-” means create (fail if exists);
            “a” means override existing variables (create if does not exist); “r+” means modify existing
            array values only (raise an error if any metadata or shapes would change). The default mode
            is “a” if append_dim is set. Otherwise, it is “r+” if region is set and w- otherwise.
        encoding : dict, optional
            Nested dictionary with variable names as keys and dictionaries of
            variable specific encodings as values, e.g.,
            ``{"scale0/image": {"my_variable": {"dtype": "int16", "scale_factor": 0.1}, ...}, ...}``.
            See ``xarray.Dataset.to_zarr`` for available options.
        kwargs :
            Additional keyword arguments to be passed to ``datatree.DataTree.to_zarr``
        """

        multiscales = []
        scale0 = self._dt[self._dt.groups[1]]
        for name in scale0.ds.data_vars.keys():
            ngff_datasets = []
            for child in self._dt.children:
                image = self._dt[child].ds
                scale_transform = []
                translate_transform = []
                for dim in image.dims:
                    if len(image.coords[dim]) > 1 and np.issubdtype(
                        image.coords[dim].dtype, np.number
                    ):
                        scale_transform.append(
                            float(image.coords[dim][1] - image.coords[dim][0])
                        )
                    else:
                        scale_transform.append(1.0)
                    if len(image.coords[dim]) > 0 and np.issubdtype(
                        image.coords[dim].dtype, np.number
                    ):
                        translate_transform.append(float(image.coords[dim][0]))
                    else:
                        translate_transform.append(0.0)

                ngff_datasets.append(
                    {
                        "path": f"{self._dt[child].name}/{name}",
                        "coordinateTransformations": [
                            {
                                "type": "scale",
                                "scale": scale_transform,
                            },
                            {
                                "type": "translation",
                                "translation": translate_transform,
                            },
                        ],
                    }
                )

            image = scale0.ds
            axes = []
            for axis in image.dims:
                if axis == "t":
                    axes.append({"name": "t", "type": "time"})
                elif axis == "c":
                    axes.append({"name": "c", "type": "channel"})
                else:
                    axes.append({"name": axis, "type": "space"})
                if "units" in image.coords[axis].attrs:
                    axes[-1]["unit"] = image.coords[axis].attrs["units"]

            multiscales.append(
                {
                    "@type": "ngff:Image",
                    "version": "0.4",
                    "name": name,
                    "axes": axes,
                    "datasets": ngff_datasets,
                }
            )

        # NGFF v0.4 metadata
        ngff_metadata = {"multiscales": multiscales, "multiscaleSpatialImageVersion": 1}
        self._dt.ds = self._dt.ds.assign_attrs(**ngff_metadata)

        # Forward the documented encoding argument to the underlying writer.
        self._dt.to_zarr(store, mode=mode, encoding=encoding, **kwargs)
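
    # Usage sketch, mirroring the encoding example in the docstring above
    # (variable names are illustrative):
    #
    #     multiscale.msi.to_zarr(
    #         "multiscale.zarr",
    #         encoding={"scale0/image": {"image": {"dtype": "int16", "scale_factor": 0.1}}},
    #     )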

    def transpose(self, *dims: Hashable) -> DataTree:
        """Return a `DataTree` with all dimensions of arrays in datasets transposed.

        This method automatically skips those nodes of the `DataTree` that do not contain
        dimensions. Note that for `Dataset`s themselves, the order of dimensions stays the same.
        If a `DataTree` node is missing one of the specified dimensions, an error is raised.

        Parameters
        ----------
        *dims : Hashable | None
            If not specified, reverse the dimensions on each array. Otherwise,
            reorder the dimensions to the order in which the `dims` are specified.
        """
        return self._dt.map_over_datasets(transpose, *dims)
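
    # For example, mirroring the README: reorder a (c, y, x) image so the
    # channel dimension comes last:
    #
    #     multiscale = multiscale.msi.transpose("y", "x", "c")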

    def reindex_data_arrays(
        self,
        indexers: dict[str, Any],
        method: str | None = None,
        tolerance: float | Iterable[float] | str | None = None,
        copy: bool = False,
        fill_value: int | dict[str, int] | None = None,
        **indexer_kwargs: Any,
    ):
        """
        Reindex the `DataArray`s present in the datasets at each scale level of the MultiscaleSpatialImage.

        From the original xarray docstring: Conform this object onto the indexes of another object, filling in missing
        values with fill_value. The default fill value is NaN.

        Parameters
        ----------
        indexers : dict | None
            Dictionary with keys given by dimension names and values given by arrays of coordinates tick labels.
            Any mis-matched coordinate values will be filled in with NaN, and any mis-matched dimension names will
            simply be ignored. One of indexers or indexer_kwargs must be provided.
        method : str | None
            Method to use for filling index values in indexers not found on this data array:
            - None (default): don’t fill gaps
            - pad / ffill: propagate last valid index value forward
            - backfill / bfill: propagate next valid index value backward
            - nearest: use nearest valid index value
        tolerance : float | Iterable[float] | str | None
            Maximum distance between original and new labels for inexact matches. The values of the index at the
            matching locations must satisfy the equation abs(index[indexer] - target) <= tolerance. Tolerance may
            be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable
            tolerance per element. List-like must be the same size as the index and its dtype must exactly match the
            index’s type.
        copy : bool
            If copy=True, data in the return value is always copied. If copy=False and reindexing is unnecessary, or
            can be performed with only slice operations, then the output may share memory with the input. In either
            case, a new xarray object is always returned.
        fill_value : int | dict[str, int] | None
            Value to use for newly missing values. If a dict-like, maps variable names (including coordinates) to fill
            values. Use this data array’s name to refer to the data array’s values.
        **indexer_kwargs
            The keyword arguments form of indexers. One of indexers or indexer_kwargs must be provided.
        """
        # Merge the keyword-form indexers into the positional mapping;
        # map_over_datasets forwards positional arguments to the mapped function.
        indexers = {**(indexers or {}), **indexer_kwargs}
        return self._dt.map_over_datasets(
            reindex_data_arrays,
            indexers,
            method,
            tolerance,
            copy,
            fill_value,
        )

    def assign_coords(self, coords, **coords_kwargs):
        """
        Assign new coordinates to all `Dataset`s in the `DataTree` having dimensions.

        Returns a new `Dataset` at each scale level of the `MultiscaleSpatialImage` with all the original data in
        addition to the new coordinates.

        Parameters
        ----------
        coords
            A mapping whose keys are the names of the coordinates and values are the coordinates to assign.
            The mapping will generally be a dict or Coordinates.
            - If a value is a standard data value — for example, a DataArray, scalar, or array — the data is simply
              assigned as a coordinate.
            - If a value is callable, it is called with this object as the only parameter, and the return value is
              used as new coordinate variables.
            - A coordinate can also be defined and attached to an existing dimension using a tuple with the first
              element the dimension name and the second element the values for this new coordinate.
        **coords_kwargs
            The keyword arguments form of coords. One of coords or coords_kwargs must be provided.
        """
        # As above, merge the keyword form into the mapping before mapping over the tree.
        coords = {**(coords or {}), **coords_kwargs}
        return self._dt.map_over_datasets(assign_coords, coords)

--------------------------------------------------------------------------------
/multiscale_spatial_image/operations/__init__.py:
--------------------------------------------------------------------------------
from .operations import assign_coords, transpose, reindex_data_arrays

__all__ = ["assign_coords", "transpose", "reindex_data_arrays"]

--------------------------------------------------------------------------------
/multiscale_spatial_image/operations/operations.py:
--------------------------------------------------------------------------------
from multiscale_spatial_image.utils import skip_non_dimension_nodes
from xarray import Dataset
from typing import Any


@skip_non_dimension_nodes
def assign_coords(ds: Dataset, *args: Any, **kwargs: Any) -> Dataset:
    return ds.assign_coords(*args, **kwargs)


@skip_non_dimension_nodes
def transpose(ds: Dataset, *args: Any, **kwargs: Any) -> Dataset:
    return ds.transpose(*args, **kwargs)


@skip_non_dimension_nodes
def reindex_data_arrays(ds: Dataset, *args: Any, **kwargs: Any) -> Dataset:
    return ds["image"].reindex(*args, **kwargs).to_dataset()
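
# Usage sketch: these wrapped operations are designed to be passed to
# DataTree.map_over_datasets, e.g. (mirroring the README):
#
#     multiscale = multiscale.map_over_datasets(transpose, "y", "x", "c")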

--------------------------------------------------------------------------------
/multiscale_spatial_image/to_multiscale/__init__.py:
--------------------------------------------------------------------------------
from .to_multiscale import Methods, to_multiscale
from .itk_image_to_multiscale import itk_image_to_multiscale

__all__ = ["Methods", "to_multiscale", "itk_image_to_multiscale"]

--------------------------------------------------------------------------------
/multiscale_spatial_image/to_multiscale/_dask_image.py:
--------------------------------------------------------------------------------
from spatial_image import to_spatial_image
import numpy as np

from ._support import _align_chunks, _dim_scale_factors, _compute_sigma


def _compute_input_spacing(input_image):
    """Helper method to manually compute image spacing. Assumes even spacing along any axis.

    input_image: xarray.core.dataarray.DataArray
        The image for which voxel spacings are computed

    result: Dict
        Spacing along each enumerated image axis
        Example {'x': 1.0, 'y': 0.5}
    """
    return {
        dim: float(input_image.coords[dim][1]) - float(input_image.coords[dim][0])
        for dim in input_image.dims
    }


def _compute_output_spacing(input_image, dim_factors):
    """Helper method to manually compute output image spacing.

    input_image: xarray.core.dataarray.DataArray
        The image for which voxel spacings are computed

    dim_factors: Dict
        Shrink ratio along each enumerated axis

    result: Dict
        Spacing along each enumerated image axis
        Example {'x': 2.0, 'y': 1.0}
    """
    input_spacing = _compute_input_spacing(input_image)
    return {dim: input_spacing[dim] * dim_factors[dim] for dim in input_image.dims}


def _compute_output_origin(input_image, dim_factors):
    """Helper method to manually compute output image physical offset.
    Note that this method does not account for an image direction matrix.

    input_image: xarray.core.dataarray.DataArray
        The image for which voxel spacings are computed

    dim_factors: Dict
        Shrink ratio along each enumerated axis

    result: Dict
        Offset in physical space of first voxel in output image
        Example {'x': 0.5, 'y': 1.0}
    """

    input_spacing = _compute_input_spacing(input_image)
    input_origin = {
        dim: float(input_image.coords[dim][0])
        for dim in input_image.dims
        if dim in dim_factors
    }

    # Index in input image space corresponding to offset after shrink
    input_index = {
        dim: 0.5 * (dim_factors[dim] - 1)
        for dim in input_image.dims
        if dim in dim_factors
    }
    # Translate input index coordinate to offset in physical space
    # NOTE: This method fails to account for direction matrix
    return {
        dim: input_index[dim] * input_spacing[dim] + input_origin[dim]
        for dim in input_image.dims
        if dim in dim_factors
    }
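
# Worked example for a factor-2 shrink of a unit-spaced axis, matching the
# README output: input spacing 1.0 and origin 0.0 give an output spacing of
# 1.0 * 2 = 2.0 and an output origin of 0.5 * (2 - 1) * 1.0 + 0.0 = 0.5,
# i.e. each downsampled voxel center sits halfway between the input pair it
# replaces.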

def _get_truncate(xarray_image, sigma_values, truncate_start=4.0) -> float:
    """Discover truncate parameter yielding a viable kernel width
    for dask_image.ndfilters.gaussian_filter processing. Block overlap
    cannot be greater than image size, so kernel radius is more limited
    for small images. A lower stddev truncation ceiling for kernel
    generation can result in a less precise kernel.

    xarray_image: xarray.core.dataarray.DataArray
        Chunked image to be smoothed

    sigma_values: List
        Gaussian kernel standard deviations in tzyx order

    truncate_start: float
        First truncation value to try.
        "dask_image.ndfilters.gaussian_filter" defaults to 4.0.

    result: float
        Truncation value found to yield largest possible kernel width without
        extending beyond one chunk such that chunked smoothing would fail.
    """

    from dask_image.ndfilters._gaussian import _get_border

    truncate = truncate_start
    stddev_step = 0.5  # search by stepping down by 0.5 stddev in each iteration

    border = _get_border(xarray_image.data, sigma_values, truncate)
    while any(
        [
            border_len > image_len
            for border_len, image_len in zip(border, xarray_image.shape)
        ]
    ):
        truncate = truncate - stddev_step
        if truncate <= 0.0:
            break
        border = _get_border(xarray_image.data, sigma_values, truncate)

    return truncate
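
# Behavior sketch: for images comfortably larger than the Gaussian kernel, the
# default truncate of 4.0 is returned unchanged; only when the computed border
# would exceed an image dimension does the loop step truncate down in
# 0.5-stddev decrements (bottoming out just above 0.0).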

def _downsample_dask_image(
    current_input,
    default_chunks,
    out_chunks,
    scale_factors,
    data_objects,
    image,
    label=False,
):
    import dask_image.ndfilters
    import dask_image.ndinterp

    for factor_index, scale_factor in enumerate(scale_factors):
        dim_factors = _dim_scale_factors(image.dims, scale_factor)
        current_input = _align_chunks(current_input, default_chunks, dim_factors)

        shrink_factors = []
        for dim in image.dims:
            if dim in dim_factors:
                shrink_factors.append(dim_factors[dim])
            else:
                shrink_factors.append(1)

        # Compute/discover region splitting parameters
        input_spacing = _compute_input_spacing(current_input)

        # Compute output shape and metadata
        output_shape = [
            int(image_len / shrink_factor)
            for image_len, shrink_factor in zip(current_input.shape, shrink_factors)
        ]
        output_spacing = _compute_output_spacing(current_input, dim_factors)
        output_origin = _compute_output_origin(current_input, dim_factors)

        if label == "mode":

            def largest_mode(arr):
                values, counts = np.unique(arr, return_counts=True)
                m = counts.argmax()
                return values[m]

            size = tuple(shrink_factors)
            blurred_array = dask_image.ndfilters.generic_filter(
                image=current_input.data,
                function=largest_mode,
                size=size,
                mode="nearest",
            )
        elif label == "nearest":
            blurred_array = current_input.data
        else:
            input_spacing_list = [input_spacing[dim] for dim in image.dims]
            sigma_values = _compute_sigma(input_spacing_list, shrink_factors)
            truncate = _get_truncate(current_input, sigma_values)

            blurred_array = dask_image.ndfilters.gaussian_filter(
                image=current_input.data,
                sigma=sigma_values,  # tzyx order
                mode="nearest",
                truncate=truncate,
            )

        # Construct downsample parameters
        image_dimension = len(dim_factors)
        transform = np.eye(image_dimension)
        for dim, shrink_factor in enumerate(shrink_factors):
            transform[dim, dim] = shrink_factor
        if label:
            order = 0
        else:
            order = 1

        downscaled_array = dask_image.ndinterp.affine_transform(
            blurred_array,
            matrix=transform,
            order=order,
            output_shape=output_shape,  # tzyx order
        ).compute()

        downscaled = to_spatial_image(
            downscaled_array,
            dims=image.dims,
            scale=output_spacing,
            translation=output_origin,
            name=current_input.name,
            axis_names={
                d: image.coords[d].attrs.get("long_name", d) for d in image.dims
            },
            axis_units={d: image.coords[d].attrs.get("units", "") for d in image.dims},
            t_coords=image.coords.get("t", None),
            c_coords=image.coords.get("c", None),
        )
        downscaled = downscaled.chunk(out_chunks)
        data_objects[f"scale{factor_index+1}"] = downscaled.to_dataset(
            name=image.name, promote_attrs=True
        )
        current_input = downscaled

    return data_objects

--------------------------------------------------------------------------------
/multiscale_spatial_image/to_multiscale/_itk.py:
--------------------------------------------------------------------------------
from spatial_image import to_spatial_image
from dask.array import map_blocks, map_overlap
import numpy as np
from typing import Tuple

from ._support import _align_chunks, _dim_scale_factors, _compute_sigma


def _get_block(current_input, block_index: int):
    """Helper method for accessing an enumerated chunk from xarray input"""
    block_shape = [c[block_index] for c in current_input.chunks]
    block = current_input[tuple([slice(0, s) for s in block_shape])]
    # For consistency for now, do not utilize direction until there is standardized support for
    # direction cosines / orientation in OME-NGFF
    block.attrs.pop("direction", None)
    return block


def _compute_itk_gaussian_kernel_radius(
    input_size, sigma_values, shrink_factors
) -> list:
    """Get kernel radius in xyzt directions"""
    DEFAULT_MAX_KERNEL_WIDTH = 32
    MAX_KERNEL_ERROR = 0.01
    image_dimension = len(input_size)

    import itk

    # Constrain kernel width to be at most the size of one chunk
    max_kernel_width = min(DEFAULT_MAX_KERNEL_WIDTH, *input_size)
    variance = [sigma**2 for sigma in sigma_values]

    def generate_radius(direction: int) -> int:
        """Follow itk.DiscreteGaussianImageFilter procedure to generate directional kernels"""
        oper = itk.GaussianOperator[itk.F, image_dimension]()
        oper.SetDirection(direction)
        oper.SetMaximumError(MAX_KERNEL_ERROR)
        oper.SetMaximumKernelWidth(max_kernel_width)
        oper.SetVariance(variance[direction])
        oper.CreateDirectional()
        return oper.GetRadius(direction)

    return [generate_radius(dim) for dim in range(image_dimension)]


def _itk_blur_and_downsample(
    xarray_data,
    gaussian_filter_name,
    interpolator_name,
    shrink_factors,
    sigma_values,
    kernel_radius,
):
    """Blur and then downsample a given image chunk"""
    import itk

    # xarray chunk does not have metadata attached, values are ITK defaults
    image = itk.image_view_from_array(xarray_data)
    input_origin = itk.origin(image)

    # Skip this image block if it has 0 voxels
    block_size = itk.size(image)
    if any([block_len == 0 for block_len in block_size]):
        return None

    # Output values are relative to input
    itk_shrink_factors = shrink_factors  # xyzt
    itk_kernel_radius = kernel_radius
    output_origin = [
        val + radius for val, radius in zip(input_origin, itk_kernel_radius)
    ]
    output_spacing = [s * f for s, f in zip(itk.spacing(image), itk_shrink_factors)]
    output_size = [
        max(0, int((image_len - 2 * radius) / shrink_factor))
        for image_len, radius, shrink_factor in zip(
            itk.size(image), itk_kernel_radius, itk_shrink_factors
        )
    ]

    # Optionally run accelerated smoothing with itk-vkfft
    if gaussian_filter_name == "VkDiscreteGaussianImageFilter":
        smoothing_filter_template = itk.VkDiscreteGaussianImageFilter
    elif gaussian_filter_name == "DiscreteGaussianImageFilter":
        smoothing_filter_template = itk.DiscreteGaussianImageFilter
    else:
        raise ValueError(f"Unsupported gaussian_filter {gaussian_filter_name}")

    # Construct pipeline
    smoothing_filter = smoothing_filter_template.New(
        image, sigma_array=sigma_values, use_image_spacing=False
    )

    if interpolator_name == "LinearInterpolateImageFunction":
        interpolator_instance = itk.LinearInterpolateImageFunction.New(
            smoothing_filter.GetOutput()
        )
    elif interpolator_name == "LabelImageGaussianInterpolateImageFunction":
        interpolator_instance = itk.LabelImageGaussianInterpolateImageFunction.New(
            smoothing_filter.GetOutput()
        )
        # Similar approach as compute_sigma
        # Ref: https://link.springer.com/content/pdf/10.1007/978-3-319-24571-3_81.pdf
        sigma = [s * 0.7355 for s in output_spacing]
        sigma_max = max(sigma)
        interpolator_instance.SetSigma(sigma)
        interpolator_instance.SetAlpha(sigma_max * 2.5)
    else:
        raise ValueError(f"Unsupported interpolator_name {interpolator_name}")

    shrink_filter = itk.ResampleImageFilter.New(
        smoothing_filter.GetOutput(),
        interpolator=interpolator_instance,
        size=output_size,
        output_spacing=output_spacing,
        output_origin=output_origin,
    )
    shrink_filter.Update()

    return shrink_filter.GetOutput()
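
# How this is driven (see the downsample drivers below): dask.array.map_overlap
# pads each chunk by the Gaussian kernel radius and calls
# _itk_blur_and_downsample on the padded block; because trim=False is passed,
# the per-block output size computed above already excludes the padding.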

def _downsample_itk_bin_shrink(
    current_input, default_chunks, out_chunks, scale_factors, data_objects, image
):
    import itk

    for factor_index, scale_factor in enumerate(scale_factors):
        dim_factors = _dim_scale_factors(image.dims, scale_factor)
        current_input = _align_chunks(current_input, default_chunks, dim_factors)

        image_dims: Tuple[str, str, str, str] = ("x", "y", "z", "t")
        shrink_factors = [dim_factors[sf] for sf in image_dims if sf in dim_factors]

        block_0_shape = [c[0] for c in current_input.chunks]
        block_0 = current_input[tuple([slice(0, s) for s in block_0_shape])]
        # For consistency for now, do not utilize direction until there is standardized support for
        # direction cosines / orientation in OME-NGFF
        block_0.attrs.pop("direction", None)
        block_input = itk.image_from_xarray(block_0)
        filt = itk.BinShrinkImageFilter.New(block_input, shrink_factors=shrink_factors)
        filt.UpdateOutputInformation()
        block_output = filt.GetOutput()
        scale = {image_dims[i]: s for (i, s) in enumerate(block_output.GetSpacing())}
        translation = {
            image_dims[i]: s for (i, s) in enumerate(block_output.GetOrigin())
        }
        dtype = block_output.dtype
        output_chunks = list(current_input.chunks)
        for i, c in enumerate(output_chunks):
            output_chunks[i] = [
                block_output.shape[i],
            ] * len(c)

        block_neg1_shape = [c[-1] for c in current_input.chunks]
        block_neg1 = current_input[tuple([slice(0, s) for s in block_neg1_shape])]
        block_neg1.attrs.pop("direction", None)
        block_input = itk.image_from_xarray(block_neg1)
        filt = itk.BinShrinkImageFilter.New(block_input, shrink_factors=shrink_factors)
        filt.UpdateOutputInformation()
        block_output = filt.GetOutput()
        for i, c in enumerate(output_chunks):
            output_chunks[i][-1] = block_output.shape[i]
            output_chunks[i] = tuple(output_chunks[i])
        output_chunks = tuple(output_chunks)

        downscaled_array = map_blocks(
            itk.bin_shrink_image_filter,
            current_input.data,
            shrink_factors=shrink_factors,
            dtype=dtype,
            chunks=output_chunks,
        )
        downscaled = to_spatial_image(
            downscaled_array,
            dims=image.dims,
            scale=scale,
            translation=translation,
            name=current_input.name,
            axis_names={
                d: image.coords[d].attrs.get("long_name", d) for d in image.dims
            },
            axis_units={d: image.coords[d].attrs.get("units", "") for d in image.dims},
            t_coords=image.coords.get("t", None),
            c_coords=image.coords.get("c", None),
        )
        downscaled = downscaled.chunk(out_chunks)
        data_objects[f"scale{factor_index+1}"] = downscaled.to_dataset(
            name=image.name, promote_attrs=True
        )
        current_input = downscaled

    return data_objects
block_len, shrink_factor in zip( 271 | itk.size(block_neg1_image), shrink_factors 272 | ) 273 | ] 274 | assert all( 275 | [ 276 | itk.size(block_output)[dim] == computed_size[dim] 277 | for dim in range(block_output.ndim) 278 | ] 279 | ) 280 | for i, c in enumerate(output_chunks): 281 | output_chunks[i][-1] = block_output.shape[i] 282 | output_chunks[i] = tuple(output_chunks[i]) 283 | output_chunks = tuple(output_chunks) 284 | 285 | downscaled_array = map_overlap( 286 | _itk_blur_and_downsample, 287 | current_input.data, 288 | gaussian_filter_name=gaussian_filter_name, 289 | interpolator_name=interpolator_name, 290 | shrink_factors=shrink_factors, 291 | sigma_values=sigma_values, 292 | kernel_radius=kernel_radius, 293 | dtype=dtype, 294 | depth={ 295 | dim: radius for dim, radius in enumerate(np.flip(kernel_radius)) 296 | }, # overlap is in tzyx 297 | boundary="nearest", 298 | trim=False, # Overlapped region is trimmed in blur_and_downsample to output size 299 | ).compute() 300 | 301 | downscaled = to_spatial_image( 302 | downscaled_array, 303 | dims=image.dims, 304 | scale=block_0_scale, 305 | translation=block_0_translation, 306 | name=current_input.name, 307 | axis_names={ 308 | d: image.coords[d].attrs.get("long_name", d) for d in image.dims 309 | }, 310 | axis_units={d: image.coords[d].attrs.get("units", "") for d in image.dims}, 311 | t_coords=image.coords.get("t", None), 312 | c_coords=image.coords.get("c", None), 313 | ) 314 | downscaled = downscaled.chunk(out_chunks) 315 | data_objects[f"scale{factor_index+1}"] = downscaled.to_dataset( 316 | name=image.name, promote_attrs=True 317 | ) 318 | current_input = downscaled 319 | return data_objects 320 | 321 | 322 | def _downsample_itk_label( 323 | current_input, default_chunks, out_chunks, scale_factors, data_objects, image 324 | ): 325 | # Uses the LabelImageGaussianInterpolateImageFunction. More appropriate for integer label images. 
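    # A hedged usage sketch for this method (array values are made up; the
    # public imports below match how the test suite exercises
    # Methods.ITK_LABEL_GAUSSIAN):
    #
    #     import numpy as np
    #     from spatial_image import to_spatial_image
    #     from multiscale_spatial_image import Methods, to_multiscale
    #
    #     labels = to_spatial_image(np.random.randint(0, 4, (128, 128)).astype(np.uint8))
    #     multiscale = to_multiscale(labels, [2, 2], method=Methods.ITK_LABEL_GAUSSIAN)
    #
    # The label-aware interpolator only emits values present in the input, so
    # integer label ids are preserved rather than averaged together.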
326 | import itk 327 | 328 | gaussian_filter_name = "DiscreteGaussianImageFilter" 329 | interpolator_name = "LabelImageGaussianInterpolateImageFunction" 330 | 331 | for factor_index, scale_factor in enumerate(scale_factors): 332 | dim_factors = _dim_scale_factors(image.dims, scale_factor) 333 | current_input = _align_chunks(current_input, default_chunks, dim_factors) 334 | 335 | image_dims: Tuple[str, str, str, str] = ("x", "y", "z", "t") 336 | shrink_factors = [dim_factors[sf] for sf in image_dims if sf in dim_factors] 337 | 338 | # Compute metadata for region splitting 339 | 340 | # Blocks 0, ..., N-2 have the same shape 341 | block_0_input = _get_block(current_input, 0) 342 | # Block N-1 may be smaller than preceding blocks 343 | block_neg1_input = _get_block(current_input, -1) 344 | 345 | # Compute overlap for Gaussian blurring for all blocks 346 | block_0_image = itk.image_from_xarray(block_0_input) 347 | input_spacing = itk.spacing(block_0_image) 348 | sigma_values = _compute_sigma(input_spacing, shrink_factors) 349 | kernel_radius = _compute_itk_gaussian_kernel_radius( 350 | itk.size(block_0_image), sigma_values, shrink_factors 351 | ) 352 | 353 | # Compute output size and spatial metadata for blocks 0, .., N-2 354 | filt = itk.BinShrinkImageFilter.New( 355 | block_0_image, shrink_factors=shrink_factors 356 | ) 357 | filt.UpdateOutputInformation() 358 | block_output = filt.GetOutput() 359 | block_0_output_spacing = block_output.GetSpacing() 360 | block_0_output_origin = block_output.GetOrigin() 361 | 362 | block_0_scale = { 363 | image_dims[i]: s for (i, s) in enumerate(block_0_output_spacing) 364 | } 365 | block_0_translation = { 366 | image_dims[i]: s for (i, s) in enumerate(block_0_output_origin) 367 | } 368 | dtype = block_output.dtype 369 | 370 | computed_size = [ 371 | int(block_len / shrink_factor) 372 | for block_len, shrink_factor in zip(itk.size(block_0_image), shrink_factors) 373 | ] 374 | assert all( 375 | [ 376 | itk.size(block_output)[dim] == computed_size[dim] 377 | for dim in range(block_output.ndim) 378 | ] 379 | ) 380 | output_chunks = list(current_input.chunks) 381 | for i, c in enumerate(output_chunks): 382 | output_chunks[i] = [ 383 | block_output.shape[i], 384 | ] * len(c) 385 | 386 | # Compute output size for block N-1 387 | block_neg1_image = itk.image_from_xarray(block_neg1_input) 388 | filt.SetInput(block_neg1_image) 389 | filt.UpdateOutputInformation() 390 | block_output = filt.GetOutput() 391 | computed_size = [ 392 | int(block_len / shrink_factor) 393 | for block_len, shrink_factor in zip( 394 | itk.size(block_neg1_image), shrink_factors 395 | ) 396 | ] 397 | assert all( 398 | [ 399 | itk.size(block_output)[dim] == computed_size[dim] 400 | for dim in range(block_output.ndim) 401 | ] 402 | ) 403 | for i, c in enumerate(output_chunks): 404 | output_chunks[i][-1] = block_output.shape[i] 405 | output_chunks[i] = tuple(output_chunks[i]) 406 | output_chunks = tuple(output_chunks) 407 | 408 | downscaled_array = map_overlap( 409 | _itk_blur_and_downsample, 410 | current_input.data, 411 | gaussian_filter_name=gaussian_filter_name, 412 | interpolator_name=interpolator_name, 413 | shrink_factors=shrink_factors, 414 | sigma_values=sigma_values, 415 | kernel_radius=kernel_radius, 416 | dtype=dtype, 417 | depth={ 418 | dim: radius for dim, radius in enumerate(np.flip(kernel_radius)) 419 | }, # overlap is in tzyx 420 | boundary="nearest", 421 | trim=False, # Overlapped region is trimmed in blur_and_downsample to output size 422 | ).compute() 423 | 424 | downscaled = 
to_spatial_image( 425 | downscaled_array, 426 | dims=image.dims, 427 | scale=block_0_scale, 428 | translation=block_0_translation, 429 | name=current_input.name, 430 | axis_names={ 431 | d: image.coords[d].attrs.get("long_name", d) for d in image.dims 432 | }, 433 | axis_units={d: image.coords[d].attrs.get("units", "") for d in image.dims}, 434 | t_coords=image.coords.get("t", None), 435 | c_coords=image.coords.get("c", None), 436 | ) 437 | downscaled = downscaled.chunk(out_chunks) 438 | data_objects[f"scale{factor_index+1}"] = downscaled.to_dataset( 439 | name=image.name, promote_attrs=True 440 | ) 441 | current_input = downscaled 442 | 443 | return data_objects 444 | -------------------------------------------------------------------------------- /multiscale_spatial_image/to_multiscale/_support.py: -------------------------------------------------------------------------------- 1 | _spatial_dims = {"x", "y", "z"} 2 | 3 | 4 | def _dim_scale_factors(dims, scale_factor): 5 | if isinstance(scale_factor, int): 6 | result_scale_factors = { 7 | dim: scale_factor for dim in _spatial_dims.intersection(dims) 8 | } 9 | else: 10 | result_scale_factors = scale_factor 11 | return result_scale_factors 12 | 13 | 14 | def _align_chunks(current_input, default_chunks, dim_factors): 15 | block_0_shape = [c[0] for c in current_input.chunks] 16 | 17 | rechunk = False 18 | aligned_chunks = {} 19 | for dim, factor in dim_factors.items(): 20 | dim_index = current_input.dims.index(dim) 21 | if block_0_shape[dim_index] % factor: 22 | aligned_chunks[dim] = block_0_shape[dim_index] * factor 23 | rechunk = True 24 | else: 25 | aligned_chunks[dim] = default_chunks[dim] 26 | if rechunk: 27 | current_input = current_input.chunk(aligned_chunks) 28 | 29 | return current_input 30 | 31 | 32 | def _compute_sigma(input_spacings, shrink_factors) -> list: 33 | """Compute Gaussian kernel sigma values for resampling to isotropic spacing. 
34 |         sigma = sqrt((output_spacing^2 - input_spacing^2) / (2 * sqrt(2 * ln(2)))^2)
35 | 
36 |     Each spacing is treated as the full width at half maximum (FWHM) of a Gaussian,
37 |     FWHM = 2 * sqrt(2 * ln(2)) * sigma, and the variances of cascaded Gaussians add,
38 |     so the kernel supplies exactly the resolution difference between input and output.
39 |     For example, unit input spacing with a shrink factor of 2 gives
40 |     sigma = sqrt(2^2 - 1^2) / (2 * sqrt(2 * ln(2))) ~= 0.7355.
41 |     Ref https://discourse.itk.org/t/resampling-to-isotropic-signal-processing-theory/1403/16
42 | 
43 |     input_spacings: List
44 |         Input image physical spacings in xyzt order
45 | 
46 |     shrink_factors: List
47 |         Shrink ratio along each axis in xyzt order
48 | 
49 |     result: List
50 |         Standard deviation of Gaussian kernel along each axis in xyzt order
51 |     """
52 |     assert len(input_spacings) == len(shrink_factors)
53 |     import math
54 | 
55 |     output_spacings = [
56 |         input_spacing * shrink
57 |         for input_spacing, shrink in zip(input_spacings, shrink_factors)
58 |     ]
59 |     denominator = (2 * ((2 * math.log(2)) ** 0.5)) ** 2
60 |     return [
61 |         ((output_spacing**2 - input_spacing**2) / denominator) ** 0.5
62 |         for input_spacing, output_spacing in zip(input_spacings, output_spacings)
63 |     ]
--------------------------------------------------------------------------------
/multiscale_spatial_image/to_multiscale/_xarray.py:
--------------------------------------------------------------------------------
1 | from ._support import _align_chunks, _dim_scale_factors
2 | 
3 | 
4 | def _downsample_xarray_coarsen(
5 |     current_input, default_chunks, out_chunks, scale_factors, data_objects, name
6 | ):
7 |     for factor_index, scale_factor in enumerate(scale_factors):
8 |         dim_factors = _dim_scale_factors(current_input.dims, scale_factor)
9 |         current_input = _align_chunks(current_input, default_chunks, dim_factors)
10 | 
11 |         downscaled = (
12 |             current_input.coarsen(dim=dim_factors, boundary="trim", side="right")
13 |             .mean()
14 |             .astype(current_input.dtype)
15 |         )
16 | 
17 |         downscaled = downscaled.chunk(out_chunks)
18 | 
19 |         data_objects[f"scale{factor_index+1}"] = downscaled.to_dataset(
20 |             name=name, promote_attrs=True
21 |         )
22 |         current_input = downscaled
23 | 
24 |     return data_objects
--------------------------------------------------------------------------------
/multiscale_spatial_image/to_multiscale/itk_image_to_multiscale.py:
--------------------------------------------------------------------------------
1 | from typing import Union, Sequence, Optional, Dict, Mapping, Any, Tuple
2 | 
3 | from spatial_image import to_spatial_image
4 | 
5 | from .to_multiscale import to_multiscale, Methods
6 | from xarray import DataTree
7 | 
8 | 
9 | def itk_image_to_multiscale(
10 |     image,
11 |     scale_factors: Sequence[Union[Dict[str, int], int]],
12 |     anatomical_axes: bool = False,
13 |     axis_names: Optional[Dict[str, str]] = None,
14 |     axis_units: Optional[Dict[str, str]] = None,
15 |     name: Optional[str] = None,
16 |     method: Optional[Methods] = None,
17 |     chunks: Optional[
18 |         Union[
19 |             int,
20 |             Tuple[int, ...],
21 |             Tuple[Tuple[int, ...], ...],
22 |             Mapping[Any, Union[None, int, Tuple[int, ...]]],
23 |         ]
24 |     ] = None,
25 | ) -> DataTree:
26 |     import itk
27 |     import numpy as np
28 | 
29 |     if not name:
30 |         object_name = image.GetObjectName().strip()
31 |         if object_name and not object_name.isspace():
32 |             name = object_name
33 |         else:
34 |             name = "image"
35 | 
36 |     # Handle anatomical axes
37 |     if anatomical_axes and (axis_names is None):
38 |         axis_names = {
39 |             "x": "right-left",
40 |             "y": "anterior-posterior",
41 |             "z": "inferior-superior",
42 |         }
43 | 
44 |     # Orient 3D image so that direction is identity wrt RAI coordinates
45 |     image_dimension = image.GetImageDimension()
46 |     input_direction = np.array(image.GetDirection())
47 |     oriented_image = image
48 |     if (
49 |         anatomical_axes
50 |         and image_dimension == 3
51 |         and not (np.eye(image_dimension) == input_direction).all()
52 |     ):
53 |         desired_orientation = itk.SpatialOrientationEnums.ValidCoordinateOrientations_ITK_COORDINATE_ORIENTATION_RAI
54 |         oriented_image = itk.orient_image_filter(
55 |             image,
56 |             use_image_direction=True,
57 |             desired_coordinate_orientation=desired_orientation,
58 |         )
59 | 
60 |     elif anatomical_axes and image_dimension != 3:
61 |         raise ValueError(
62 |             f"Cannot use anatomical axes for an input image of dimension {image_dimension}"
63 |         )
64 | 
65 |     image_da = itk.xarray_from_image(oriented_image)
66 |     image_da.name = name
67 | 
68 |     image_dims: Tuple[str, str, str, str] = (
69 |         "x",
70 |         "y",
71 |         "z",
72 |         "t",
73 |     )  # ITK dims are in xyzt order
74 |     scale = {image_dims[i]: s for (i, s) in enumerate(image.GetSpacing())}
75 |     translation = {image_dims[i]: s for (i, s) in enumerate(image.GetOrigin())}
76 | 
77 |     spatial_image = to_spatial_image(
78 |         image_da.data,
79 |         dims=image_da.dims,
80 |         scale=scale,
81 |         translation=translation,
82 |         name=name,
83 |         axis_names=axis_names,
84 |         axis_units=axis_units,
85 |     )
86 | 
87 |     return to_multiscale(spatial_image, scale_factors, method=method, chunks=chunks)
--------------------------------------------------------------------------------
/multiscale_spatial_image/to_multiscale/to_multiscale.py:
--------------------------------------------------------------------------------
1 | from typing import Union, Sequence, Optional, Dict, Mapping, Any, Tuple
2 | from enum import Enum
3 | 
4 | from spatial_image import SpatialImage  # type: ignore
5 | 
6 | 
7 | from xarray import DataTree
8 | 
9 | from ._xarray import _downsample_xarray_coarsen
10 | from ._itk import (
11 |     _downsample_itk_bin_shrink,
12 |     _downsample_itk_gaussian,
13 |     _downsample_itk_label,
14 | )
15 | from ._dask_image import _downsample_dask_image
16 | from .._docs import inject_docs
17 | 
18 | 
19 | class Methods(Enum):
20 |     XARRAY_COARSEN = "xarray_coarsen"
21 |     ITK_BIN_SHRINK = "itk_bin_shrink"
22 |     ITK_GAUSSIAN = "itk_gaussian"
23 |     ITK_LABEL_GAUSSIAN = "itk_label_gaussian"
24 |     DASK_IMAGE_GAUSSIAN = "dask_image_gaussian"
25 |     DASK_IMAGE_MODE = "dask_image_mode"
26 |     DASK_IMAGE_NEAREST = "dask_image_nearest"
27 | 
28 | 
29 | @inject_docs(m=Methods)
30 | def to_multiscale(
31 |     image: SpatialImage,
32 |     scale_factors: Sequence[Union[Dict[str, int], int]],
33 |     method: Optional[Methods] = None,
34 |     chunks: Optional[
35 |         Union[
36 |             int,
37 |             Tuple[int, ...],
38 |             Tuple[Tuple[int, ...], ...],
39 |             Mapping[Any, Union[None, int, Tuple[int, ...]]],
40 |         ]
41 |     ] = None,
42 | ) -> DataTree:
43 |     """\
44 |     Generate a multiscale representation of a spatial image.
45 | 
46 |     Parameters
47 |     ----------
48 | 
49 |     image : SpatialImage
50 |         The spatial image from which we generate a multi-scale representation.
51 | 
52 |     scale_factors : int per scale or dict of spatial dimension ints per scale
53 |         Integer scale factors to apply uniformly across all spatial dimensions or
54 |         along individual spatial dimensions.
55 |         Examples: [2, 2] or [{{'x': 2, 'y': 4}}, {{'x': 5, 'y': 10}}]
56 | 
57 |     method : multiscale_spatial_image.Methods, optional
58 |         Method to reduce the input image. Available methods are the following:
59 | 
60 |         - `{m.XARRAY_COARSEN.value!r}` - Use xarray coarsen to downsample the image.
61 |         - `{m.ITK_BIN_SHRINK.value!r}` - Use ITK BinShrinkImageFilter to downsample the image.
62 |         - `{m.ITK_GAUSSIAN.value!r}` - Use ITK DiscreteGaussianImageFilter and resampling to downsample the image.
63 |         - `{m.ITK_LABEL_GAUSSIAN.value!r}` - Use ITK Gaussian smoothing with a label-aware interpolator to downsample label images.
64 |         - `{m.DASK_IMAGE_GAUSSIAN.value!r}` - Use dask-image gaussian_filter to downsample the image.
65 |         - `{m.DASK_IMAGE_MODE.value!r}` - Use dask-image mode_filter to downsample the image.
66 |         - `{m.DASK_IMAGE_NEAREST.value!r}` - Use dask-image zoom to downsample the image.
67 | 
68 |     chunks : xarray Dask array chunking specification, optional
69 |         Specify the chunking used in each output scale.
70 | 
71 |     Returns
72 |     -------
73 | 
74 |     result : DataTree
75 |         Multiscale representation. An xarray DataTree where each node is a SpatialImage Dataset
76 |         named by the integer scale. Increasing scales are downscaled versions of the input image.
77 |     """
78 | 
79 |     # IPFS and visualization friendly default chunks
80 |     if "z" in image.dims:
81 |         default_chunks = 64
82 |     else:
83 |         default_chunks = 256
84 |     default_chunks = {d: default_chunks for d in image.dims}
85 |     if "t" in image.dims:
86 |         default_chunks["t"] = 1
87 |     out_chunks = chunks
88 |     if out_chunks is None:
89 |         out_chunks = default_chunks
90 | 
91 |     # check for valid scale factors; dimensions not named in a dict factor keep their size
92 |     current_shape = {
93 |         d: s for (d, s) in zip(image.dims, image.shape) if d not in {"t", "c"}
94 |     }
95 | 
96 |     for scale_factor in scale_factors:
97 |         if isinstance(scale_factor, dict):
98 |             current_shape = {
99 |                 k: current_shape[k] / scale_factor.get(k, 1) for k in current_shape
100 |             }
101 |         elif isinstance(scale_factor, int):
102 |             current_shape = {k: (s / scale_factor) for (k, s) in current_shape.items()}
103 |         for k, v in current_shape.items():
104 |             if v < 1:
105 |                 raise ValueError(
106 |                     f"Scale factor {scale_factor} is incompatible with image shape {image.shape} along dimension `{k}`."
107 |                 )
108 | 
109 |     current_input = image.chunk(out_chunks)
110 |     # https://github.com/pydata/xarray/issues/5219
111 |     if "chunks" in current_input.encoding:
112 |         del current_input.encoding["chunks"]
113 |     data_objects = {
114 |         "scale0": current_input.to_dataset(name=image.name, promote_attrs=True)
115 |     }
116 | 
117 |     if method is None:
118 |         method = Methods.XARRAY_COARSEN
119 | 
120 |     if method is Methods.XARRAY_COARSEN:
121 |         data_objects = _downsample_xarray_coarsen(
122 |             current_input,
123 |             default_chunks,
124 |             out_chunks,
125 |             scale_factors,
126 |             data_objects,
127 |             image.name,
128 |         )
129 |     elif method is Methods.ITK_BIN_SHRINK:
130 |         data_objects = _downsample_itk_bin_shrink(
131 |             current_input,
132 |             default_chunks,
133 |             out_chunks,
134 |             scale_factors,
135 |             data_objects,
136 |             image,
137 |         )
138 |     elif method is Methods.ITK_GAUSSIAN:
139 |         data_objects = _downsample_itk_gaussian(
140 |             current_input,
141 |             default_chunks,
142 |             out_chunks,
143 |             scale_factors,
144 |             data_objects,
145 |             image,
146 |         )
147 |     elif method is Methods.ITK_LABEL_GAUSSIAN:
148 |         data_objects = _downsample_itk_label(
149 |             current_input,
150 |             default_chunks,
151 |             out_chunks,
152 |             scale_factors,
153 |             data_objects,
154 |             image,
155 |         )
156 |     elif method is Methods.DASK_IMAGE_GAUSSIAN:
157 |         data_objects = _downsample_dask_image(
158 |             current_input,
159 |             default_chunks,
160 |             out_chunks,
161 |             scale_factors,
162 |             data_objects,
163 |             image,
164 |             label=False,
165 |         )
166 |     elif method is Methods.DASK_IMAGE_NEAREST:
167 |         data_objects = _downsample_dask_image(
168 |             current_input,
169 |             default_chunks,
170 |             out_chunks,
171 |             scale_factors,
172 |             data_objects,
173 |             image,
174 |             label="nearest",
175 |         )
176 |     elif method is Methods.DASK_IMAGE_MODE:
177 |         data_objects = _downsample_dask_image(
178 |             current_input,
179 |             default_chunks,
180 |             out_chunks,
181 |             scale_factors,
182 |             data_objects,
183 |             image,
184 |             label="mode",
185 |         )
186 | 
187 |     multiscale = DataTree.from_dict(data_objects)
188 | 
189 |     return multiscale
190 | 
--------------------------------------------------------------------------------
/multiscale_spatial_image/utils.py:
--------------------------------------------------------------------------------
1 | from typing import Callable, Any
2 | from xarray import Dataset
3 | import functools
4 | 
5 | 
6 | def skip_non_dimension_nodes(
7 |     func: Callable[[Dataset], Dataset],
8 | ) -> Callable[[Dataset], Dataset]:
9 |     """Skip nodes in a DataTree that do not contain dimensions.
10 | 
11 |     This implements the workaround of https://github.com/pydata/xarray/issues/9693: the
12 |     DataTree representing a multiscale image has a root node without dimensions, while
13 |     several functions that are mapped over the datasets in the tree, e.g. a transpose,
14 |     depend on dimensions being present.
15 |     """
16 | 
17 |     @functools.wraps(func)
18 |     def _func(ds: Dataset, *args: Any, **kwargs: Any) -> Dataset:
19 |         # check if dimensions are present otherwise return verbatim
20 |         if len(ds.dims) == 0:
21 |             return ds
22 |         return func(ds, *args, **kwargs)
23 | 
24 |     return _func
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 | 
5 | [project]
6 | name = "multiscale_spatial_image"
7 | description = "Generate a multiscale, chunked, multi-dimensional spatial image data structure that can be serialized to OME-NGFF."
8 | authors = [{name = "Matt McCormick", email = "matt@mmmccormick.com"}]
9 | readme = "README.md"
10 | license.file = "LICENSE"
11 | home-page = "https://github.com/spatial-image/multiscale-spatial-image"
12 | classifiers = [
13 |     "License :: OSI Approved :: Apache Software License",
14 |     "Programming Language :: Python",
15 |     'Intended Audience :: Developers',
16 |     'Intended Audience :: Science/Research',
17 |     'Programming Language :: Python :: 3',
18 |     'Programming Language :: Python :: 3.10',
19 |     'Programming Language :: Python :: 3.11',
20 |     'Programming Language :: Python :: 3.12',
21 | ]
22 | keywords = [
23 |     "itk",
24 |     "ngff",
25 |     "ome",
26 |     "zarr",
27 |     "dask",
28 |     "imaging",
29 |     "visualization"
30 | ]
31 | dynamic = ["version"]
32 | 
33 | requires-python = ">=3.10,<3.13"
34 | dependencies = [
35 |     "numpy",
36 |     "dask",
37 |     "python-dateutil",
38 |     "spatial_image>=0.2.1",
39 |     "xarray>=2024.10.0",
40 |     "zarr",
41 | ]
42 | 
43 | [project.urls]
44 | Home = "https://github.com/spatial-image/multiscale-spatial-image"
45 | Source = "https://github.com/spatial-image/multiscale-spatial-image"
46 | Issues = "https://github.com/spatial-image/multiscale-spatial-image"
47 | 
48 | [project.optional-dependencies]
49 | test = [
50 |     "itk-filtering>=5.3.0",
51 |     "dask_image",
52 |     "jsonschema",
53 |     "pooch",
54 |     "pytest",
55 |     "pytest-mypy",
56 |     "fsspec",
57 |     "ipfsspec",
58 |     "urllib3",
59 |     "nbmake",
60 | ]
61 | 
62 | itk = [
63 |     "itk-filtering>=5.3.0",
64 | ]
65 | dask-image = [
66 |     "dask-image",
67 | ]
68 | imagej = [
69 |     "pyimagej",
70 | ]
71 | notebooks = ["matplotlib>=3.9.2,<4", "ome-types>=0.5.1.post1,<0.6", "tqdm>=4.66.4,<5"]
72 | 
73 | [tool.black]
74 | line-length = 88
75 | 
76 | [tool.hatch.version]
77 | path = "multiscale_spatial_image/__about__.py"
78 | 
79 | [tool.pixi.project]
80 |
channels = ["conda-forge"] 81 | platforms = ["win-64", "linux-64", "osx-64", "osx-arm64"] 82 | 83 | [tool.pixi.pypi-dependencies] 84 | multiscale-spatial-image = { path = ".", editable = true } 85 | 86 | [tool.pixi.tasks] 87 | 88 | [tool.pixi.environments] 89 | default = { solve-group = "default" } 90 | test = { features = ["test", "dask-image", "itk"], solve-group = "default" } 91 | notebooks = { features = ["test", "dask-image", "itk", "imagej", "notebooks"], solve-group = "default" } 92 | data = { features = ["data"], no-default-feature = true, solve-group = "default" } 93 | lint = { features = ["lint"], no-default-feature = true, solve-group = "default" } 94 | 95 | [tool.pixi.feature.test.tasks] 96 | test = { cmd = "pytest", description = "Run the test suite" } 97 | 98 | [tool.pixi.feature.test.dependencies] 99 | python = "3.10.*" 100 | 101 | [tool.pixi.feature.notebooks.dependencies] 102 | openjdk = "8.*" 103 | maven = ">=3.9.8,<3.10" 104 | jupyterlab = ">=4.2.4,<4.3" 105 | python = "3.10.*" 106 | 107 | [tool.pixi.feature.notebooks.tasks] 108 | init-imagej = { cmd = "python3 -c \"import imagej; ij = imagej.init('2.15.0'); print(ij.getVersion())\"", description = "Initialize the python imagej installation" } 109 | test-notebooks = { cmd = "pytest --nbmake --nbmake-timeout=3000 examples/ConvertImageioImageResource.ipynb examples/ConvertITKImage.ipynb examples/ConvertPyImageJDataset.ipynb examples/ConvertTiffFile.ipynb examples/HelloMultiscaleSpatialImageWorld.ipynb", depends-on = ["init-imagej"], description = "Test the notebooks" } 110 | dev-notebooks = { cmd = "jupyter lab examples", description = "Start Jupyter Lab" } 111 | 112 | [tool.pixi.feature.data.dependencies] 113 | python = ">=3.10.0,<4" 114 | pooch = ">=1.8.2,<2" 115 | 116 | [tool.pixi.feature.data.tasks] 117 | hash-data = { cmd = "tar cvf ../data.tar * && gzip -9 -f ../data.tar && echo 'New SHA256:' && python3 -c 'import pooch; print(pooch.file_hash(\"../data.tar.gz\"))'", cwd = "test/data", description = "Update the testing data tarball and get its sha256 hash" } 118 | 119 | [tool.pixi.feature.lint.dependencies] 120 | pre-commit = "*" 121 | 122 | [tool.pixi.feature.lint.tasks] 123 | pre-commit-install = { cmd = "pre-commit install", description = "Install pre-commit hooks" } 124 | pre-commit-run = { cmd = "pre-commit run --all", description = "Run pre-commit hooks on all repository files" } 125 | lint = { depends-on = ["pre-commit-run"], description = "Run linters" } 126 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spatial-image/multiscale-spatial-image/765c1b81f4235affa49a5ae673c6fa376ea04dfb/test/__init__.py -------------------------------------------------------------------------------- /test/_data.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | import pooch 5 | from zarr.storage import DirectoryStore 6 | import xarray as xr 7 | 8 | test_data_ipfs_cid = "bafybeiaskr5fxg6rbcwlxl6ibzqhubdleacenrpbnymc6oblwoi7ceqzta" 9 | test_data_sha256 = "507dd779cba007c46ea68a5fe8865cabd5d8a7e00816470faae9195d1f1c3cd1" 10 | 11 | 12 | test_dir = Path(__file__).resolve().parent 13 | extract_dir = "data" 14 | test_data_dir = test_dir / extract_dir 15 | test_data = pooch.create( 16 | path=test_dir, 17 | # 
base_url=f"https://{test_data_ipfs_cid}.ipfs.w3s.link/ipfs/{test_data_ipfs_cid}/", 18 | base_url="https://github.com/spatial-image/multiscale-spatial-image/releases/download/v2.0.0/", 19 | registry={ 20 | "data.tar.gz": f"sha256:{test_data_sha256}", 21 | }, 22 | retry_if_failed=5, 23 | ) 24 | 25 | 26 | @pytest.fixture 27 | def input_images(): 28 | untar = pooch.Untar(extract_dir=extract_dir) 29 | test_data.fetch("data.tar.gz", processor=untar) 30 | result = {} 31 | 32 | store = DirectoryStore( 33 | test_data_dir / "input" / "cthead1.zarr", dimension_separator="/" 34 | ) 35 | image_ds = xr.open_zarr(store) 36 | image_da = image_ds.cthead1 37 | result["cthead1"] = image_da 38 | 39 | store = DirectoryStore( 40 | test_data_dir / "input" / "small_head.zarr", dimension_separator="/" 41 | ) 42 | image_ds = xr.open_zarr(store) 43 | image_da = image_ds.small_head 44 | result["small_head"] = image_da 45 | 46 | store = DirectoryStore( 47 | test_data_dir / "input" / "2th_cthead1.zarr", 48 | ) 49 | image_ds = xr.open_zarr(store) 50 | image_da = image_ds["2th_cthead1"] 51 | result["2th_cthead1"] = image_da 52 | 53 | return result 54 | 55 | 56 | def verify_against_baseline(dataset_name, baseline_name, multiscale): 57 | store = DirectoryStore( 58 | test_data_dir / f"baseline/{dataset_name}/{baseline_name}", 59 | dimension_separator="/", 60 | ) 61 | dt = xr.open_datatree(store, engine="zarr", mode="r") 62 | xr.testing.assert_equal(dt.ds, multiscale.ds) 63 | for scale in multiscale.children: 64 | xr.testing.assert_equal(dt[scale].ds, multiscale[scale].ds) 65 | 66 | 67 | def store_new_image(dataset_name, baseline_name, multiscale_image): 68 | """Helper method for writing output results to disk 69 | for later upload as test baseline""" 70 | path = test_data_dir / f"baseline/{dataset_name}/{baseline_name}" 71 | store = DirectoryStore( 72 | str(path), 73 | dimension_separator="/", 74 | ) 75 | multiscale_image.to_zarr(store, mode="w") 76 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from spatial_image import to_spatial_image 4 | from multiscale_spatial_image import to_multiscale 5 | 6 | 7 | @pytest.fixture() 8 | def multiscale_data(): 9 | data = np.zeros((3, 200, 200)) 10 | dims = ("c", "y", "x") 11 | scale_factors = [2, 2] 12 | image = to_spatial_image(array_like=data, dims=dims) 13 | return to_multiscale(image, scale_factors=scale_factors) 14 | -------------------------------------------------------------------------------- /test/test_ngff_validation.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Dict 3 | import urllib3 4 | 5 | from referencing import Registry, Resource 6 | from jsonschema import Draft202012Validator 7 | 8 | from xarray import DataTree 9 | 10 | from multiscale_spatial_image import to_multiscale, MultiscaleSpatialImage 11 | from spatial_image import to_spatial_image 12 | import numpy as np 13 | import zarr 14 | 15 | http = urllib3.PoolManager() 16 | 17 | ngff_uri = "https://ngff.openmicroscopy.org" 18 | 19 | 20 | def load_schema(version: str = "0.4", strict: bool = False) -> Dict: 21 | strict_str = "" 22 | if strict: 23 | strict_str = "strict_" 24 | response = http.request( 25 | "GET", f"{ngff_uri}/{version}/schemas/{strict_str}image.schema" 26 | ) 27 | schema = json.loads(response.data.decode()) 28 | return schema 29 | 30 | 31 | def 
check_valid_ngff(multiscale: DataTree): 32 | store = zarr.storage.MemoryStore(dimension_separator="/") 33 | assert isinstance(multiscale.msi, MultiscaleSpatialImage) 34 | multiscale.msi.to_zarr(store, compute=True) 35 | zarr.convenience.consolidate_metadata(store) 36 | metadata = json.loads(store.get(".zmetadata"))["metadata"] 37 | ngff = metadata[".zattrs"] 38 | 39 | image_schema = load_schema(version="0.4", strict=False) 40 | # strict_image_schema = load_schema(version="0.4", strict=True) 41 | registry = Registry().with_resource( 42 | ngff_uri, resource=Resource.from_contents(image_schema) 43 | ) 44 | validator = Draft202012Validator(image_schema, registry=registry) 45 | # registry_strict = Registry().with_resource(ngff_uri, resource=Resource.from_contents(strict_image_schema)) 46 | # strict_validator = Draft202012Validator(strict_schema, registry=registry_strict) 47 | 48 | validator.validate(ngff) 49 | # Need to add NGFF metadata property 50 | # strict_validator.validate(ngff) 51 | 52 | 53 | def test_y_x_valid_ngff(): 54 | array = np.random.random((32, 16)) 55 | image = to_spatial_image(array) 56 | multiscale = to_multiscale(image, [2, 4]) 57 | 58 | check_valid_ngff(multiscale) 59 | 60 | 61 | def test_z_y_x_valid_ngff(): 62 | array = np.random.random((32, 32, 16)) 63 | image = to_spatial_image(array) 64 | multiscale = to_multiscale(image, [2, 4]) 65 | 66 | check_valid_ngff(multiscale) 67 | 68 | 69 | def test_z_y_x_c_valid_ngff(): 70 | array = np.random.random((32, 32, 16, 3)) 71 | image = to_spatial_image(array) 72 | multiscale = to_multiscale(image, [2, 4]) 73 | 74 | check_valid_ngff(multiscale) 75 | 76 | 77 | def test_t_z_y_x_c_valid_ngff(): 78 | array = np.random.random((2, 32, 32, 16, 3)) 79 | image = to_spatial_image(array) 80 | multiscale = to_multiscale(image, [2, 4]) 81 | 82 | check_valid_ngff(multiscale) 83 | -------------------------------------------------------------------------------- /test/test_operations.py: -------------------------------------------------------------------------------- 1 | def test_transpose(multiscale_data): 2 | multiscale_data = multiscale_data.msi.transpose("y", "x", "c") 3 | 4 | for scale in list(multiscale_data.keys()): 5 | assert multiscale_data[scale]["image"].dims == ("y", "x", "c") 6 | 7 | 8 | def test_reindex_arrays(multiscale_data): 9 | multiscale_data = multiscale_data.msi.reindex_data_arrays({"c": ["r", "g", "b"]}) 10 | for scale in list(multiscale_data.keys()): 11 | assert multiscale_data[scale].c.data.tolist() == ["r", "g", "b"] 12 | 13 | 14 | def test_assign_coords(multiscale_data): 15 | multiscale_data = multiscale_data.msi.assign_coords({"c": ["r", "g", "b"]}) 16 | for scale in list(multiscale_data.keys()): 17 | assert multiscale_data[scale].c.data.tolist() == ["r", "g", "b"] 18 | -------------------------------------------------------------------------------- /test/test_to_multiscale.py: -------------------------------------------------------------------------------- 1 | import xarray as xr 2 | import pytest 3 | from multiscale_spatial_image import to_multiscale 4 | 5 | from ._data import input_images # noqa: F401 6 | 7 | 8 | def test_base_scale(input_images): # noqa: F811 9 | image = input_images["cthead1"] 10 | 11 | multiscale = to_multiscale(image, []) 12 | xr.testing.assert_equal(image, multiscale["scale0"].ds["cthead1"]) 13 | 14 | image = input_images["small_head"] 15 | multiscale = to_multiscale(image, []) 16 | xr.testing.assert_equal(image, multiscale["scale0"].ds["small_head"]) 17 | 18 | with pytest.raises(ValueError): 19 | 
        to_multiscale(image, scale_factors=[500])
20 |     with pytest.raises(ValueError):
21 |         to_multiscale(image, scale_factors=[{"x": 10}, {"x": 500, "y": 10}])
--------------------------------------------------------------------------------
/test/test_to_multiscale_dask_image.py:
--------------------------------------------------------------------------------
1 | from multiscale_spatial_image import Methods, to_multiscale
2 | 
3 | from ._data import verify_against_baseline, input_images  # noqa: F401
4 | 
5 | 
6 | def test_gaussian_isotropic_scale_factors(input_images):  # noqa: F811
7 |     dataset_name = "cthead1"
8 |     image = input_images[dataset_name]
9 |     baseline_name = "2_4/DASK_IMAGE_GAUSSIAN"
10 |     multiscale = to_multiscale(image, [2, 4], method=Methods.DASK_IMAGE_GAUSSIAN)
11 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
12 | 
13 |     dataset_name = "cthead1"
14 |     image = input_images[dataset_name]
15 |     baseline_name = "2_3/DASK_IMAGE_GAUSSIAN"
16 |     multiscale = to_multiscale(image, [2, 3], method=Methods.DASK_IMAGE_GAUSSIAN)
17 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
18 | 
19 |     dataset_name = "small_head"
20 |     image = input_images[dataset_name]
21 |     baseline_name = "2_3_4/DASK_IMAGE_GAUSSIAN"
22 |     multiscale = to_multiscale(image, [2, 3, 4], method=Methods.DASK_IMAGE_GAUSSIAN)
23 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
24 | 
25 | 
26 | def test_gaussian_anisotropic_scale_factors(input_images):  # noqa: F811
27 |     dataset_name = "cthead1"
28 |     image = input_images[dataset_name]
29 |     scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}]
30 |     multiscale = to_multiscale(image, scale_factors, method=Methods.DASK_IMAGE_GAUSSIAN)
31 |     baseline_name = "x2y4_x1y2/DASK_IMAGE_GAUSSIAN"
32 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
33 | 
34 |     dataset_name = "small_head"
35 |     image = input_images[dataset_name]
36 |     scale_factors = [
37 |         {"x": 3, "y": 2, "z": 4},
38 |         {"x": 2, "y": 2, "z": 2},
39 |         {"x": 1, "y": 2, "z": 1},
40 |     ]
41 |     multiscale = to_multiscale(image, scale_factors, method=Methods.DASK_IMAGE_GAUSSIAN)
42 |     baseline_name = "x3y2z4_x2y2z2_x1y2z1/DASK_IMAGE_GAUSSIAN"
43 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
44 | 
45 | 
46 | def test_label_nearest_isotropic_scale_factors(input_images):  # noqa: F811
47 |     dataset_name = "2th_cthead1"
48 |     image = input_images[dataset_name]
49 |     baseline_name = "2_4/DASK_IMAGE_NEAREST"
50 |     multiscale = to_multiscale(image, [2, 4], method=Methods.DASK_IMAGE_NEAREST)
51 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
52 | 
53 |     dataset_name = "2th_cthead1"
54 |     image = input_images[dataset_name]
55 |     baseline_name = "2_3/DASK_IMAGE_NEAREST"
56 |     multiscale = to_multiscale(image, [2, 3], method=Methods.DASK_IMAGE_NEAREST)
57 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
58 | 
59 | 
60 | def test_label_nearest_anisotropic_scale_factors(input_images):  # noqa: F811
61 |     dataset_name = "2th_cthead1"
62 |     image = input_images[dataset_name]
63 |     scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}]
64 |     multiscale = to_multiscale(image, scale_factors, method=Methods.DASK_IMAGE_NEAREST)
65 |     baseline_name = "x2y4_x1y2/DASK_IMAGE_NEAREST"
66 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
67 | 
68 | 
69 | def test_label_mode_isotropic_scale_factors(input_images):  # noqa: F811
70 |     dataset_name = "2th_cthead1"
71 |     image = input_images[dataset_name]
72 |     baseline_name = "2_4/DASK_IMAGE_MODE"
73 |     multiscale = to_multiscale(image, [2, 4], 
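# DASK_IMAGE_MODE applies a mode (majority-vote) filter, so downsampled
# values stay within the original label set.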
method=Methods.DASK_IMAGE_MODE) 74 | verify_against_baseline(dataset_name, baseline_name, multiscale) 75 | 76 | dataset_name = "2th_cthead1" 77 | image = input_images[dataset_name] 78 | baseline_name = "2_3/DASK_IMAGE_MODE" 79 | multiscale = to_multiscale(image, [2, 3], method=Methods.DASK_IMAGE_MODE) 80 | verify_against_baseline(dataset_name, baseline_name, multiscale) 81 | 82 | 83 | def test_label_mode_anisotropic_scale_factors(input_images): # noqa: F811 84 | dataset_name = "2th_cthead1" 85 | image = input_images[dataset_name] 86 | scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}] 87 | multiscale = to_multiscale(image, scale_factors, method=Methods.DASK_IMAGE_MODE) 88 | baseline_name = "x2y4_x1y2/DASK_IMAGE_MODE" 89 | verify_against_baseline(dataset_name, baseline_name, multiscale) 90 | -------------------------------------------------------------------------------- /test/test_to_multiscale_itk.py: -------------------------------------------------------------------------------- 1 | from multiscale_spatial_image import Methods, to_multiscale, itk_image_to_multiscale 2 | 3 | from ._data import verify_against_baseline, input_images # noqa: F401 4 | 5 | 6 | def test_isotropic_scale_factors(input_images): # noqa: F811 7 | dataset_name = "cthead1" 8 | image = input_images[dataset_name] 9 | multiscale = to_multiscale(image, [2, 4], method=Methods.ITK_BIN_SHRINK) 10 | baseline_name = "2_4/ITK_BIN_SHRINK" 11 | verify_against_baseline(dataset_name, baseline_name, multiscale) 12 | 13 | dataset_name = "cthead1" 14 | image = input_images[dataset_name] 15 | multiscale = to_multiscale(image, [2, 3], method=Methods.ITK_BIN_SHRINK) 16 | baseline_name = "2_3/ITK_BIN_SHRINK" 17 | verify_against_baseline(dataset_name, baseline_name, multiscale) 18 | 19 | dataset_name = "small_head" 20 | image = input_images[dataset_name] 21 | multiscale = to_multiscale(image, [2, 3, 4], method=Methods.ITK_BIN_SHRINK) 22 | baseline_name = "2_3_4/ITK_BIN_SHRINK" 23 | verify_against_baseline(dataset_name, baseline_name, multiscale) 24 | 25 | 26 | def test_gaussian_isotropic_scale_factors(input_images): # noqa: F811 27 | dataset_name = "cthead1" 28 | image = input_images[dataset_name] 29 | baseline_name = "2_4/ITK_GAUSSIAN" 30 | multiscale = to_multiscale(image, [2, 4], method=Methods.ITK_GAUSSIAN) 31 | verify_against_baseline(dataset_name, baseline_name, multiscale) 32 | 33 | dataset_name = "cthead1" 34 | image = input_images[dataset_name] 35 | baseline_name = "2_3/ITK_GAUSSIAN" 36 | multiscale = to_multiscale(image, [2, 3], method=Methods.ITK_GAUSSIAN) 37 | verify_against_baseline(dataset_name, baseline_name, multiscale) 38 | 39 | dataset_name = "small_head" 40 | image = input_images[dataset_name] 41 | baseline_name = "2_3_4/ITK_GAUSSIAN" 42 | multiscale = to_multiscale(image, [2, 3, 4], method=Methods.ITK_GAUSSIAN) 43 | verify_against_baseline(dataset_name, baseline_name, multiscale) 44 | 45 | 46 | def test_label_gaussian_isotropic_scale_factors(input_images): # noqa: F811 47 | dataset_name = "2th_cthead1" 48 | image = input_images[dataset_name] 49 | baseline_name = "2_4/ITK_LABEL_GAUSSIAN" 50 | multiscale = to_multiscale(image, [2, 4], method=Methods.ITK_LABEL_GAUSSIAN) 51 | verify_against_baseline(dataset_name, baseline_name, multiscale) 52 | 53 | dataset_name = "2th_cthead1" 54 | image = input_images[dataset_name] 55 | baseline_name = "2_3/ITK_LABEL_GAUSSIAN" 56 | multiscale = to_multiscale(image, [2, 3], method=Methods.ITK_LABEL_GAUSSIAN) 57 | verify_against_baseline(dataset_name, baseline_name, multiscale) 58 | 59 | 
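# To record a new baseline after an intentional behavior change, a sketch using
# the helper defined in test/_data.py (dataset/baseline names are illustrative,
# and `multiscale` is a freshly computed result from to_multiscale):
#
#     from ._data import store_new_image
#     store_new_image("cthead1", "2_4/ITK_GAUSSIAN", multiscale)
#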
60 | def test_anisotropic_scale_factors(input_images):  # noqa: F811
61 |     dataset_name = "cthead1"
62 |     image = input_images[dataset_name]
63 |     scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}]
64 |     multiscale = to_multiscale(image, scale_factors, method=Methods.ITK_BIN_SHRINK)
65 |     baseline_name = "x2y4_x1y2/ITK_BIN_SHRINK"
66 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
67 | 
68 |     dataset_name = "small_head"
69 |     image = input_images[dataset_name]
70 |     scale_factors = [
71 |         {"x": 3, "y": 2, "z": 4},
72 |         {"x": 2, "y": 2, "z": 2},
73 |         {"x": 1, "y": 2, "z": 1},
74 |     ]
75 |     multiscale = to_multiscale(image, scale_factors, method=Methods.ITK_BIN_SHRINK)
76 |     baseline_name = "x3y2z4_x2y2z2_x1y2z1/ITK_BIN_SHRINK"
77 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
78 | 
79 | 
80 | def test_gaussian_anisotropic_scale_factors(input_images):  # noqa: F811
81 |     dataset_name = "cthead1"
82 |     image = input_images[dataset_name]
83 |     scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}]
84 |     multiscale = to_multiscale(image, scale_factors, method=Methods.ITK_GAUSSIAN)
85 |     baseline_name = "x2y4_x1y2/ITK_GAUSSIAN"
86 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
87 | 
88 |     dataset_name = "small_head"
89 |     image = input_images[dataset_name]
90 |     scale_factors = [
91 |         {"x": 3, "y": 2, "z": 4},
92 |         {"x": 2, "y": 2, "z": 2},
93 |         {"x": 1, "y": 2, "z": 1},
94 |     ]
95 |     multiscale = to_multiscale(image, scale_factors, method=Methods.ITK_GAUSSIAN)
96 |     baseline_name = "x3y2z4_x2y2z2_x1y2z1/ITK_GAUSSIAN"
97 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
98 | 
99 | 
100 | def test_label_gaussian_anisotropic_scale_factors(input_images):  # noqa: F811
101 |     dataset_name = "2th_cthead1"
102 |     image = input_images[dataset_name]
103 |     scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}]
104 |     multiscale = to_multiscale(image, scale_factors, method=Methods.ITK_LABEL_GAUSSIAN)
105 |     baseline_name = "x2y4_x1y2/ITK_LABEL_GAUSSIAN"
106 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
107 | 
108 | 
109 | def test_from_itk(input_images):  # noqa: F811
110 |     import itk
111 |     import numpy as np
112 | 
113 |     # Test 2D with ITK default metadata
114 |     dataset_name = "cthead1"
115 |     image = itk.image_from_xarray(input_images[dataset_name])
116 |     scale_factors = [4, 2]
117 |     multiscale = itk_image_to_multiscale(image, scale_factors)
118 |     baseline_name = "4_2/from_itk"
119 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
120 | 
121 |     # Test 2D with nonunit metadata
122 |     dataset_name = "cthead1"
123 |     image = itk.image_from_xarray(input_images[dataset_name])
124 |     image.SetDirection(np.array([[-1, 0], [0, 1]]))
125 |     image.SetSpacing([0.5, 2.0])
126 |     image.SetOrigin([3.0, 5.0])
127 | 
128 |     name = "cthead1_nonunit_metadata"
129 |     axis_units = {dim: "millimeters" for dim in ("x", "y", "z")}
130 | 
131 |     scale_factors = [4, 2]
132 |     multiscale = itk_image_to_multiscale(
133 |         image,
134 |         scale_factors=scale_factors,
135 |         anatomical_axes=False,
136 |         axis_units=axis_units,
137 |         name=name,
138 |     )
139 |     baseline_name = "4_2/from_itk_nonunit_metadata"
140 |     verify_against_baseline(dataset_name, baseline_name, multiscale)
141 | 
142 |     # Expect error for 2D image with anatomical axes
143 |     try:
144 |         itk_image_to_multiscale(
145 |             image, scale_factors=scale_factors, anatomical_axes=True
146 |         )
147 |         raise Exception(
148 |             "Failed to catch expected exception for 2D image requesting anatomical axes"
149 |         )
150 |     except 
ValueError: 151 | pass # caught expected exception 152 | 153 | # Test 3D with ITK default metadata 154 | dataset_name = "small_head" 155 | image = itk.image_from_xarray(input_images[dataset_name]) 156 | scale_factors = [4, 2] 157 | multiscale = itk_image_to_multiscale(image, scale_factors) 158 | baseline_name = "4_2/from_itk" 159 | verify_against_baseline(dataset_name, baseline_name, multiscale) 160 | 161 | # Test 3D with additional metadata 162 | dataset_name = "small_head" 163 | image = itk.image_from_xarray(input_images[dataset_name]) 164 | image.SetObjectName( 165 | str(input_images[dataset_name].name) 166 | ) # implicit in image_from_xarray in itk>v5.3rc04 167 | 168 | name = "small_head_anatomical" 169 | axis_units = {dim: "millimeters" for dim in input_images[dataset_name].dims} 170 | 171 | scale_factors = [4, 2] 172 | multiscale = itk_image_to_multiscale( 173 | image, 174 | scale_factors=scale_factors, 175 | anatomical_axes=True, 176 | axis_units=axis_units, 177 | name=name, 178 | ) 179 | baseline_name = "4_2/from_itk_anatomical" 180 | verify_against_baseline(dataset_name, baseline_name, multiscale) 181 | -------------------------------------------------------------------------------- /test/test_to_multiscale_xarray.py: -------------------------------------------------------------------------------- 1 | from multiscale_spatial_image import Methods, to_multiscale 2 | 3 | from ._data import verify_against_baseline, input_images # noqa: F401 4 | 5 | 6 | def test_isotropic_scale_factors(input_images): # noqa: F811 7 | dataset_name = "cthead1" 8 | image = input_images[dataset_name] 9 | baseline_name = "2_4/XARRAY_COARSEN" 10 | multiscale = to_multiscale(image, [2, 4], method=Methods.XARRAY_COARSEN) 11 | verify_against_baseline(dataset_name, baseline_name, multiscale) 12 | 13 | dataset_name = "cthead1" 14 | image = input_images[dataset_name] 15 | baseline_name = "2_3/XARRAY_COARSEN" 16 | multiscale = to_multiscale(image, [2, 3], method=Methods.XARRAY_COARSEN) 17 | verify_against_baseline(dataset_name, baseline_name, multiscale) 18 | 19 | dataset_name = "small_head" 20 | image = input_images[dataset_name] 21 | baseline_name = "2_3_4/XARRAY_COARSEN" 22 | multiscale = to_multiscale(image, [2, 3, 4], method=Methods.XARRAY_COARSEN) 23 | verify_against_baseline(dataset_name, baseline_name, multiscale) 24 | 25 | 26 | def test_anisotropic_scale_factors(input_images): # noqa: F811 27 | dataset_name = "cthead1" 28 | image = input_images[dataset_name] 29 | scale_factors = [{"x": 2, "y": 4}, {"x": 1, "y": 2}] 30 | multiscale = to_multiscale(image, scale_factors, method=Methods.XARRAY_COARSEN) 31 | baseline_name = "x2y4_x1y2/XARRAY_COARSEN" 32 | verify_against_baseline(dataset_name, baseline_name, multiscale) 33 | # Test default method: Methods.XARRAY_COARSEN 34 | multiscale = to_multiscale(image, scale_factors) 35 | verify_against_baseline(dataset_name, baseline_name, multiscale) 36 | 37 | dataset_name = "small_head" 38 | image = input_images[dataset_name] 39 | scale_factors = [ 40 | {"x": 3, "y": 2, "z": 4}, 41 | {"x": 2, "y": 2, "z": 2}, 42 | {"x": 1, "y": 2, "z": 1}, 43 | ] 44 | multiscale = to_multiscale(image, scale_factors, method=Methods.XARRAY_COARSEN) 45 | baseline_name = "x3y2z4_x2y2z2_x1y2z1/XARRAY_COARSEN" 46 | verify_against_baseline(dataset_name, baseline_name, multiscale) 47 | -------------------------------------------------------------------------------- /test/test_utils.py: -------------------------------------------------------------------------------- 1 | from 
multiscale_spatial_image import skip_non_dimension_nodes 2 | 3 | 4 | def test_skip_nodes(multiscale_data): 5 | @skip_non_dimension_nodes 6 | def transpose(ds, *args, **kwargs): 7 | return ds.transpose(*args, **kwargs) 8 | 9 | for scale in list(multiscale_data.keys()): 10 | assert multiscale_data[scale]["image"].dims == ("c", "y", "x") 11 | 12 | # applying this function without skipping the root node would fail as the root node does not have dimensions. 13 | result = multiscale_data.map_over_datasets(transpose, "y", "x", "c") 14 | for scale in list(result.keys()): 15 | assert result[scale]["image"].dims == ("y", "x", "c") 16 | --------------------------------------------------------------------------------