├── .coveragerc
├── .editorconfig
├── .gitattributes
├── .github
└── workflows
│ ├── publish_to_pypi.yaml
│ └── tox.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── LICENCE.txt
├── MANIFEST.in
├── README.rst
├── docs
├── Makefile
├── api
│ ├── neuroglancer_scripts.accessor.rst
│ ├── neuroglancer_scripts.chunk_encoding.rst
│ ├── neuroglancer_scripts.downscaling.rst
│ ├── neuroglancer_scripts.file_accessor.rst
│ ├── neuroglancer_scripts.http_accessor.rst
│ ├── neuroglancer_scripts.mesh.rst
│ ├── neuroglancer_scripts.precomputed_io.rst
│ ├── neuroglancer_scripts.rst
│ └── neuroglancer_scripts.utils.rst
├── conf.py
├── examples.rst
├── index.rst
├── neuroglancer-info.rst
├── release-notes.rst
├── script-usage.rst
└── serving-data.rst
├── examples
├── BigBrainRelease.2015
│ ├── .gitignore
│ ├── classif.nii.gz
│ ├── info_fullres.json
│ └── mesh_labels.csv
└── JuBrain
│ ├── .gitignore
│ ├── MPM.nii.gz
│ └── colin27T1_seg.nii.gz
├── experimental
├── mesh_to_vtk.py
├── off_to_vtk.py
└── stl_to_precomputed.py
├── pyproject.toml
├── script_tests
└── test_scripts.py
├── setup.cfg
├── src
└── neuroglancer_scripts
│ ├── __init__.py
│ ├── _compressed_segmentation.py
│ ├── _jpeg.py
│ ├── accessor.py
│ ├── chunk_encoding.py
│ ├── data_types.py
│ ├── downscaling.py
│ ├── dyadic_pyramid.py
│ ├── file_accessor.py
│ ├── http_accessor.py
│ ├── mesh.py
│ ├── precomputed_io.py
│ ├── scripts
│ ├── __init__.py
│ ├── compute_scales.py
│ ├── convert_chunks.py
│ ├── generate_scales_info.py
│ ├── link_mesh_fragments.py
│ ├── mesh_to_precomputed.py
│ ├── scale_stats.py
│ ├── slices_to_precomputed.py
│ ├── volume_to_precomputed.py
│ └── volume_to_precomputed_pyramid.py
│ ├── sharded_base.py
│ ├── sharded_file_accessor.py
│ ├── sharded_http_accessor.py
│ ├── transform.py
│ ├── utils.py
│ └── volume_reader.py
├── tox.ini
└── unit_tests
├── test_accessor.py
├── test_chunk_encoding.py
├── test_data_types.py
├── test_downscaling.py
├── test_dyadic_pyramid.py
├── test_file_accessor.py
├── test_http_accessor.py
├── test_mesh.py
├── test_precomputed_io.py
├── test_sharded_base.py
├── test_sharded_file_accessor.py
├── test_sharded_http_accessor.py
├── test_transform.py
├── test_utils.py
└── test_volume_reader.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | # .coveragerc to control coverage.py -*- mode: conf-unix; -*-
2 |
3 | [run]
4 | branch = True
5 |
6 | [report]
7 | # Regexes for lines to exclude from consideration
8 | exclude_lines =
9 | # Have to re-enable the standard pragma
10 | pragma: no cover
11 |
12 | # Don't complain about missing debug-only code:
13 | def __repr__
14 | if self\.debug
15 |
16 | # Don't complain if tests don't hit defensive assertion code:
17 | raise AssertionError
18 | raise NotImplementedError
19 |
20 | # Don't complain if non-runnable code isn't run:
21 | if 0:
22 | if __name__ == .__main__.:
23 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 |
3 | root = true
4 |
5 | # Defaults for every file
6 | [*]
7 | charset = utf-8
8 | end_of_line = lf
9 | indent_style = space
10 | insert_final_newline = true
11 | trim_trailing_whitespace = true
12 |
13 | [*.py]
14 | indent_style = space
15 | indent_size = 4
16 |
17 | [*.json]
18 | indent_size = 2
19 |
20 | # Makefiles must use tabs for indentation
21 | [Makefile]
22 | indent_style = tab
23 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Warn about whitespace errors
2 | * whitespace=trailing-space,tab-in-indent
3 | Makefile whitespace=trailing-space
4 |
5 | # Normalize end-of-line for files that Git detects to be text
6 | * text=auto
7 |
8 | # Python files
9 | *.py diff=python
10 |
11 | # Git LFS
12 | *.nii.gz filter=lfs diff=lfs merge=lfs -text
13 |
--------------------------------------------------------------------------------
/.github/workflows/publish_to_pypi.yaml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Publish release to PyPI
5 |
6 | on:
7 | release:
8 | types: [published]
9 |
10 | jobs:
11 | build:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v3.3.0
15 | - name: Set up Python
16 | uses: actions/setup-python@v4.5.0
17 | with:
18 | python-version: '3.x'
19 | - name: Install dependencies
20 | run: |
21 | python -m pip install --upgrade pip
22 | python -m pip install --upgrade build twine
23 | - name: Build the package
24 | run: |
25 | python -m build
26 | python -m twine check dist/*
27 | - name: Publish a Python distribution to PyPI
28 | uses: pypa/gh-action-pypi-publish@v1.6.4
29 | with:
30 | user: __token__
31 | password: ${{ secrets.PYPI_API_TOKEN }}
32 |
--------------------------------------------------------------------------------
/.github/workflows/tox.yaml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | push:
8 | branches:
9 | - master
10 | schedule:
11 | - cron: "34 22 * * SUN" # run weekly
12 | jobs:
13 | build:
14 | strategy:
15 | fail-fast: false
16 | matrix:
17 | python-version: [ '3.12', '3.11', '3.10', '3.9', '3.8', '3.7' ]
18 | runs-on: ['ubuntu-latest']
19 | include:
20 | - runs-on: 'ubuntu-20.04'
21 | python-version: '3.6'
22 | runs-on: ${{ matrix.runs-on }}
23 | steps:
24 | - uses: actions/checkout@v3
25 | with:
26 | lfs: true
27 | - name: Set up Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v4
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | - name: PIP cache
32 | uses: actions/cache@v2
33 | with:
34 | path: ~/.cache/pip
35 | key: ${{ runner.os }}-pip-python${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
36 | restore-keys: |
37 | ${{ runner.os }}-pip-
38 | - name: Install dependencies
39 | run: |
40 | python -m pip install --upgrade pip
41 | pip install tox tox-gh-actions
42 | - name: Test with tox
43 | run: |
44 | tox
45 |
46 | # 2024-07-02: disable CodeCov (temporarily?), uploads do not work even with a token
47 | # Code coverage is run on Python 3.10, see tox.ini
48 | #- if: ${{ matrix.python-version == '3.10' }}
49 | # uses: codecov/codecov-action@v4
50 | # with:
51 | # token: ${{ secrets.CODECOV_TOKEN }}
52 | # fail_ci_if_error: true # optional (default = false)
53 | # verbose: true # optional (default = false)
54 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 |
5 | # C extensions
6 | *.so
7 |
8 | # Distribution / packaging
9 | .Python
10 | build/
11 | develop-eggs/
12 | dist/
13 | downloads/
14 | eggs/
15 | .eggs/
16 | lib/
17 | lib64/
18 | parts/
19 | sdist/
20 | var/
21 | wheels/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 | MANIFEST
26 |
27 | # Installer logs
28 | pip-log.txt
29 | pip-delete-this-directory.txt
30 |
31 | # Unit test / coverage reports
32 | .pytest_cache
33 | htmlcov/
34 | .tox
35 | .coverage
36 | .coverage.*
37 | .cache
38 | coverage.xml
39 | *.cover
40 |
41 | # Sphinx documentation
42 | docs/_build/
43 |
44 | # Environments
45 | .env
46 | .venv
47 | env
48 | venv
49 | ENV
50 | env.bak
51 | venv.bak
52 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.6.0
4 | hooks:
5 | - id: check-added-large-files
6 | - id: check-case-conflict
7 | - id: check-executables-have-shebangs
8 | - id: check-json
9 | - id: check-merge-conflict
10 | - id: check-symlinks
11 | - id: check-toml
12 | - id: check-yaml
13 | - id: debug-statements
14 | - id: end-of-file-fixer
15 | - id: name-tests-test
16 | - id: trailing-whitespace
17 |
18 | - repo: https://github.com/astral-sh/ruff-pre-commit
19 | # Ruff version.
20 | rev: v0.5.0
21 | hooks:
22 | # Run the linter.
23 | - id: ruff
24 |
25 | - repo: https://github.com/mgedmin/check-manifest
26 | rev: "0.49"
27 | hooks:
28 | - id: check-manifest
29 |
30 | default_language_version:
31 | # force all unspecified python hooks to run python3
32 | python: python3
33 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | version: 2
6 |
7 | build:
8 | os: ubuntu-20.04
9 | tools:
10 | python: "3.9"
11 |
12 | sphinx:
13 | configuration: docs/conf.py
14 |
15 | formats:
16 | - epub
17 | - pdf
18 |
19 | python:
20 | install:
21 | - method: pip
22 | path: .
23 | extra_requirements:
24 | - docs
25 |
--------------------------------------------------------------------------------
/LICENCE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016-2023 Forschungszentrum Juelich GmbH
4 | Copyright (c) 2018-2023 CEA
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENCE.txt
2 | include .coveragerc
3 | exclude .editorconfig
4 | include tox.ini
5 | exclude .pre-commit-config.yaml
6 | exclude .readthedocs.yaml
7 |
8 | prune examples
9 | prune experimental
10 |
11 | recursive-include docs *.rst
12 | include docs/conf.py
13 | include docs/Makefile
14 | prune docs/_build
15 |
16 | recursive-include script_tests *.py
17 | recursive-include unit_tests *.py
18 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | neuroglancer-scripts
2 | ====================
3 |
4 | Tools for converting volumetric images and surface meshes to the pre-computed format of Neuroglancer_.
5 |
6 |
7 | .. image:: https://img.shields.io/pypi/v/neuroglancer-scripts.svg
8 | :target: https://pypi.python.org/pypi/neuroglancer-scripts
9 | :alt: PyPI Version
10 |
11 | .. image:: https://github.com/HumanBrainProject/neuroglancer-scripts/actions/workflows/tox.yaml/badge.svg
12 | :target: https://github.com/HumanBrainProject/neuroglancer-scripts/actions/workflows/tox.yaml
13 | :alt: Build Status
14 |
15 | .. image:: https://codecov.io/gh/HumanBrainProject/neuroglancer-scripts/branch/master/graph/badge.svg
16 | :target: https://codecov.io/gh/HumanBrainProject/neuroglancer-scripts
17 | :alt: Coverage Status
18 |
19 | .. image:: https://readthedocs.org/projects/neuroglancer-scripts/badge/?version=latest
20 | :target: http://neuroglancer-scripts.readthedocs.io/en/latest/?badge=latest
21 | :alt: Documentation Status
22 |
23 |
24 | Installation
25 | ------------
26 |
27 | The easiest way to install the latest stable version of neuroglancer-scripts is
28 | through ``pip``. Using a virtual environment is recommended:
29 |
30 | .. code-block:: shell
31 |
32 | python3 -m venv venv/
33 | . venv/bin/activate
34 | pip install neuroglancer-scripts
35 |
36 |
37 | Usage
38 | -----
39 |
40 | See the `documentation <https://neuroglancer-scripts.readthedocs.io/>`_.
41 |
42 |
43 | Development
44 | -----------
45 |
46 | The code is hosted on https://github.com/HumanBrainProject/neuroglancer-scripts.
47 |
48 | Useful commands for development:
49 |
50 | .. code-block:: shell
51 |
52 | git clone https://github.com/HumanBrainProject/neuroglancer-scripts.git
53 |
54 | # Install in a virtual environment
55 | cd neuroglancer-scripts
56 | python3 -m venv venv/
57 | . venv/bin/activate
58 | pip install -e .[dev]
59 |
60 | # Tests
61 | pytest # run tests
62 | pytest --cov=neuroglancer_scripts --cov-report=html # detailed test coverage report
63 | tox # run tests under all supported Python versions
64 |
65 | # Please install pre-commit if you intend to contribute
66 | pre-commit install # install the pre-commit hook
67 |
68 |
69 | Contributing
70 | ============
71 |
72 | This repository uses `pre-commit`_ to ensure that all committed code follows minimal quality standards. Please install it and configure it to run as a pre-commit hook in your local repository (see above). Also, please note that the code quality checks may need a more recent version of Python than that required by neuroglancer_scripts itself (> 3.8 at the time of this writing).
73 |
74 |
75 | .. _Neuroglancer: https://github.com/google/neuroglancer
76 | .. _pre-commit: https://pre-commit.com/
77 |
78 |
79 | Acknowledgments
80 | ===============
81 |
82 | `cloud-volume <https://github.com/seung-lab/cloud-volume>`_ (BSD 3-Clause licensed) for compressed morton code and shard/minishard mask implementation.
83 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = neuroglancer-scripts
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.accessor.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.accessor module
2 | ======================================
3 |
4 | .. automodule:: neuroglancer_scripts.accessor
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.chunk_encoding.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.chunk\_encoding module
2 | =============================================
3 |
4 | .. automodule:: neuroglancer_scripts.chunk_encoding
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.downscaling.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.downscaling module
2 | =========================================
3 |
4 | .. automodule:: neuroglancer_scripts.downscaling
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.file_accessor.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.file\_accessor module
2 | ============================================
3 |
4 | .. automodule:: neuroglancer_scripts.file_accessor
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.http_accessor.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.http\_accessor module
2 | ============================================
3 |
4 | .. automodule:: neuroglancer_scripts.http_accessor
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.mesh.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.mesh module
2 | ============================================
3 |
4 | .. automodule:: neuroglancer_scripts.mesh
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.precomputed_io.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.precomputed\_io module
2 | =============================================
3 |
4 | .. automodule:: neuroglancer_scripts.precomputed_io
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.rst:
--------------------------------------------------------------------------------
1 | .. _python-api:
2 |
3 | Python API
4 | ==========
5 |
6 | neuroglancer\_scripts package
7 | -----------------------------
8 |
9 | .. automodule:: neuroglancer_scripts
10 | :members:
11 | :undoc-members:
12 | :show-inheritance:
13 |
14 | Submodules
15 | ----------
16 |
17 | .. toctree::
18 |
19 | neuroglancer_scripts.accessor
20 | neuroglancer_scripts.chunk_encoding
21 | neuroglancer_scripts.downscaling
22 | neuroglancer_scripts.file_accessor
23 | neuroglancer_scripts.http_accessor
24 | neuroglancer_scripts.mesh
25 | neuroglancer_scripts.precomputed_io
26 | neuroglancer_scripts.utils
27 |
--------------------------------------------------------------------------------
/docs/api/neuroglancer_scripts.utils.rst:
--------------------------------------------------------------------------------
1 | neuroglancer\_scripts\.utils module
2 | ===================================
3 |
4 | .. automodule:: neuroglancer_scripts.utils
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # neuroglancer-scripts documentation build configuration file, created by
4 | # sphinx-quickstart on Fri Feb 2 15:05:24 2018.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | #
19 | import os
20 | import sys
21 |
22 | sys.path.insert(0, os.path.abspath('../src/'))
23 |
24 | import neuroglancer_scripts # noqa: E402
25 |
26 | # -- General configuration ------------------------------------------------
27 |
28 | # If your documentation needs a minimal Sphinx version, state it here.
29 | #
30 | # needs_sphinx = '1.0'
31 |
32 | # Add any Sphinx extension module names here, as strings. They can be
33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 | # ones.
35 | extensions = [
36 | 'sphinx.ext.autodoc',
37 | 'sphinx.ext.todo',
38 | 'sphinx.ext.coverage',
39 | 'sphinx.ext.viewcode',
40 | 'sphinx.ext.githubpages',
41 | 'sphinx.ext.intersphinx',
42 | ]
43 |
44 | intersphinx_mapping = {
45 | 'python': ('https://docs.python.org/3', None),
46 | 'numpy': ('https://numpy.org/doc/stable/', None),
47 | }
48 |
49 | # Add any paths that contain templates here, relative to this directory.
50 | templates_path = ['_templates']
51 |
52 | # The suffix(es) of source filenames.
53 | # You can specify multiple suffix as a list of string:
54 | #
55 | # source_suffix = ['.rst', '.md']
56 | source_suffix = '.rst'
57 |
58 | # The master toctree document.
59 | master_doc = 'index'
60 |
61 | # General information about the project.
62 | project = 'neuroglancer-scripts'
63 | copyright = '2016–2018, Forschungszentrum Jülich GmbH'
64 | author = 'Yann Leprince'
65 |
66 | # The version info for the project you're documenting, acts as replacement for
67 | # |version| and |release|, also used in various other places throughout the
68 | # built documents.
69 | #
70 | # The full version, including alpha/beta/rc tags.
71 | release = neuroglancer_scripts.__version__
72 | # The short X.Y version.
73 | version = ".".join(release.split(".", maxsplit=2)[:2])
74 |
75 | # The language for content autogenerated by Sphinx. Refer to documentation
76 | # for a list of supported languages.
77 | #
78 | # This is also used if you do content translation via gettext catalogs.
79 | # Usually you set "language" from the command line for these cases.
80 | language = 'en'
81 |
82 | # List of patterns, relative to source directory, that match files and
83 | # directories to ignore when looking for source files.
84 | # This patterns also effect to html_static_path and html_extra_path
85 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
86 |
87 | # The name of the Pygments (syntax highlighting) style to use.
88 | pygments_style = 'sphinx'
89 |
90 | # If true, `todo` and `todoList` produce output, else they produce nothing.
91 | todo_include_todos = True
92 |
93 |
94 | # -- Options for HTML output ----------------------------------------------
95 |
96 | # The theme to use for HTML and HTML Help pages. See the documentation for
97 | # a list of builtin themes.
98 | #
99 | html_theme = 'alabaster'
100 |
101 | # Theme options are theme-specific and customize the look and feel of a theme
102 | # further. For a list of options available for each theme, see the
103 | # documentation.
104 | #
105 | # html_theme_options = {}
106 |
107 | # Add any paths that contain custom static files (such as style sheets) here,
108 | # relative to this directory. They are copied after the builtin static files,
109 | # so a file named "default.css" will overwrite the builtin "default.css".
110 | #
111 | # html_static_path = ['_static']
112 |
113 | # Custom sidebar templates, must be a dictionary that maps document names
114 | # to template names.
115 | #
116 | # This is required for the alabaster theme
117 | # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
118 | html_sidebars = {
119 | '**': [
120 | 'about.html',
121 | 'navigation.html',
122 | 'relations.html', # needs 'show_related': True theme option to display
123 | 'searchbox.html',
124 | 'donate.html',
125 | ]
126 | }
127 |
128 |
129 | # -- Options for HTMLHelp output ------------------------------------------
130 |
131 | # Output file base name for HTML help builder.
132 | htmlhelp_basename = 'neuroglancer-scriptsdoc'
133 |
134 |
135 | # -- Options for LaTeX output ---------------------------------------------
136 |
137 | latex_elements = {
138 | # The paper size ('letterpaper' or 'a4paper').
139 | #
140 | # 'papersize': 'letterpaper',
141 |
142 | # The font size ('10pt', '11pt' or '12pt').
143 | #
144 | # 'pointsize': '10pt',
145 |
146 | # Additional stuff for the LaTeX preamble.
147 | #
148 | # 'preamble': '',
149 |
150 | # Latex figure (float) alignment
151 | #
152 | # 'figure_align': 'htbp',
153 | }
154 |
155 | # Grouping the document tree into LaTeX files. List of tuples
156 | # (source start file, target name, title,
157 | # author, documentclass [howto, manual, or own class]).
158 | latex_documents = [
159 | (master_doc, 'neuroglancer-scripts.tex',
160 | 'neuroglancer-scripts Documentation',
161 | 'Yann Leprince', 'manual'),
162 | ]
163 |
164 |
165 | # -- Options for manual page output ---------------------------------------
166 |
167 | # One entry per manual page. List of tuples
168 | # (source start file, name, description, authors, manual section).
169 | man_pages = [
170 | (master_doc, 'neuroglancer-scripts', 'neuroglancer-scripts Documentation',
171 | [author], 1)
172 | ]
173 |
174 |
175 | # -- Options for Texinfo output -------------------------------------------
176 |
177 | # Grouping the document tree into Texinfo files. List of tuples
178 | # (source start file, target name, title, author,
179 | # dir menu entry, description, category)
180 | texinfo_documents = [
181 | (master_doc, 'neuroglancer-scripts', 'neuroglancer-scripts Documentation',
182 | author, 'neuroglancer-scripts', 'One line description of project.',
183 | 'Miscellaneous'),
184 | ]
185 |
--------------------------------------------------------------------------------
/docs/examples.rst:
--------------------------------------------------------------------------------
1 | .. _Examples:
2 |
3 | Examples
4 | ========
5 |
6 | .. _JuBrain:
7 |
8 | Conversion of JuBrain
9 | ---------------------
10 |
11 | In the ``examples/JuBrain`` directory of the source distribution, you will find
12 | two Nifti files based on the JuBrain human brain atlas, as published in version
13 | 2.2c of the `SPM Anatomy Toolbox
14 | <https://www.fz-juelich.de/inm/inm-1/DE/Forschung/_docs/SPMAnatomyToolbox/SPMAnatomyToolbox_node.html>`_.
15 | Note that you need to use `git-lfs <https://git-lfs.github.com/>`_ in order to
16 | see the contents of the NIfTI files (otherwise you can download them `from the
17 | repository on Github
18 | <https://github.com/HumanBrainProject/neuroglancer-scripts/tree/master/examples/JuBrain>`_.)
19 |
20 | Conversion of the grey-level template image (MNI Colin27 T1 MRI)
21 | ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
22 |
23 | .. code-block:: sh
24 |
25 | volume-to-precomputed \
26 | --generate-info \
27 | colin27T1_seg.nii.gz \
28 | colin27T1_seg/
29 |
30 | At this point, you need to edit ``colin27T1_seg/info_fullres.json`` to set
31 | ``"data_type": "uint8"``. This is needed because ``colin27T1_seg.nii.gz`` uses
32 | a peculiar encoding, with slope and intercept set in the NIfTI header, even
33 | though only integers between 0 and 255 are encoded.
34 |
35 | .. code-block:: sh
36 |
37 | generate-scales-info colin27T1_seg/info_fullres.json colin27T1_seg/
38 | volume-to-precomputed colin27T1_seg.nii.gz colin27T1_seg/
39 | compute-scales colin27T1_seg/
40 |
41 |
42 | Conversion of the Maximum Probability Map
43 | +++++++++++++++++++++++++++++++++++++++++
44 |
45 | .. code-block:: sh
46 |
47 | volume-to-precomputed --generate-info MPM.nii.gz MPM/
48 | generate-scales-info \
49 | --type=segmentation \
50 | --encoding=compressed_segmentation \
51 | MPM/info_fullres.json \
52 | MPM/
53 | volume-to-precomputed MPM.nii.gz MPM/
54 | compute-scales --downscaling-method=majority MPM/
55 |
56 |
57 | .. _BigBrain:
58 |
59 | Conversion of BigBrain
60 | ----------------------
61 |
62 | BigBrain is a very large image (6572 × 7404 × 5711 voxels) reconstructed from
63 | 7404 serial coronal sections of a human brain, with a resolution of about
64 | 20 microns.
65 |
66 | 1. Download slices from ftp://bigbrain.loris.ca/BigBrainRelease.2015/2D_Final_Sections/Coronal/Png/Full_Resolution/
67 |
68 | 2. Create ``info_fullres.json`` with the appropriate metadata:
69 |
70 | .. code-block:: json
71 |
72 | {
73 | "type": "image",
74 | "data_type": "uint8",
75 | "num_channels": 1,
76 | "scales": [
77 | {
78 | "chunk_sizes": [],
79 | "encoding": "raw",
80 | "key": "full",
81 | "resolution": [21166.6666666666666, 20000, 21166.6666666666666],
82 | "size": [6572, 7404, 5711],
83 | "voxel_offset": [0, 0, 0]
84 | }
85 | ]
86 | }
87 |
88 | 3. Create raw chunks
89 |
90 | .. code-block:: sh
91 |
92 | generate-scales-info info_fullres.json 8bit/
93 | slices-to-precomputed --input-orientation RIA 8bit/
94 | compute-scales --outside-value=255 8bit/
95 |
96 | 4. Optionally, convert raw chunks to JPEG:
97 |
98 | .. code-block:: sh
99 |
100 | generate-scales-info --encoding=jpeg 8bit/info jpeg/
101 | convert-chunks --jpeg-plane=xz 8bit/ jpeg/
102 |
103 | 5. Convert the segmentation volume
104 | (``examples/BigBrainRelease.2015/classif.nii.gz`` in the source
105 | distribution, this is a voxelized version of the meshes below).
106 |
107 | .. code-block:: sh
108 |
109 | volume-to-precomputed --generate-info classif.nii.gz classif/
110 | generate-scales-info \
111 | --encoding=compressed_segmentation \
112 | classif/info_fullres.json \
113 | classif/
114 | volume-to-precomputed --load-full-volume classif.nii.gz classif/
115 | compute-scales --downscaling-method=majority classif/
116 |
117 | 6. Add the cortical meshes to the segmentation (downloaded from
118 | ftp://bigbrain.loris.ca/BigBrainRelease.2015/3D_Surfaces/Apr7_2016/gii/).
119 | The meshes are displayed in the 3D view.
120 |
121 | Finally, convert the Gifti meshes to mesh fragments in pre-computed format,
122 | and create the JSON files that Neuroglancer needs in order to find the mesh
123 | fragments. The coordinate transformation is needed for two reasons:
124 |
125 | - the translation is the inverted transform of the classification volume (as
126 |   output by ``volume-to-precomputed``); it is needed to bring the mesh into
127 |   alignment with the volume;
128 |
129 | - the -1 coefficients on the diagonal are needed because the X and Y axes
130 | are inverted in these Gifti files.
131 |
132 | .. code-block:: sh
133 |
134 | mesh-to-precomputed \
135 | --coord-transform=-1,0,0,70.7666,0,-1,0,73.01,0,0,1,58.8777 \
136 | --mesh-name=grey-left \
137 | gray_left_327680.gii \
138 | classif/
139 | mesh-to-precomputed \
140 | --coord-transform=-1,0,0,70.7666,0,-1,0,73.01,0,0,1,58.8777 \
141 | --mesh-name=grey-right \
142 | gray_right_327680.gii \
143 | classif/
144 | mesh-to-precomputed \
145 | --coord-transform=-1,0,0,70.7666,0,-1,0,73.01,0,0,1,58.8777 \
146 | --mesh-name=white-left \
147 | white_left_327680.gii \
148 | classif/
149 | mesh-to-precomputed \
150 | --coord-transform=-1,0,0,70.7666,0,-1,0,73.01,0,0,1,58.8777 \
151 | --mesh-name=white-right \
152 | white_right_327680.gii \
153 | classif/
154 | link-mesh-fragments --no-colon-suffix mesh_labels.csv classif/
155 |
156 |
157 | Conversion of the grey-level template image (sharded precomputed)
158 | +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
159 |
160 | .. code-block:: sh
161 |
162 | volume-to-precomputed \
163 | --generate-info \
164 | --sharding 1,1,0 \
165 | colin27T1_seg.nii.gz \
166 | colin27T1_seg_sharded
167 |
168 | At this point, you need to edit ``colin27T1_seg_sharded/info_fullres.json`` to set
169 | ``"data_type": "uint8"``. This is needed because ``colin27T1_seg.nii.gz`` uses
170 | a peculiar encoding, with slope and intercept set in the NIfTI header, even
171 | though only integers between 0 and 255 are encoded.
172 |
173 | .. code-block:: sh
174 |
175 | generate-scales-info colin27T1_seg_sharded/info_fullres.json colin27T1_seg_sharded/
176 | volume-to-precomputed \
177 | --sharding 1,1,0 \
178 | colin27T1_seg.nii.gz \
179 | colin27T1_seg_sharded/
180 | compute-scales colin27T1_seg_sharded/
181 |
182 |
183 | .. _Conversion of Big Brain to sharded precomputed format:
184 |
Big Brain (20um) has been converted to neuroglancer precomputed format, and
is accessible at
187 | https://neuroglancer.humanbrainproject.eu/precomputed/BigBrainRelease.2015/8bit.
188 | Using this as the source volume, a sharded volume will be created.
189 |
190 | .. code-block:: sh
191 |
192 | mkdir sharded_bigbrain/
193 | curl --output sharded_bigbrain/info \
194 | https://neuroglancer.humanbrainproject.eu/precomputed/BigBrainRelease.2015/8bit/info
195 |
196 | At this point, sharded_bigbrain/info was edited to contain the desired sharding
197 | specification. For a smaller scale test run, 20um and 40um scales can be
198 | removed.
199 |
200 | .. code-block:: diff
201 |
202 | {
203 | "type": "image",
204 | "data_type": "uint8",
205 | "num_channels": 1,
206 | "scales": [
207 | {
208 | "chunk_sizes": [[64,64,64]],
209 | "encoding": "raw",
210 | "key": "20um",
211 | "resolution": [21166.6666666666666, 20000, 21166.6666666666666],
212 | "size": [6572, 7404, 5711],
213 | - "voxel_offset": [0, 0, 0]
214 | + "voxel_offset": [0, 0, 0],
215 | + "sharding": {
216 | + "@type": "neuroglancer_uint64_sharded_v1",
217 | + "data_encoding": "gzip",
218 | + "hash": "identity",
219 | + "minishard_bits": 2,
220 | + "minishard_index_encoding": "gzip",
221 | + "preshift_bits": 0,
222 | + "shard_bits": 2
223 | + }
224 | },
225 | // ...truncated for brevity
226 | ]
227 | }
228 |
229 | Start the conversion process.
230 |
231 | .. code-block:: sh
232 |
233 | convert-chunks \
234 | https://neuroglancer.humanbrainproject.eu/precomputed/BigBrainRelease.2015/8bit \
235 | ./sharded_bigbrain/
236 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. neuroglancer-scripts documentation master file, created by
2 | sphinx-quickstart on Fri Feb 2 15:05:24 2018.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Documentation of neuroglancer-scripts
7 | =====================================
8 |
9 | .. image:: https://img.shields.io/pypi/v/neuroglancer-scripts.svg
10 | :target: https://pypi.python.org/pypi/neuroglancer-scripts
11 | :alt: PyPI Version
12 |
13 | .. image:: https://github.com/HumanBrainProject/neuroglancer-scripts/actions/workflows/tox.yaml/badge.svg
14 | :target: https://github.com/HumanBrainProject/neuroglancer-scripts/actions/workflows/tox.yaml
15 | :alt: Build Status
16 |
17 | .. image:: https://codecov.io/gh/HumanBrainProject/neuroglancer-scripts/branch/master/graph/badge.svg
18 | :target: https://codecov.io/gh/HumanBrainProject/neuroglancer-scripts
19 | :alt: Coverage Status
20 |
21 | .. image:: https://readthedocs.org/projects/neuroglancer-scripts/badge/?version=latest
22 | :target: http://neuroglancer-scripts.readthedocs.io/en/latest/?badge=latest
23 | :alt: Documentation Status
24 |
25 |
26 | Installation
27 | ------------
28 |
29 | The easiest way to install the latest stable version of neuroglancer-scripts is
30 | through ``pip``. Using a virtual environment is recommended:
31 |
32 | .. code-block:: shell
33 |
34 | python3 -m venv venv/
35 | . venv/bin/activate
36 | pip install neuroglancer-scripts
37 |
38 |
39 | Usage
40 | -----
41 |
The ``neuroglancer-scripts`` package provides :ref:`command-line tools
<command-line>`, and a Python API, for converting
44 | volumetric images and surface meshes to formats used by Neuroglancer_.
45 |
46 |
47 | Table of contents
48 | -----------------
49 |
50 | .. toctree::
51 | :maxdepth: 2
52 |
53 | script-usage
54 | serving-data
55 | neuroglancer-info
56 | examples
57 | api/neuroglancer_scripts
58 | release-notes
59 |
60 |
61 | Indices and tables
62 | ==================
63 |
64 | * :ref:`genindex`
65 | * :ref:`modindex`
66 | * :ref:`search`
67 |
68 | .. _Neuroglancer: https://github.com/google/neuroglancer
69 |
--------------------------------------------------------------------------------
/docs/neuroglancer-info.rst:
--------------------------------------------------------------------------------
1 | Using the converted data in Neuroglancer
2 | ========================================
3 |
4 | .. _info:
5 |
6 | The Neuroglancer *info* file
7 | ----------------------------
8 |
See the `info JSON file specification
<https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/volume.md>`_
from Neuroglancer.
12 |
13 |
14 | .. _half_voxel_shift:
15 |
16 | Different conventions for coordinate transformations
17 | ----------------------------------------------------
18 |
19 | Beware that Neuroglancer departs from the NIfTI convention in associating
20 | physical coordinates to voxels: Neuroglancer associates physical coordinates to
21 | the *corner* of a voxel, whereas NIfTI specifies that they refer to the
22 | *centre* of a voxel. Therefore, **images will be offset by half a voxel
23 | relative to meshes** if you do not compensate for this offset.
24 |
25 | For standalone meshes, this offset can be compensated for by using the
26 | ``transform`` URL parameter. For pre-computed segmentation meshes however, there
27 | is no way of specifying a different ``transform`` for the image and the
28 | associated meshes: the offset must be applied to the vertex coordinates. This
29 | can be achieved by using the ``--coord-transform`` option.
30 |
31 | Please note that if you want to display images correctly with respect to
32 | physical coordinates (e.g. stereotaxic coordinates), you have to use the
33 | ``transform`` parameter as well. The ``transform.json`` which is output by
34 | ``volume-to-precomputed`` *does* take the half-voxel shift into account.
35 |
--------------------------------------------------------------------------------
/docs/release-notes.rst:
--------------------------------------------------------------------------------
1 | Release notes
2 | =============
3 |
4 | 1.2.0 (2 July 2024)
5 | --------------------
6 |
7 | New features
8 | ~~~~~~~~~~~~
9 |
- Add support for the sharded precomputed format. Many thanks to Xiao Gui for implementing this significant new feature in `PR #35 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/35>`_.
11 |
12 |
13 | Other improvements
14 | ~~~~~~~~~~~~~~~~~~
15 |
- Performance of the HTTP accessor was greatly improved by reusing a HTTP session. Thanks to Xiao Gui for implementing this in `PR #33 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/33>`_.
17 |
18 |
19 | Python version support
20 | ~~~~~~~~~~~~~~~~~~~~~~
21 |
22 | - Add support and tests for Python 3.12.
23 |
24 | - Drop support for Python 3.5.
25 |
26 |
27 | Bug fixes
28 | ~~~~~~~~~
29 |
30 | - Fix incompatibilities with NumPy 2.0.0, which notably caused incorrect data to be encoded when using the `compressed_segmentation` encoding.
31 |
32 |
33 | 1.1.0 (7 March 2023)
34 | --------------------
35 |
36 | New features
37 | ~~~~~~~~~~~~
38 |
- Add a `--compresslevel` option for controlling the GZip compression level. Many thanks to Bradley Lowekamp for implementing this feature in `PR #24 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/24>`_ / `PR #26 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/26>`_.
40 |
- Add the `--type` and `--encoding` options to the all-in-one `volume-to-precomputed-pyramid` script. Many thanks to Xiao Gui for implementing this feature in `PR #29 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/29>`_.
42 |
43 |
44 | Python version support
45 | ~~~~~~~~~~~~~~~~~~~~~~
46 |
47 | - Add support and tests for Python 3.11.
48 |
49 |
50 | Bug fixes
51 | ~~~~~~~~~
52 |
- Fix volume reading with recent Nibabel >= 5.0.0, due to the expired deprecation of `get_data`. Many thanks to Bradley Lowekamp for reporting this bug, and to Xiao Gui for implementing the fix in `PR #28 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/28>`_.
54 |
55 |
56 | 1.0.0 (17 November 2021)
57 | ------------------------
58 |
59 | New features
60 | ~~~~~~~~~~~~
61 |
- Support for RGB NIfTI. Many thanks to Xiao Gui for implementing this feature in `PR #15 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/15>`_.
63 |
- Support for LZW-compressed TIFF files. Many thanks to Xiao Gui for implementing this feature in `PR #14 <https://github.com/HumanBrainProject/neuroglancer-scripts/pull/14>`_.
65 |
66 |
67 | Python version support
68 | ~~~~~~~~~~~~~~~~~~~~~~
69 |
70 | - Add support and tests for Python 3.8, 3.9, and 3.10.
71 |
72 | - Drop support for Python 3.4
73 |
74 |
75 | Bug fixes
76 | ~~~~~~~~~
77 |
- Fix mesh conversion with recent versions of Nibabel (`commit c03bb5c8 <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/c03bb5c8>`_).
79 |
- Bump the minimum version of tqdm to prevent a blocking crash (see `tqdm bug#613 <https://github.com/tqdm/tqdm/issues/613>`_).
81 |
82 |
83 | 0.3.0 (28 November 2018)
84 | ------------------------
85 |
86 | Bug fixes
87 | ~~~~~~~~~
88 |
89 | - Fix the swarm of warning messages that appear when downscaling images of
90 | integer type by the averaging method.
91 |
92 |
93 | Other improvements
94 | ~~~~~~~~~~~~~~~~~~
95 |
96 | - The default downscaling method is now chosen automatically based on the
97 | ``type`` of dataset: ``image`` uses local averaging, ``segmentation`` uses
98 | striding.
99 |
100 | - The command-line interface of ``mesh-to-precomputed`` was changed to work
101 | with Accessors, instead of being restricted to files. The command also now
102 | reads and writes the ``info`` file to make sure that Neuroglancer knows where
103 | to find the mesh fragments.
104 |
105 | - Introduced a new command ``link-mesh-fragments`` to create the small JSON
106 | files that are required to link segmentation labels to mesh fragments.
107 |
108 | 0.2.0 (16 October 2018)
109 | -----------------------
110 |
111 | Changes that affect converted data
112 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
113 |
- Use correct rounding when downscaling by local average (`commit 8f77b486 <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/8f77b486>`_).
115 |
116 |
117 | Bug fixes
118 | ~~~~~~~~~
119 |
- Fixed passing of options between functions (`issue #7 <https://github.com/HumanBrainProject/neuroglancer-scripts/issues/7>`_,
  `commit 67430f13 <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/67430f13>`_,
  `commit f0e1e79d <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/f0e1e79d>`_).
  Thanks to Ben Falk for reporting and fixing the issue.
124 |
- Fixed the conversion of stacks of multi-channel images (`commit ff8d4dcc <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/ff8d4dcc>`_).
126 |
- Fixed a crash when flipping mesh triangles for negative-determinant
  transformations (`commit 97545914 <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/97545914>`_,
  `issue #5 <https://github.com/HumanBrainProject/neuroglancer-scripts/issues/5>`_).
130 |
- Fixed a bug where the chunk size of extremely anisotropic volumes was set to
  zero (`commit 92264c91 <https://github.com/HumanBrainProject/neuroglancer-scripts/commit/92264c91>`_).
133 |
- Fixed loading of ``uint64`` TIFF files (`issue #8 <https://github.com/HumanBrainProject/neuroglancer-scripts/issues/8>`_).
  Thanks to Ben Falk for reporting and fixing the issue.
136 |
137 |
138 | Other improvements
139 | ~~~~~~~~~~~~~~~~~~
140 |
141 | - Introduced a new command ``volume-to-precomputed-pyramid`` for all-in-one
142 | conversion of volume in simple cases.
143 |
144 | - Introduced a new command ``convert-chunks`` which can be used to convert
  existing data in Neuroglancer pre-computed format to a different encoding.
146 |
147 | - ``slice-to-precomputed`` now uses proper rounding and clamping for performing
148 | data-type conversion.
149 |
- Improved the conversion of meshes to Neuroglancer-compatible VTK format, and
  got rid of the dependency on `PyVTK <https://github.com/pearu/pyvtk>`_.
152 |
153 | - Improved the performance of common use-cases by loading the full volume in
154 | memory by default. The old behaviour can be restored with ``--mmap``.
155 |
156 |
157 | 0.1.0 (7 February 2018)
158 | -----------------------
159 |
160 | Initial PyPI release.
161 |
--------------------------------------------------------------------------------
/docs/script-usage.rst:
--------------------------------------------------------------------------------
1 | .. _command-line:
2 |
3 | Command-line usage
4 | ==================
5 |
6 | This page will teach you how to use the scripts in common cases.
7 |
8 |
9 | Converting a single-file (NIfTI) volume
10 | ---------------------------------------
11 |
12 | If your input dataset is stored in a single file that can be read by Nibabel_
13 | (such as a NIfTI_ file), then you are in luck. The
14 | ``volume-to-precomputed-pyramid`` script will do all the work for you. It will
15 | convert your dataset to raw pre-computed chunks.
16 |
17 | Usage: ``volume-to-precomputed-pyramid [-h] [--ignore-scaling] [--mmap]
18 | [--input-min INPUT_MIN] [--input-max INPUT_MAX] [--no-gzip] [--flat]
19 | [--downscaling-method {auto,average,majority,stride}] [--outside-value
20 | OUTSIDE_VALUE] volume_filename dest_url``.
21 |
22 | You may want to use :ref:`convert-chunks ` in a second step, to
further compress your dataset with JPEG or ``compressed_segmentation``
24 | encoding.
25 |
26 |
27 | Converting image volumes
28 | ------------------------
29 |
30 | The instructions below are applicable to the two accepted input data layouts:
31 |
32 | - Volumes in NIfTI format (or any other format readable by Nibabel). See
33 | :ref:`JuBrain` for an example.
34 |
35 | - Series of 2D slices. See :ref:`BigBrain` for an example.
36 |
37 |
1. Write the metadata for the full-resolution image `in JSON format
   <https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/volume.md>`_.
40 | If your input data is readable by Nibabel, you can use
   ``volume-to-precomputed --generate-info`` to do the job. Here is an example
42 | with minimal metadata (note that the resolution is expressed in
43 | *nanometres*):
44 |
45 | .. code-block:: json
46 |
47 | {
48 | "type": "image",
49 | "data_type": "uint8",
50 | "num_channels": 1,
51 | "scales": [
52 | {
53 | "size": [151, 188, 154],
54 | "resolution": [1000000, 1000000, 1000000],
55 | "voxel_offset": [0, 0, 0]
56 | }
57 | ]
58 | }
59 |
60 | 2. Create metadata for all scales using ``generate-scales-info`` on the
61 | previously created JSON file. This step writes a file named ``info`` in the
62 | current directory, which is needed by Neuroglancer as well as by all the
63 | subsequent steps. You are advised to create a fresh directory for each
64 | dataset.
65 |
66 | You can use any lossless encoding for the following steps (i.e. ``raw`` or
67 | ``compressed_segmentation``).
68 |
69 | At this stage you may want to run ``scale-stats``, which displays the
70 | number of chunks that will be created, and their uncompressed size. Thus,
71 | you can make sure that you have enough disk space before proceeding.
72 |
73 | 3. Convert your data to raw full-resolution chunks by using one of these
74 | scripts:
75 |
76 | - ``slices-to-precomputed``
77 | - ``volume-to-precomputed``
78 |
79 | 4. Compute downscaled pyramid levels using ``compute-scales``. Make sure to
80 | use the correct downscaling method (``average`` for greyscale images,
81 | ``majority`` for segmentations, or ``stride`` for a fast low-quality
82 | downsampling).
83 |
84 | At this point the raw-format data is ready to be displayed in Neuroglancer.
85 |
86 | .. _convert-chunks:
87 |
88 | 5. Optionally, you can convert the raw chunks to a compressed format using
89 | ``convert-chunks``. You will need to generate these compressed chunks in
90 | a separate directory from the raw chunks, and generate a suitable *info*
91 | file by using the ``--encoding`` parameter to ``generate-scales-info``.
92 | Two compressed encodings are available:
93 |
94 | - ``compressed_segmentation``: lossless compression, recommended for images
95 | containing discrete labels;
96 | - ``jpeg``: lossy JPEG compression, see ``--jpeg-quality`` and
97 | ``--jpeg-plane``.
98 |
99 |
100 | Converting surface meshes
101 | -------------------------
102 |
103 | A surface mesh can be displayed in two ways in Neuroglancer: associated with a
104 | segmentation label as part of a ``segmentation`` type layer, or as a standalone
105 | mesh layer.
106 |
107 | A mesh associated with a segmentation label needs to be in a
108 | Neuroglancer-specific binary precomputed format. ``mesh-to-precomputed`` can be
109 | used to convert meshes to this format. The ``link-mesh-fragments`` command must
110 | then be used so that Neuroglancer knows what meshes are associated to each
111 | label of the segmentation. See the last step of :ref:`BigBrain` for an example.
112 |
113 |
114 | .. _Nibabel: https://nipy.org/nibabel/
115 | .. _NIfTI: https://nifti.nimh.nih.gov/
116 |
--------------------------------------------------------------------------------
/docs/serving-data.rst:
--------------------------------------------------------------------------------
1 | .. _serving-data:
2 |
3 | Serving the converted data
4 | ==========================
5 |
6 | .. _layouts:
7 |
8 | On-disk file layout
9 | -------------------
10 |
11 | Neuroglancer expects all chunks from a scale to be located in the same
12 | directory (*flat* layout). This is problematic when working with large volumes,
13 | because filesystems have problems with very large directories. As a result, a
14 | deep layout is used by default when saving the chunks to the filesystem:
15 |
16 | - Default deep layout: ``key/x-X/y-Y/z-Z``.
17 |
18 | The default layout is hierarchical, in order to keep the number of directory
19 | entries to a reasonable number. This means that you need to serve the data
20 | with URL rewriting.
21 |
22 | - Flat layout: ``key/x-X_y-Y_z-Z``
23 |
24 | Chunks are stored in the layout where Neuroglancer will fetch them, so you do
25 | not need to configure any URL rewriting. Do not use with very large datasets.
26 | This layout can be used by passing the ``--flat`` option to the conversion
27 | scripts.
28 |
29 |
30 | nginx
31 | -----
32 |
33 | A Docker image running a specially-configured *nginx* web-server is available
34 | for serving the converted data: `neuroglancer-docker
35 | `_.
36 |
37 | The relevant portion of the nginx configuration is reproduced here:
38 |
39 | .. code-block:: nginx
40 |
41 | gzip_static always;
42 | # All browsers that are compatible with Neuroglancer support gzip encoding
43 | gunzip off;
44 |
45 | location ~ ^(.*)/([0-9]+-[0-9]+)_([0-9]+-[0-9]+)_([0-9]+-[0-9]+)$ {
46 | # Chunks are stored in per-axis sub-directories to prevent
47 | # having too many files in a single directory
48 | alias $1/$2/$3/$4;
49 | }
50 |
51 | location ~ ^(.*):0$ {
52 | # Microsoft filesystems do not support colons in file names,
53 | # but they are needed for pre-computed meshes (e.g. 100:0). As
54 | # :0 is the most common (only?) suffix in use, we look for a
55 | # file with that suffix stripped.
56 | try_files $uri $1.json $1 =404;
57 | }
58 |
59 |
60 | Apache
61 | ------
62 |
63 | Alternatively, you serve the pre-computed images using Apache, with the
64 | following Apache configuration (e.g. put it in a ``.htaccess`` file):
65 |
66 | .. code-block:: apacheconf
67 |
68 | # If you get a 403 Forbidden error, try to comment out the Options directives
69 | # below (they may be disallowed by your server's AllowOverride setting).
70 |
71 |
72 | # Needed to use the data from a Neuroglancer instance served from a
73 | # different server (see http://enable-cors.org/server_apache.html).
74 | Header set Access-Control-Allow-Origin "*"
75 |
76 |
77 | # Data chunks are stored in sub-directories, in order to avoid having
78 | # directories with millions of entries. Therefore we need to rewrite URLs
79 | # because Neuroglancer expects a flat layout.
80 | Options FollowSymLinks
81 | RewriteEngine On
82 | RewriteRule "^(.*)/([0-9]+-[0-9]+)_([0-9]+-[0-9]+)_([0-9]+-[0-9]+)$" "$1/$2/$3/$4"
83 |
84 | # Microsoft filesystems do not support colons in file names, but pre-computed
85 | # meshes use a colon in the URI (e.g. 100:0). As :0 is the most common (only?)
86 | # suffix in use, we will serve a file that has this suffix stripped.
87 | RewriteCond "%{REQUEST_FILENAME}" !-f
88 | RewriteRule "^(.*):0$" "$1"
89 |
90 |
91 | # Allow serving pre-compressed files, which can save a lot of space for raw
92 | # chunks, compressed segmentation chunks, and mesh chunks.
93 | #
94 | # The AddType directive should in theory be replaced by a "RemoveType .gz"
95 | # directive, but with that configuration Apache fails to serve the
96 | # pre-compressed chunks (confirmed with Debian version 2.2.22-13+deb7u6).
97 | # Fixes welcome.
98 | Options Multiviews
99 | AddEncoding x-gzip .gz
100 | AddType application/octet-stream .gz
101 |
102 |
103 |
104 | Serving sharded data
105 | ====================
106 |
107 |
108 | Content-Encoding
109 | ----------------
110 |
Sharded data must be served without any `Content-Encoding header
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding>`_.
113 |
114 |
115 | HTTP Range request
116 | ------------------
117 |
Sharded data must be served by a webserver that supports the `Range header
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range>`_.
120 |
For development uses, Python's bundled SimpleHTTPServer does not support
range requests. Recommended
alternatives are:
124 |
- `http-server (NodeJS) <https://www.npmjs.com/package/http-server>`_
126 |
- `RangeHTTPServer (Python) <https://github.com/danvk/RangeHTTPServer>`_
128 |
For production uses, most modern static web servers support range requests.
Below is a list of web servers that were tested and work with sharded
volumes.
132 |
133 | - nginx 1.25.3
134 |
135 | - httpd 2.4.58
136 |
137 | - caddy 2.7.5
138 |
In addition, most object storage services also support range requests without
additional configuration.
141 |
142 |
143 | Enable Access-Control-Allow-Origin header
144 | -----------------------------------------
145 |
`Access-Control-Allow-Origin
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin>`_
will need to be enabled if the volume is expected to be accessed cross origin.
149 |
--------------------------------------------------------------------------------
/examples/BigBrainRelease.2015/.gitignore:
--------------------------------------------------------------------------------
1 | */
2 | *.gii
3 |
--------------------------------------------------------------------------------
/examples/BigBrainRelease.2015/classif.nii.gz:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d85e30fdec530186f584fa00bea0aba3dcd945fbebbd9b3c5a209a0e5a621063
3 | size 4397301
4 |
--------------------------------------------------------------------------------
/examples/BigBrainRelease.2015/info_fullres.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "image",
3 | "data_type": "uint8",
4 | "num_channels": 1,
5 | "scales": [
6 | {
7 | "chunk_sizes": [],
8 | "encoding": "raw",
9 | "key": "full",
10 | "resolution": [21166.6666666666666, 20000, 21166.6666666666666],
11 | "size": [6572, 7404, 5711],
12 | "voxel_offset": [0, 0, 0]
13 | }
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/examples/BigBrainRelease.2015/mesh_labels.csv:
--------------------------------------------------------------------------------
1 | 0
2 | 100,grey-left,grey-right
3 | 200,white-left,white-right
4 |
--------------------------------------------------------------------------------
/examples/JuBrain/.gitignore:
--------------------------------------------------------------------------------
1 | */
2 |
--------------------------------------------------------------------------------
/examples/JuBrain/MPM.nii.gz:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:aa8890a5b00aece491b87a728a17ca91998792b129463cfab3d7ced6d71ce516
3 | size 191326
4 |
--------------------------------------------------------------------------------
/examples/JuBrain/colin27T1_seg.nii.gz:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:586b06c8b326d9b79121bff2141682012e17d4876a773e634b8ca1f9ca01eab5
3 | size 2418701
4 |
--------------------------------------------------------------------------------
/experimental/mesh_to_vtk.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2017, Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import sys
9 |
10 | import neuroglancer_scripts.mesh
11 | import nibabel
12 | import numpy as np
13 |
14 |
def mesh_file_to_vtk(input_filename, output_filename, data_format="ascii",
                     coord_transform=None):
    """Convert a mesh file read by nibabel to VTK format"""
    # NOTE(review): data_format is accepted but never used below — the output
    # format is whatever save_mesh_as_neuroglancer_vtk writes by default.
    # TODO confirm whether it should be forwarded.
    print(f"Reading {input_filename}")
    mesh = nibabel.load(input_filename)
    print()
    print("Summary")
    print("=======")
    mesh.print_summary()

    point_arrays = mesh.get_arrays_from_intent("NIFTI_INTENT_POINTSET")
    assert len(point_arrays) == 1
    points = point_arrays[0].data

    triangle_arrays = mesh.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE")
    assert len(triangle_arrays) == 1
    triangles = triangle_arrays[0].data

    if coord_transform is not None:
        if coord_transform.shape[0] == 4:
            # A full homogeneous matrix must end with the canonical last row.
            assert np.all(coord_transform[3, :] == [0, 0, 0, 1])
        linear = coord_transform[:3, :3]
        translation = coord_transform[:3, 3]
        # Equivalent to (linear @ points.T + translation[:, None]).T
        points = points @ linear.T + translation
        if np.linalg.det(linear) < 0:
            # Flip the triangles to fix inside/outside
            triangles = np.flip(triangles, axis=1)

    # Gifti uses millimetres, Neuroglancer expects nanometres
    points *= 1e6

    with open(output_filename, "w") as output_file:
        neuroglancer_scripts.mesh.save_mesh_as_neuroglancer_vtk(
            output_file, points, triangles
        )
51 |
52 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param argv: full argument vector (``argv[0]`` is the program name)
    :returns: parsed arguments; ``coord_transform`` is converted to a NumPy
        array of shape (3, 4) or (4, 4), or left as ``None`` if not given
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Convert a mesh (readable by nibabel, e.g. in Gifti format) to VTK file format
""")
    parser.add_argument("input_filename")
    parser.add_argument("output_filename")
    parser.add_argument("--ascii", action="store_const",
                        dest="format", const="ascii", default="ascii",
                        help="save the VTK file in ASCII format (default)")
    parser.add_argument("--binary", action="store_const",
                        dest="format", const="binary",
                        help="save the VTK file in binary format"
                        " (not supported by Neuroglancer at this time)")
    parser.add_argument("--coord-transform",
                        help="affine transformation to be applied to the"
                        " coordinates, as a 4x4 matrix in homogeneous"
                        " coordinates, in comma-separated row-major order"
                        " (the last row is always 0 0 0 1 and may be omitted)"
                        " (e.g. --coord-transform=1,0,0,0,0,1,0,0,0,0,1,0)")
    args = parser.parse_args(argv[1:])

    if args.coord_transform is not None:
        try:
            # Parse explicitly instead of using np.fromstring(..., sep=","),
            # which is deprecated and silently returns a truncated array on
            # malformed input instead of raising ValueError.
            matrix = np.array([float(x)
                               for x in args.coord_transform.split(",")])
        except ValueError as exc:
            parser.error(f"cannot parse --coord-transform: {exc.args[0]}"
                         )
        if len(matrix) == 12:
            matrix = matrix.reshape(3, 4)
        elif len(matrix) == 16:
            matrix = matrix.reshape(4, 4)
        else:
            parser.error("--coord-transform must have 12 or 16 elements"
                         f" ({len(matrix)} passed)")

        args.coord_transform = matrix

    return args
94 |
95 |
def main(argv):
    """The script's entry point."""
    args = parse_command_line(argv)
    status = mesh_file_to_vtk(args.input_filename, args.output_filename,
                              data_format=args.format,
                              coord_transform=args.coord_transform)
    # mesh_file_to_vtk returns None on success; report exit status 0.
    return status or 0
102 |
103 |
104 | if __name__ == "__main__":
105 | sys.exit(main(sys.argv))
106 |
--------------------------------------------------------------------------------
/experimental/off_to_vtk.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2017, Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import gzip
9 | import re
10 | import sys
11 |
12 | import numpy as np
13 | import pyvtk
14 |
15 | # See description of OFF file format at
16 | # http://www.geomview.org/docs/html/OFF.html
17 |
18 |
def off_mesh_file_to_vtk(input_filename, output_filename, data_format="binary",
                         coord_transform=None):
    """Convert a mesh file from OFF format to VTK format

    :param str input_filename: gzip-compressed ASCII OFF file to read
        (NOTE(review): gzip is assumed unconditionally — a plain OFF file
        will fail here; confirm this is intended)
    :param str output_filename: VTK file to write
    :param str data_format: "ascii" or "binary", passed to pyvtk's tofile()
    :param coord_transform: optional affine transform, a (3, 4) or (4, 4)
        NumPy matrix in homogeneous coordinates, applied to the vertices
    """
    print(f"Reading {input_filename}")
    with gzip.open(input_filename, "rt") as f:
        # First line is the header keyword, e.g. "OFF" or "CNOFF".
        header_keyword = f.readline().strip()
        match = re.match(r"(ST)?(C)?(N)?(4)?(n)?OFF", header_keyword)
        # TODO check features from header keyword
        assert match
        assert not match.group(5)  # nOFF is unsupported
        # Second line holds the vertex / face (/ edge) counts.
        dimension_line = f.readline().strip()
        match = re.match(r"([+-]?[0-9]+)\s+([+-]?[0-9]+)(\s+([+-]?[0-9]+))?",
                         dimension_line)
        assert match
        num_vertices = int(match.group(1))
        num_triangles = int(match.group(2))
        # One vertex per line; extra per-vertex fields (colour, normal,
        # texture coordinates) beyond x, y, z are ignored.
        vertices = np.empty((num_vertices, 3), dtype=float)
        for i in range(num_vertices):
            components = f.readline().split()
            assert len(components) >= 3
            vertices[i, 0] = float(components[0])
            vertices[i, 1] = float(components[1])
            vertices[i, 2] = float(components[2])
        # One face per line; only triangles are accepted (leading "3").
        triangles = np.empty((num_triangles, 3), dtype=np.int_)
        for i in range(num_triangles):
            components = f.readline().split()
            assert len(components) >= 4
            assert components[0] == "3"
            triangles[i, 0] = float(components[1])
            triangles[i, 1] = float(components[2])
            triangles[i, 2] = float(components[3])
    print()
    print(f"{num_vertices} vertices and {num_triangles} triangles read"
          )

    points = vertices

    if coord_transform is not None:
        if coord_transform.shape[0] == 4:
            # A full homogeneous matrix must end with the canonical last row.
            assert np.all(coord_transform[3, :] == [0, 0, 0, 1])
        points = points.T
        points = np.dot(coord_transform[:3, :3], points)
        points += coord_transform[:3, 3, np.newaxis]
        points = points.T
        if np.linalg.det(coord_transform[:3, :3]) < 0:
            # Flip the triangles to fix inside/outside
            triangles = np.flip(triangles, axis=1)

    # Input coordinates are assumed to be millimetres; Neuroglancer expects
    # nanometres. NOTE(review): this comment originally said "Gifti" (copied
    # from the Gifti converter) — confirm the unit convention for OFF input.
    points *= 1e6

    # Workaround: dtype must be np.int_ (pyvtk does not recognize int32 as
    # integers)
    triangles = triangles.astype(np.int_)

    vtk_mesh = pyvtk.PolyData(points, polygons=triangles)

    vtk_data = pyvtk.VtkData(
        vtk_mesh,
        "Converted using "
        "https://github.com/HumanBrainProject/neuroglancer-scripts")
    vtk_data.tofile(output_filename, format=data_format)
81 |
82 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param argv: full argument vector (``argv[0]`` is the program name)
    :returns: parsed arguments; ``coord_transform`` is converted to a NumPy
        array of shape (3, 4) or (4, 4), or left as ``None`` if not given
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Convert a mesh from OFF format to VTK file format
""")
    parser.add_argument("input_filename")
    parser.add_argument("output_filename")
    parser.add_argument("--ascii", action="store_const",
                        dest="format", const="ascii", default="ascii",
                        help="save the VTK file in ASCII format (default)")
    parser.add_argument("--binary", action="store_const",
                        dest="format", const="binary",
                        help="save the VTK file in binary format"
                        " (not supported by Neuroglancer at this time)")
    parser.add_argument("--coord-transform",
                        help="affine transformation to be applied to the"
                        " coordinates, as a 4x4 matrix in homogeneous"
                        " coordinates, in comma-separated row-major order"
                        " (the last row is always 0 0 0 1 and may be omitted)"
                        " (e.g. --coord-transform=1,0,0,0,0,1,0,0,0,0,1,0)")
    args = parser.parse_args(argv[1:])

    if args.coord_transform is not None:
        try:
            # Parse explicitly instead of using np.fromstring(..., sep=","),
            # which is deprecated and silently returns a truncated array on
            # malformed input instead of raising ValueError.
            matrix = np.array([float(x)
                               for x in args.coord_transform.split(",")])
        except ValueError as exc:
            parser.error(f"cannot parse --coord-transform: {exc.args[0]}"
                         )
        if len(matrix) == 12:
            matrix = matrix.reshape(3, 4)
        elif len(matrix) == 16:
            matrix = matrix.reshape(4, 4)
        else:
            parser.error("--coord-transform must have 12 or 16 elements"
                         f" ({len(matrix)} passed)")

        args.coord_transform = matrix

    return args
124 |
125 |
def main(argv):
    """The script's entry point."""
    args = parse_command_line(argv)
    status = off_mesh_file_to_vtk(args.input_filename, args.output_filename,
                                  data_format=args.format,
                                  coord_transform=args.coord_transform)
    # off_mesh_file_to_vtk returns None on success; report exit status 0.
    return status or 0
132 |
133 |
# Standard script entry point: run main() and propagate its exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
136 |
--------------------------------------------------------------------------------
/experimental/stl_to_precomputed.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2017, Forschungszentrum Juelich GmbH
4 | # Author: Pavel Chervakov
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | # noqa
9 |
10 | """
11 | Convert a mesh from STL ASCII to Neuroglancer pre-computed mesh format
12 |
13 | Currently STL triangles are just written to the output as is, i.e. normals are
14 | not considered and equal vertices are not reused.
15 | """
16 |
17 | import gzip
18 | import struct
19 | import sys
20 | from functools import partial
21 |
22 | # import numpy as np
23 |
# Prefix of a vertex line inside a facet block of an ASCII STL file.
__VERTEX_STR_PREFIX = ' vertex '
25 |
26 |
def __get_vertex(vstr: str, voxel_size):
    """Parse one ASCII STL vertex line into a list of three scaled floats.

    Each coordinate is multiplied by ``1e6 * voxel_size`` (presumably a
    millimetre-to-nanometre unit conversion -- verify against the data).
    """
    assert vstr.startswith(__VERTEX_STR_PREFIX)
    # str.split() discards surrounding whitespace, including the trailing
    # newline.  The previous [:-1] slice assumed a newline-terminated line
    # and would corrupt the last coordinate otherwise.
    return [float(coord) * 1e6 * voxel_size
            for coord in vstr[len(__VERTEX_STR_PREFIX):].split()]
31 |
32 |
def __get_vertices(septuple, voxel_size):
    """Extract the three vertices from one 7-line ASCII STL facet block."""
    assert len(septuple) == 7
    # Sanity-check the fixed lines surrounding the three vertex lines.
    assert septuple[0].startswith(' facet normal')
    assert septuple[1] == ' outer loop\n'
    assert septuple[5] == ' endloop\n'
    assert septuple[6] == ' endfacet\n'
    return [__get_vertex(line, voxel_size) for line in septuple[2:5]]
41 |
42 |
43 | def stl_file_to_precomputed(
44 | input_filename, output_filename, voxel_size=1.0, compress=True):
45 | with open(input_filename) as input_file:
46 | lines = input_file.readlines()
47 | assert lines[0] == 'solid ascii\n'
48 | assert lines[-1] == 'endsolid\n'
49 | assert (len(lines) - 2) % 7 == 0
50 |
51 | gv = partial(__get_vertices, voxel_size=voxel_size)
52 | triples = list(map(gv, [lines[(i * 7) + 1: ((i + 1) * 7) + 1]
53 | for i in range((len(lines) - 2) // 7)]))
54 | vertices = [vertex for triple in triples for vertex in triple]
55 | num_vertices = len(vertices)
56 | buf = bytearray()
57 | buf += struct.pack("=51.0.0",
4 | "wheel",
5 | ]
6 | build-backend = "setuptools.build_meta"
7 |
8 | [tool.ruff]
9 | target-version = "py37" # py36 does not exist
10 | line-length = 79
11 | indent-width = 4
12 |
13 | [tool.ruff.lint]
14 | extend-select = [
15 | "F",
16 | "E",
17 | "W",
18 | "I",
19 | "N",
20 | "NPY",
21 | "UP",
22 | ]
23 | ignore = [
24 | "N802", # Gives false positives when a name contains an uppercase acronym
25 | ]
26 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = neuroglancer-scripts
3 | version = attr: neuroglancer_scripts.__version__
4 | author = Yann Leprince
5 | author_email = yann.leprince@cea.fr
6 | description = Conversion of images to the Neuroglancer pre-computed format
7 | long_description = file: README.rst
8 | long_description_content_type = text/x-rst
9 | url = https://github.com/HumanBrainProject/neuroglancer-scripts
10 | classifiers =
11 | Development Status :: 5 - Production/Stable
12 | Environment :: Console
13 | Intended Audience :: Science/Research
14 | License :: OSI Approved :: MIT License
15 | Programming Language :: Python :: 3
16 | Programming Language :: Python :: 3.6
17 | Programming Language :: Python :: 3.7
18 | Programming Language :: Python :: 3.8
19 | Programming Language :: Python :: 3.9
20 | Programming Language :: Python :: 3.10
21 | Programming Language :: Python :: 3.11
22 | Programming Language :: Python :: 3.12
23 | Topic :: Scientific/Engineering :: Medical Science Apps.
24 | Topic :: Scientific/Engineering :: Visualization
25 | keywords = neuroimaging
26 |
27 | [options]
28 | package_dir =
29 | = src
30 | packages = find:
31 | python_requires = ~=3.6
32 | install_requires =
33 | nibabel >= 2
34 | numpy >= 1.17
35 | pillow >= 1.1.6
36 | requests >= 2
37 | scikit-image # TODO use pillow instead
38 | tqdm ~= 4.29
39 | imagecodecs # required to read LZW compressed tiff files
40 |
41 | [options.packages.find]
42 | where = src
43 |
44 | [options.extras_require]
45 | # Remember to keep test dependencies synchronized with the list of dependencies
46 | # in tox.ini (at the moment: pytest, requests-mock)
47 | dev =
48 | pytest
49 | requests-mock
50 | check-manifest
51 | pep8-naming
52 | pre-commit
53 | pytest-cov
54 | readme_renderer
55 | ruff
56 | sphinx
57 | tox
58 | docs =
59 | sphinx
60 |
61 | [options.entry_points]
62 | console_scripts =
63 | compute-scales = neuroglancer_scripts.scripts.compute_scales:main
64 | convert-chunks = neuroglancer_scripts.scripts.convert_chunks:main
65 | generate-scales-info = neuroglancer_scripts.scripts.generate_scales_info:main
66 | link-mesh-fragments = neuroglancer_scripts.scripts.link_mesh_fragments:main
67 | mesh-to-precomputed = neuroglancer_scripts.scripts.mesh_to_precomputed:main
68 | scale-stats = neuroglancer_scripts.scripts.scale_stats:main
69 | slices-to-precomputed = neuroglancer_scripts.scripts.slices_to_precomputed:main
70 | volume-to-precomputed = neuroglancer_scripts.scripts.volume_to_precomputed:main
71 | volume-to-precomputed-pyramid = neuroglancer_scripts.scripts.volume_to_precomputed_pyramid:main
72 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | """Conversion of images to the Neuroglancer pre-computed format.
7 |
8 | .. todo:: introduction to the high-level APIs
9 | """
10 |
11 | # Version used by setup.cfg and docs/conf.py (parsed with a regular
12 | # expression).
13 | #
14 | # Release checklist (based on https://packaging.python.org/):
15 | # 0. If this is a new major or minor version, create a X.Y release branch
# 1. Ensure that tests pass for all supported Python versions (Travis CI),
17 | # ensure that the API documentation is complete (sphinx-apidoc -o docs/api/
18 | # src/neuroglancer_scripts);
19 | # 2. Update the release notes;
20 | # 3. Bump the version number in this file (without committing yet);
21 | # 4. pip install -U build twine
22 | # 5. python3 -m build
23 | # 6. twine upload --repository-url https://test.pypi.org/legacy/ dist/*
24 | # 7. Commit the updated version number
25 | # 8. Tag the commit (git tag -a vX.Y.Z). The release notes for the last
26 | # version should be converted to plain text and included in the tag
27 | # message:
28 | # pandoc -t plain docs/release-notes.rst
29 | # For the GitHub release message, this line is useful:
30 | # pandoc --wrap=none -t gfm docs/release-notes.rst
31 | # 9. Bump the version number in this file to something that ends with .dev0
32 | # and commit
33 | # 10. Push the master branch and the new tag to Github
# 11. Create the Release on GitHub, which will also publish the release to PyPI
35 | # through an Action (.github/workflows/publish_to_pypi.yaml).
36 | __version__ = "1.3.0.dev0"
37 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/_compressed_segmentation.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import functools
7 | import itertools
8 | import struct
9 |
10 | import numpy as np
11 |
12 | from neuroglancer_scripts.chunk_encoding import InvalidFormatError
13 | from neuroglancer_scripts.utils import ceil_div
14 |
15 |
def pad_block(block, block_size):
    """Pad a block up to block_size, filling with its most frequent value."""
    values, counts = np.unique(block, return_counts=True)
    # np.unique sorts values, so ties resolve to the smallest value.
    fill_value = values[counts.argmax()]
    pad_widths = [(0, wanted - actual)
                  for wanted, actual in zip(block_size, block.shape)]
    return np.pad(block, pad_widths,
                  mode="constant", constant_values=fill_value)
25 |
26 |
def number_of_encoding_bits(elements):
    """Return the smallest usable bit width that can encode `elements` values.

    Only the widths 0, 1, 2, 4, 8, 16, and 32 are considered.
    """
    for candidate in (0, 1, 2, 4, 8, 16, 32):
        if elements <= 2 ** candidate:
            return candidate
    raise AssertionError("Too many elements")
32 |
33 |
# Data types supported by the compressed_segmentation encoding, expressed
# as little-endian NumPy dtypes.
COMPRESSED_SEGMENTATION_DATA_TYPES = (
    np.dtype(np.uint32).newbyteorder("<"),
    np.dtype(np.uint64).newbyteorder("<"),
)
38 |
39 |
40 | def encode_chunk(chunk, block_size):
41 | # Construct file in memory step by step
42 | num_channels = chunk.shape[0]
43 | buf = bytearray(4 * num_channels)
44 |
45 | assert chunk.dtype in COMPRESSED_SEGMENTATION_DATA_TYPES
46 |
47 | for channel in range(num_channels):
48 | # Write offset of the current channel into the header
49 | assert len(buf) % 4 == 0
50 | struct.pack_into(" len(buf):
143 | raise InvalidFormatError("compressed_segmentation channel offset "
144 | "is too large (truncated file?)")
145 | _decode_channel_into(
146 | chunk, channel, buf[offset:next_offset], block_size
147 | )
148 |
149 | return chunk
150 |
151 |
152 | def _decode_channel_into(chunk, channel, buf, block_size):
153 | # Grid size (number of blocks in the chunk)
154 | gx = ceil_div(chunk.shape[3], block_size[0])
155 | gy = ceil_div(chunk.shape[2], block_size[1])
156 | gz = ceil_div(chunk.shape[1], block_size[2])
157 | block_num_elem = block_size[0] * block_size[1] * block_size[2]
158 | for z, y, x in np.ndindex((gz, gy, gx)):
159 | # Read the block header
160 | res = struct.unpack_from("> 24
163 | if bits not in (0, 1, 2, 4, 8, 16, 32):
164 | raise InvalidFormatError("Invalid number of encoding bits for "
165 | f"compressed_segmentation block ({bits})"
166 | )
167 | encoded_values_offset = 4 * res[1]
168 | lookup_table_past_end = lookup_table_offset + chunk.itemsize * min(
169 | (2 ** bits),
170 | ((len(buf) - lookup_table_offset) // chunk.itemsize)
171 | )
172 | lookup_table = np.frombuffer(
173 | buf[lookup_table_offset:lookup_table_past_end], dtype=chunk.dtype)
174 | if bits == 0:
175 | block = np.empty(block_size, dtype=chunk.dtype)
176 | try:
177 | block[...] = lookup_table[0]
178 | except IndexError as exc:
179 | raise InvalidFormatError(
180 | "Invalid compressed_segmentation data: indexing out of "
181 | "the lookup table") from exc
182 | else:
183 | values_per_32bit = 32 // bits
184 | encoded_values_end = encoded_values_offset + 4 * (
185 | ceil_div(block_num_elem, values_per_32bit)
186 | )
187 | if encoded_values_end > len(buf):
188 | raise InvalidFormatError(
189 | "Invalid compressed_segmentation data: file too short, "
190 | "insufficient room for encoded values"
191 | )
192 | packed_values = np.frombuffer(buf[encoded_values_offset:
193 | encoded_values_end], dtype=" 0
221 | assert 32 % bits == 0
222 | bitmask = (1 << bits) - 1
223 | values_per_32bit = 32 // bits
224 | padded_values = np.empty(
225 | values_per_32bit * ceil_div(num_values, values_per_32bit),
226 | dtype="I")
227 | for shift in range(values_per_32bit):
228 | padded_values[shift::values_per_32bit] = (
229 | (packed_values >> (shift * bits)) & bitmask)
230 | return padded_values[:num_values]
231 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/_jpeg.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import io
9 |
10 | import numpy as np
11 | import PIL.Image
12 |
13 | from neuroglancer_scripts.chunk_encoding import InvalidFormatError
14 |
15 |
def encode_chunk(chunk, jpeg_quality, jpeg_plane):
    """Encode a (C, Z, Y, X) chunk as a single JPEG image.

    The 3D chunk is flattened to a 2D image by stacking either its XY or
    its XZ planes, then compressed with PIL.
    """
    assert 0 <= jpeg_quality <= 100
    assert jpeg_plane in ("xy", "xz")
    num_channels, size_z, size_y, size_x = chunk.shape
    if jpeg_plane == "xy":
        flat_shape = (num_channels, size_z * size_y, size_x)
    else:  # jpeg_plane == "xz":
        flat_shape = (num_channels, size_z, size_y * size_x)
    flat_chunk = chunk.reshape(flat_shape)

    if num_channels == 1:
        flat_chunk = np.squeeze(flat_chunk, 0)
    else:
        # Channels (RGB) need to be along the last axis for PIL
        flat_chunk = np.moveaxis(flat_chunk, 0, -1)

    img = PIL.Image.fromarray(flat_chunk)
    out_buf = io.BytesIO()
    # Chroma sub-sampling is disabled because it can create strong artefacts at
    # the border where the chunk size is odd. Progressive is enabled because it
    # generally creates smaller JPEG files.
    img.save(out_buf, format="jpeg", quality=jpeg_quality,
             optimize=True, progressive=True, subsampling=0)
    return out_buf.getvalue()
41 |
42 |
def decode_chunk(buf, chunk_size, num_channels):
    """Decode a JPEG-encoded buffer into a (C, Z, Y, X) chunk.

    :param bytes buf: the JPEG-encoded data
    :param tuple chunk_size: expected chunk size as (X, Y, Z)
    :param int num_channels: 1 (greyscale, mode L) or 3 (RGB)
    :returns: array of shape (num_channels, Z, Y, X)
    :rtype: numpy.ndarray
    :raises InvalidFormatError: if the buffer cannot be decoded, or its
        mode or size does not match the expected chunk
    """
    io_buf = io.BytesIO(buf)
    try:
        img = PIL.Image.open(io_buf)
    except Exception as exc:
        raise InvalidFormatError(
            f"The JPEG-encoded chunk could not be decoded: {exc}"
        ) from exc

    if num_channels == 1 and img.mode != "L":
        raise InvalidFormatError(
            f"The JPEG chunk is encoded with mode={img.mode} instead of L"
        )
    if num_channels == 3 and img.mode != "RGB":
        raise InvalidFormatError(
            f"The JPEG chunk is encoded with mode={img.mode} instead of RGB"
        )

    flat_chunk = np.asarray(img)
    if num_channels == 3:
        # RGB channels are read by PIL along the last axis
        flat_chunk = np.moveaxis(flat_chunk, -1, 0)
    try:
        chunk = flat_chunk.reshape(num_channels,
                                   chunk_size[2], chunk_size[1], chunk_size[0])
    except Exception as exc:
        # Chain the original exception (was a bare raise-in-except, B904),
        # consistent with the handler above.
        raise InvalidFormatError("The JPEG-encoded chunk has an incompatible "
                                 f"shape ({flat_chunk.size // num_channels} "
                                 f"elements, expecting {np.prod(chunk_size)})"
                                 ) from exc
    return chunk
73 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/data_types.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import logging
7 |
8 | import numpy as np
9 |
10 | __all__ = [
11 | "NG_DATA_TYPES",
12 | "NG_INTEGER_DATA_TYPES",
13 | "get_chunk_dtype_transformer",
14 | ]
15 |
16 |
17 | logger = logging.getLogger(__name__)
18 |
# Structured-dtype field names that are recognized as multichannel image
# data (see get_dtype below).
NG_MULTICHANNEL_DATATYPES = (('R', 'G', 'B'),)
# Integer data types of the Neuroglancer pre-computed format.
NG_INTEGER_DATA_TYPES = ("uint8", "uint16", "uint32", "uint64")
# All data types of the Neuroglancer pre-computed format.
NG_DATA_TYPES = NG_INTEGER_DATA_TYPES + ("float32",)
22 |
23 |
24 | # TODO re-factor into a class:
25 | # - implement reporting of non-preserved values (clipped / rounded)
26 | # - implement optional scaling
27 | # - print a warning for NaNs during float->int conversion
def get_chunk_dtype_transformer(input_dtype, output_dtype, warn=True):
    """Return a callable that converts chunks to output_dtype.

    Floating-point inputs are rounded when the output is an integer type,
    and values are clipped when the input range cannot be represented
    losslessly in the output type.

    .. note::
        Conversion to uint64 may result in loss of precision, because of a
        known bug / approximation in NumPy, where dtype promotion between a
        64-bit (u)int and any float will return float64, even though this type
        can only hold all integers up to 2**53 (see e.g.
        https://github.com/numpy/numpy/issues/8851).
    """
    input_dtype = np.dtype(input_dtype)
    output_dtype = np.dtype(output_dtype)
    output_is_integer = np.issubdtype(output_dtype, np.integer)
    if output_is_integer:
        output_info = np.iinfo(output_dtype)
        output_min, output_max = output_info.min, output_info.max
    else:
        # Only used in the clipping warning; clipping is never enabled for
        # non-integer outputs.
        output_min, output_max = 0.0, 1.0

    work_dtype = np.promote_types(input_dtype, output_dtype)

    round_to_nearest = (output_is_integer
                        and not np.issubdtype(input_dtype, np.integer))
    clip_values = (output_is_integer
                   and not np.can_cast(input_dtype, output_dtype,
                                       casting="safe"))

    logger.debug("dtype converter from %s to %s: "
                 "work_dtype=%s, round_to_nearest=%s, clip_values=%s",
                 input_dtype, output_dtype,
                 work_dtype, round_to_nearest, clip_values)
    if warn:
        if round_to_nearest:
            logger.warning("Values will be rounded to the nearest integer")
        if clip_values:
            logger.warning("Values will be clipped to the range [%s, %s]",
                           output_min, output_max)

    def chunk_transformer(chunk, preserve_input=True):
        assert np.can_cast(chunk.dtype, input_dtype, casting="equiv")
        if round_to_nearest or clip_values:
            # Copy (when requested) so rounding/clipping happens in-place on
            # a working buffer, not on the caller's array.
            chunk = np.array(chunk, dtype=work_dtype, copy=preserve_input)
            if round_to_nearest:
                np.rint(chunk, out=chunk)
            if clip_values:
                np.clip(chunk, output_min, output_max, out=chunk)
        return chunk.astype(output_dtype, casting="unsafe")

    return chunk_transformer
80 |
def get_dtype_from_vol(volume):
    """Return (base_dtype, is_multichannel) for the first voxel of volume."""
    origin = (0,) * len(volume.shape)
    return get_dtype(volume[origin].dtype)
84 |
85 |
def get_dtype(input_dtype):
    """Map a (possibly structured) dtype to (base_dtype, is_multichannel).

    A plain dtype is returned unchanged with ``False``.  A structured dtype
    is accepted only if its field names match NG_MULTICHANNEL_DATATYPES and
    all fields share one base dtype, which is returned with ``True``.
    """
    names = input_dtype.names
    if names is None:
        return input_dtype, False
    if names not in NG_MULTICHANNEL_DATATYPES:
        raise NotImplementedError(
            f'tuple datatype {names} not yet supported')
    message = 'Multichanneled datatype should have the same datatype'
    for field_index in range(len(names)):
        assert input_dtype[field_index].name == input_dtype[0].name, message
    return input_dtype[0], True
96 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/downscaling.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | """Downscaling is used to create a multi-resolution image pyramid.
9 |
10 | The central component here is the :class:`Downscaler` base class. Use
11 | :func:`get_downscaler` for instantiating a concrete downscaler object.
12 | """
13 |
14 | import numpy as np
15 |
16 | from neuroglancer_scripts.data_types import get_chunk_dtype_transformer
17 | from neuroglancer_scripts.utils import ceil_div
18 |
19 | __all__ = [
20 | "get_downscaler",
21 | "add_argparse_options",
22 | "Downscaler",
23 | "StridingDownscaler",
24 | "AveragingDownscaler",
25 | "MajorityDownscaler",
26 | ]
27 |
28 |
def get_downscaler(downscaling_method, info=None, options=None):
    """Create a downscaler object.

    :param str downscaling_method: one of ``"auto"``, ``"average"``,
        ``"majority"``, or ``"stride"``
    :param dict info: Neuroglancer *info* dict of the dataset; its ``"type"``
        key is read only by the ``"auto"`` method to choose between
        ``"average"`` (image) and ``"stride"`` (segmentation)
    :param dict options: options passed to the downscaler as kwargs.
    :returns: an instance of a sub-class of :class:`Downscaler`
    :rtype: Downscaler
    :raises NotImplementedError: for an unknown downscaling_method
    """
    # None instead of a mutable default argument ({}), which would be shared
    # between calls (flake8-bugbear B006).
    if options is None:
        options = {}
    if downscaling_method == "auto":
        if info["type"] == "image":
            return get_downscaler("average", info=None, options=options)
        else:  # info["type"] == "segmentation":
            return get_downscaler("stride", info=None, options=options)
    elif downscaling_method == "average":
        outside_value = options.get("outside_value")
        return AveragingDownscaler(outside_value)
    elif downscaling_method == "majority":
        return MajorityDownscaler()
    elif downscaling_method == "stride":
        return StridingDownscaler()
    else:
        raise NotImplementedError("invalid downscaling method "
                                  + downscaling_method)
53 |
54 |
def add_argparse_options(parser):
    """Add command-line options for downscaling.

    :param argparse.ArgumentParser parser: an argument parser

    The downscaling options can be obtained from command-line arguments with
    :func:`add_argparse_options` and passed to :func:`get_downscaler`::

        import argparse
        parser = argparse.ArgumentParser()
        add_argparse_options(parser)
        args = parser.parse_args()
        get_downscaler(args.downscaling_method, vars(args))
    """
    method_help = (
        'The default is "auto", which chooses '
        '"average" or "stride" depending on the "type" '
        'attribute of the dataset (for "image" or '
        '"segmentation", respectively). "average" is '
        'recommended for grey-level images. "majority" is a '
        'high-quality, but very slow method for segmentation '
        'images. "stride" is fastest, but provides no '
        'protection against aliasing artefacts.'
    )
    outside_value_help = (
        'padding value used by the "average" downscaling '
        "method for computing the voxels at the border. If "
        "omitted, the volume is padded with its edge values."
    )
    group = parser.add_argument_group("Options for downscaling")
    group.add_argument("--downscaling-method", default="auto",
                       choices=("auto", "average", "majority", "stride"),
                       help=method_help)
    group.add_argument("--outside-value", type=float, default=None,
                       help=outside_value_help)
84 |
85 |
class Downscaler:
    """Base class for downscaling algorithms."""

    def check_factors(self, downscaling_factors):
        """Test support for given downscaling factors.

        Subclasses must override this method if they do not support any
        combination of integer downscaling factors.

        :param downscaling_factors: sequence of integer downscaling factors
            (Dx, Dy, Dz)
        :type downscaling_factors: :class:`tuple` of :class:`int`
        :returns: whether the provided downscaling factors are supported
        :rtype: bool
        """
        if len(downscaling_factors) != 3:
            return False
        return all(isinstance(factor, int) and factor >= 1
                   for factor in downscaling_factors)

    def downscale(self, chunk, downscaling_factors):
        """Downscale a chunk according to the provided factors.

        :param numpy.ndarray chunk: chunk with (C, Z, Y, X) indexing
        :param downscaling_factors: sequence of integer downscaling factors
            (Dx, Dy, Dz)
        :type downscaling_factors: tuple
        :returns: the downscaled chunk, with shape ``(C, ceil_div(Z, Dz),
            ceil_div(Y, Dy), ceil_div(X, Dx))``
        :rtype: numpy.ndarray
        :raises NotImplementedError: if the downscaling factors are unsupported
        """
        raise NotImplementedError
119 |
120 |
class StridingDownscaler(Downscaler):
    """Downscale using striding.

    This is a fast, low-quality downscaler that provides no protection against
    aliasing artefacts. It supports arbitrary downscaling factors.
    """
    def downscale(self, chunk, downscaling_factors):
        if not self.check_factors(downscaling_factors):
            raise NotImplementedError
        # Keep every Dn-th voxel along each spatial axis (chunk is C, Z, Y, X
        # while the factors are ordered X, Y, Z).
        dx, dy, dz = downscaling_factors
        return chunk[:, ::dz, ::dy, ::dx]
134 |
135 |
class AveragingDownscaler(Downscaler):
    """Downscale by a factor of two in any given direction, with averaging.

    This downscaler is suitable for grey-level images.

    .. todo::
        Use code from the neuroglancer module to support arbitrary factors.
    """
    def __init__(self, outside_value=None):
        if outside_value is None:
            # Pad with the edge values of the volume itself
            self.padding_mode = "edge"
            self.pad_kwargs = {}
        else:
            self.padding_mode = "constant"
            self.pad_kwargs = {"constant_values": outside_value}

    def check_factors(self, downscaling_factors):
        """Only factors of 1 or 2 along each axis are supported."""
        return (len(downscaling_factors) == 3
                and all(factor in (1, 2) for factor in downscaling_factors))

    def downscale(self, chunk, downscaling_factors):
        if not self.check_factors(downscaling_factors):
            raise NotImplementedError
        original_dtype = chunk.dtype
        # Use a floating-point type for arithmetic
        work_dtype = np.promote_types(original_dtype, np.float64)
        chunk = chunk.astype(work_dtype, casting="safe")
        half = work_dtype.type(0.5)

        # Factors are ordered (Dx, Dy, Dz) while chunk axes are (C, Z, Y, X):
        # average pairs of planes along Z, then Y, then X.
        for factor_index, axis in ((2, 1), (1, 2), (0, 3)):
            if downscaling_factors[factor_index] != 2:
                continue
            if chunk.shape[axis] % 2 != 0:
                # Pad to an even size along this axis before pairing
                padding = [(0, 0)] * 4
                padding[axis] = (0, 1)
                chunk = np.pad(chunk, padding,
                               self.padding_mode, **self.pad_kwargs)
            even = [slice(None)] * 4
            even[axis] = slice(0, None, 2)
            odd = [slice(None)] * 4
            odd[axis] = slice(1, None, 2)
            chunk = half * (chunk[tuple(even)] + chunk[tuple(odd)])

        dtype_converter = get_chunk_dtype_transformer(work_dtype,
                                                      original_dtype,
                                                      warn=False)
        return dtype_converter(chunk)
189 |
190 |
class MajorityDownscaler(Downscaler):
    """Downscaler using majority voting.

    This downscaler is suitable for label images.

    .. todo::
        The majority downscaler could be *really* optimized (clever iteration
        with nditer, Cython, countless for appropriate cases)
    """
    def downscale(self, chunk, downscaling_factors):
        if not self.check_factors(downscaling_factors):
            raise NotImplementedError
        fx, fy, fz = downscaling_factors
        out_shape = (chunk.shape[0],
                     ceil_div(chunk.shape[1], fz),
                     ceil_div(chunk.shape[2], fy),
                     ceil_div(chunk.shape[3], fx))
        result = np.empty(out_shape, dtype=chunk.dtype)
        for out_index in np.ndindex(*out_shape):
            t, z, y, x = out_index
            # Source block that maps onto this output voxel (slicing clips
            # automatically at the chunk boundary)
            block = chunk[t,
                          z * fz:(z + 1) * fz,
                          y * fy:(y + 1) * fy,
                          x * fx:(x + 1) * fx]
            labels, counts = np.unique(block.flat, return_counts=True)
            # np.unique sorts labels, so ties resolve to the smallest label
            result[out_index] = labels[counts.argmax()]

        return result
223 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/file_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | """Access to a Neuroglancer pre-computed dataset on the local filesystem.
7 |
8 | See the :mod:`~neuroglancer_scripts.accessor` module for a description of the
9 | API.
10 | """
11 |
12 | import gzip
13 | import os
14 | import pathlib
15 |
16 | import neuroglancer_scripts.accessor
17 | from neuroglancer_scripts.accessor import _CHUNK_PATTERN_FLAT, DataAccessError
18 |
19 | __all__ = [
20 | "FileAccessor",
21 | ]
22 |
23 |
# Chunk path pattern for the "subdirectory" layout (one directory level per
# coordinate axis); the flat pattern is imported from the accessor module.
_CHUNK_PATTERN_SUBDIR = "{key}/{0}-{1}/{2}-{3}/{4}-{5}"

# MIME types that are stored without additional gzip compression (JPEG and
# PNG payloads are already compressed; JSON is excluded too, presumably to
# keep the info file directly readable).
NO_COMPRESS_MIME_TYPES = {
    "application/json",
    "image/jpeg",
    "image/png",
}
31 |
32 |
class FileAccessor(neuroglancer_scripts.accessor.Accessor):
    """Access a Neuroglancer pre-computed pyramid on the local file system.

    :param str base_dir: path to the directory containing the pyramid
    :param bool flat: use a flat file layout (see :ref:`layouts`)
    :param bool gzip: compress chunks losslessly with gzip
    :param int compresslevel: gzip compression level (passed through to
        :func:`gzip.open`)
    """

    can_read = True
    can_write = True

    def __init__(self, base_dir, flat=False, gzip=True, compresslevel=9):
        self.base_path = pathlib.Path(base_dir)
        if flat:
            self.chunk_pattern = _CHUNK_PATTERN_FLAT
        else:
            self.chunk_pattern = _CHUNK_PATTERN_SUBDIR
        self.gzip = gzip
        self.compresslevel = compresslevel

    def _resolve_path(self, relative_path):
        """Resolve relative_path under base_path, rejecting '..' components.

        :raises ValueError: if relative_path points outside of base_path
        """
        file_path = self.base_path / pathlib.Path(relative_path)
        if ".." in file_path.relative_to(self.base_path).parts:
            raise ValueError("only relative paths pointing under base_path "
                             "are accepted")
        return file_path

    def file_exists(self, relative_path):
        """Test existence of a file, in plain or gzip-compressed form.

        :raises ValueError: if relative_path points outside of base_path
        :raises DataAccessError: on I/O error
        """
        file_path = self._resolve_path(relative_path)
        try:
            if file_path.is_file():
                return True
            elif file_path.with_name(file_path.name + ".gz").is_file():
                return True
        except OSError as exc:
            raise DataAccessError(
                f"Error fetching {file_path}: {exc}") from exc
        return False

    def fetch_file(self, relative_path):
        """Read a file, transparently decompressing a ``.gz`` variant.

        :raises ValueError: if relative_path points outside of base_path
        :raises DataAccessError: if the file is missing or unreadable
        """
        file_path = self._resolve_path(relative_path)
        try:
            if file_path.is_file():
                f = file_path.open("rb")
            elif file_path.with_name(file_path.name + ".gz").is_file():
                f = gzip.open(str(file_path.with_name(file_path.name + ".gz")),
                              "rb")
            else:
                raise DataAccessError(f"Cannot find {relative_path} in "
                                      f"{self.base_path}")
            with f:
                return f.read()
        except OSError as exc:
            raise DataAccessError(
                f"Error fetching {file_path}: {exc}") from exc

    def store_file(self, relative_path, buf,
                   mime_type="application/octet-stream",
                   overwrite=False):
        """Write a file, gzip-compressing it unless mime_type opts out.

        :raises ValueError: if relative_path points outside of base_path
        :raises DataAccessError: on I/O error (including a pre-existing
            file when overwrite is False)
        """
        file_path = self._resolve_path(relative_path)
        mode = "wb" if overwrite else "xb"
        try:
            os.makedirs(str(file_path.parent), exist_ok=True)
            if self.gzip and mime_type not in NO_COMPRESS_MIME_TYPES:
                with gzip.open(
                        str(file_path.with_name(file_path.name + ".gz")),
                        mode, compresslevel=self.compresslevel) as f:
                    f.write(buf)
            else:
                with file_path.open(mode) as f:
                    f.write(buf)
        except OSError as exc:
            raise DataAccessError(f"Error storing {file_path}: {exc}"
                                  ) from exc

    def fetch_chunk(self, key, chunk_coords):
        """Read a chunk, looking it up in both file layouts.

        When a chunk exists in both layouts, the subdirectory layout takes
        precedence (same outcome as before, but the subdir pattern is now
        checked first with an early break: the previous code opened the
        flat file and then leaked its handle by re-assigning ``f``).

        :raises DataAccessError: if the chunk is missing or unreadable
        """
        f = None
        try:
            for pattern in _CHUNK_PATTERN_SUBDIR, _CHUNK_PATTERN_FLAT:
                chunk_path = self._chunk_path(key, chunk_coords, pattern)
                if chunk_path.is_file():
                    f = chunk_path.open("rb")
                elif chunk_path.with_name(chunk_path.name + ".gz").is_file():
                    f = gzip.open(
                        str(chunk_path.with_name(chunk_path.name + ".gz")),
                        "rb"
                    )
                if f is not None:
                    # Stop at the first hit so only one file is ever opened
                    break
            if f is None:
                raise DataAccessError(
                    "Cannot find chunk "
                    f"{self._flat_chunk_basename(key, chunk_coords)} in "
                    f"{self.base_path}"
                )
            with f:
                return f.read()
        except OSError as exc:
            raise DataAccessError(
                "Error accessing chunk "
                f"{self._flat_chunk_basename(key, chunk_coords)} in "
                f"{self.base_path}: {exc}") from exc

    def store_chunk(self, buf, key, chunk_coords,
                    mime_type="application/octet-stream",
                    overwrite=True):
        """Write a chunk, gzip-compressing it unless mime_type opts out.

        :raises DataAccessError: on I/O error
        """
        chunk_path = self._chunk_path(key, chunk_coords)
        mode = "wb" if overwrite else "xb"
        try:
            os.makedirs(str(chunk_path.parent), exist_ok=True)
            if self.gzip and mime_type not in NO_COMPRESS_MIME_TYPES:
                with gzip.open(
                        str(chunk_path.with_name(chunk_path.name + ".gz")),
                        mode, compresslevel=self.compresslevel) as f:
                    f.write(buf)
            else:
                with chunk_path.open(mode) as f:
                    f.write(buf)
        except OSError as exc:
            raise DataAccessError(
                "Error storing chunk "
                f"{self._flat_chunk_basename(key, chunk_coords)} in "
                f"{self.base_path}: {exc}") from exc

    def _chunk_path(self, key, chunk_coords, pattern=None):
        """Build the path of a chunk for the given (or configured) layout."""
        if pattern is None:
            pattern = self.chunk_pattern
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        chunk_filename = pattern.format(
            xmin, xmax, ymin, ymax, zmin, zmax, key=key)
        return self.base_path / chunk_filename

    def _flat_chunk_basename(self, key, chunk_coords):
        """Chunk name in the flat layout, used in error messages."""
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        chunk_filename = _CHUNK_PATTERN_FLAT.format(
            xmin, xmax, ymin, ymax, zmin, zmax, key=key)
        return chunk_filename
173 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/http_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | """Access to a Neuroglancer pre-computed dataset over HTTP.
7 |
8 | See the :mod:`~neuroglancer_scripts.accessor` module for a description of the
9 | API.
10 | """
11 |
12 | import urllib.parse
13 |
14 | import requests
15 |
16 | import neuroglancer_scripts.accessor
17 | from neuroglancer_scripts.accessor import _CHUNK_PATTERN_FLAT, DataAccessError
18 |
19 | __all__ = [
20 | "HttpAccessor",
21 | ]
22 |
23 |
class HttpAccessor(neuroglancer_scripts.accessor.Accessor):
    """Access a Neuroglancer pre-computed pyramid with HTTP.

    .. note::
        This is a read-only accessor.

    :param str base_url: the URL containing the pyramid
    """

    can_read = True
    can_write = False

    def __init__(self, base_url):
        self._session = requests.Session()

        # Fix the base URL to end with a slash, discard query and fragment.
        # str.endswith is used instead of r.path[-1] so that a URL with an
        # empty path (e.g. "http://example.com") does not raise IndexError.
        r = urllib.parse.urlsplit(base_url)
        self.base_url = urllib.parse.urlunsplit((
            r.scheme, r.netloc,
            r.path if r.path.endswith("/") else r.path + "/",
            "", ""))

    def chunk_relative_url(self, key, chunk_coords):
        """Relative URL of a chunk, in the flat naming scheme."""
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        url_suffix = _CHUNK_PATTERN_FLAT.format(
            xmin, xmax, ymin, ymax, zmin, zmax, key=key)
        return url_suffix

    def fetch_chunk(self, key, chunk_coords):
        """Fetch the data of a chunk over HTTP.

        :raises DataAccessError: if the chunk cannot be retrieved
        """
        chunk_url = self.chunk_relative_url(key, chunk_coords)
        return self.fetch_file(chunk_url)

    def file_exists(self, relative_path):
        """Probe the existence of a file with an HTTP HEAD request.

        :returns: False on HTTP 404, True if the request succeeds
        :raises DataAccessError: on any other request failure
        """
        file_url = self.base_url + relative_path
        try:
            r = self._session.head(file_url)
            if r.status_code == requests.codes.not_found:
                return False
            r.raise_for_status()
        except requests.exceptions.RequestException as exc:
            raise DataAccessError("Error probing the existence of "
                                  f"{file_url}: {exc}") from exc
        return True

    def fetch_file(self, relative_path):
        """Fetch the contents of a file over HTTP.

        :returns: the file's contents as bytes
        :raises DataAccessError: if the file cannot be retrieved
        """
        file_url = self.base_url + relative_path
        try:
            r = self._session.get(file_url)
            r.raise_for_status()
        except requests.exceptions.RequestException as exc:
            raise DataAccessError(f"Error reading {file_url}: {exc}") from exc
        return r.content
76 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/mesh.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
3 | # Author: Yann Leprince
4 | #
5 | # This software is made available under the MIT licence, see LICENCE.txt.
6 |
7 | """I/O for meshes in formats understood by Neuroglancer.
8 |
9 | Neuroglancer understands two file formats for meshes:
10 |
11 | - A binary “precomputed” format for meshes that correspond to a
12 | ``segmentation`` layer.
13 |
14 | - A sub-set of the legacy VTK ASCII format can be used with the ``vtk://``
15 | datasource to represent a mesh that is not associated with a voxel
16 | segmentation (``SingleMesh`` layer). Vertices may have arbitrary scalar
17 | attributes.
18 | """
19 |
20 | import logging
21 | import re
22 | import struct
23 |
24 | import numpy as np
25 |
26 | import neuroglancer_scripts
27 |
28 | __all__ = [
29 | "InvalidMeshDataError",
30 | "save_mesh_as_neuroglancer_vtk",
31 | "save_mesh_as_precomputed",
32 | "read_precomputed_mesh",
33 | "affine_transform_mesh",
34 | ]
35 |
36 |
37 | logger = logging.getLogger(__name__)
38 |
39 |
class InvalidMeshDataError(Exception):
    """Raised when mesh data cannot be decoded properly."""
43 |
44 |
def save_mesh_as_neuroglancer_vtk(file, vertices, triangles,
                                  vertex_attributes=None, title=""):
    """Store a mesh in VTK format such that it can be read by Neuroglancer.

    :param file: a file-like object opened in text mode (its ``write`` method
        will be called with :class:`str` objects).
    :param numpy.ndarray vertices: the list of vertices of the mesh. Must be
        convertible to an array of size Nx3, type ``float32``. Coordinates
        will be interpreted by Neuroglancer in nanometres.
    :param numpy.ndarray triangles: the list of triangles of the mesh. Must be
        convertible to an array of size Mx3, with integer data type.
    :param list vertex_attributes: an iterable containing a description of
        vertex attributes (see below).
    :param str title: a title (comment) for the dataset. Cannot contain \\n,
        will be truncated to 255 characters.
    :raises AssertionError: if the inputs do not match the constraints above

    Each element of ``vertex_attributes`` must be a mapping (e.g.
    :class:`dict`) with the following keys:

    name
        The name of the vertex attribute, as a :class:`str`. Cannot contain
        white space.

    values
        The values of the attribute. Must be convertible to an array of size N
        or NxC, where N is the number of vertices, and C is the number of
        channels for the attribute (between 1 and 4).

    The output uses a sub-set of the legacy VTK ASCII format, which can be read
    by Neuroglancer (as of
    https://github.com/google/neuroglancer/blob/a8ce681660864ab3ac7c1086c0b4262e40f24707/src/neuroglancer/datasource/vtk/parse.ts).
    """
    vertices = np.asarray(vertices)
    assert vertices.ndim == 2
    triangles = np.asarray(triangles)
    assert triangles.ndim == 2
    assert triangles.shape[1] == 3
    assert "\n" not in title
    file.write("# vtk DataFile Version 3.0\n")
    if title:
        title += ". "
    title += ("Written by neuroglancer-scripts-"
              f"{neuroglancer_scripts.__version__}.")
    file.write(f"{title[:255]}\n")
    file.write("ASCII\n")
    file.write("DATASET POLYDATA\n")
    file.write("POINTS {:d} {}\n".format(vertices.shape[0], "float"))
    if not np.can_cast(vertices.dtype, np.float32):
        # As of a8ce681660864ab3ac7c1086c0b4262e40f24707 Neuroglancer reads
        # everything as float32 anyway
        logger.warning("Vertex coordinates will be converted to float32")
    np.savetxt(file, vertices.astype(np.float32), fmt="%.9g")
    file.write(f"POLYGONS {triangles.shape[0]:d} {4 * triangles.shape[0]:d}\n"
               )
    np.savetxt(file, np.insert(triangles, 0, 3, axis=1), fmt="%d")
    if vertex_attributes:
        file.write(f"POINT_DATA {vertices.shape[0]:d}\n")
        for vertex_attribute in vertex_attributes:
            name = vertex_attribute["name"]
            # BUG FIX: re.match only anchors at the start of the string, so
            # it only rejected names *starting* with white space. re.search
            # enforces the documented constraint that the attribute name
            # contains no white space anywhere.
            assert re.search(r"\s", name) is None
            values = np.asarray(vertex_attribute["values"])
            assert values.shape[0] == vertices.shape[0]
            if values.ndim == 1:
                values = values[:, np.newaxis]
            assert values.ndim == 2
            num_components = values.shape[1]
            if num_components > 4:
                logger.warning("The file will not be strictly valid VTK "
                               "because a SCALAR vertex attribute has more "
                               "than 4 components")
            if not np.can_cast(values.dtype, np.float32):
                # As of a8ce681660864ab3ac7c1086c0b4262e40f24707 Neuroglancer
                # reads everything as float32 anyway
                logger.warning(f"Data for the '{name}' vertex attribute will "
                               "be converted to float32")
            file.write("SCALARS {} {}".format(name, "float"))
            if num_components != 1:
                file.write(f" {num_components:d}")
            file.write("\nLOOKUP_TABLE {}\n".format("default"))
            np.savetxt(file, values.astype(np.float32), fmt="%.9g")
126 |
127 |
128 | def save_mesh_as_precomputed(file, vertices, triangles):
129 | """Store a mesh in Neuroglancer pre-computed format.
130 |
131 | :param file: a file-like object opened in binary mode (its ``write`` method
132 | will be called with :class:`bytes` objects).
133 | :param numpy.ndarray vertices: the list of vertices of the mesh. Must be
134 | convertible to an array of size Nx3 and type ``float32``. Coordinates
135 | will be interpreted by Neuroglancer in nanometres.
136 | :param numpy.ndarray triangles: the list of triangles of the mesh. Must be
137 | convertible to an array of size Mx3 and ``uint32`` data type.
138 | :raises AssertionError: if the inputs do not match the constraints above
139 | """
140 | vertices = np.asarray(vertices)
141 | assert vertices.ndim == 2
142 | triangles = np.asarray(triangles)
143 | assert triangles.ndim == 2
144 | assert triangles.shape[1] == 3
145 | if not np.can_cast(vertices.dtype, " num_vertices):
185 | raise InvalidMeshDataError("The mesh references nonexistent vertices")
186 | return (vertices, triangles)
187 |
188 |
def affine_transform_mesh(vertices, triangles, coord_transform):
    """Transform a mesh through an affine transformation.

    :param numpy.ndarray vertices: the list of vertices of the mesh. Must be
        convertible to an array of size Nx3, type ``float32``.
    :param numpy.ndarray triangles: the list of triangles of the mesh, in an
        array of size Mx3.
    :param numpy.ndarray coord_transform: the affine transform to be applied
        to mesh vertices, as a 3×4 or 4×4 matrix in homogeneous coordinates.
    :returns: transformed ``(vertices, triangles)``
    :rtype: tuple

    Vertex coordinates are transformed with the provided affine
    transformation. Triangles will be flipped if the transformation has a
    negative determinant, in order to conserve the inside/outside of the
    mesh.
    """
    if coord_transform.shape[0] == 4:
        # A full homogeneous matrix must have the canonical last row.
        assert np.all(coord_transform[3, :] == [0, 0, 0, 1])
    linear_part = coord_transform[:3, :3]
    translation = coord_transform[:3, 3]
    transformed_vertices = vertices @ linear_part.T + translation
    if np.linalg.det(linear_part) < 0:
        # Reverse the vertex order of each triangle so that the mesh's
        # inside/outside orientation is preserved under a reflection.
        triangles = np.flip(triangles, axis=1)
    return transformed_vertices, triangles
215 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/precomputed_io.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | """High-level access to Neuroglancer pre-computed datasets.
7 |
8 | The central component here is the :class:`PrecomputedIO` base class. Use
9 | :func:`get_IO_for_existing_dataset` or :func:`get_IO_for_new_dataset` for
10 | instantiating a concrete accessor object.
11 | """
12 |
13 | import json
14 |
15 | from neuroglancer_scripts import chunk_encoding
16 | from neuroglancer_scripts.chunk_encoding import InvalidInfoError
17 |
18 | __all__ = [
19 | "get_IO_for_existing_dataset",
20 | "get_IO_for_new_dataset",
21 | "PrecomputedIO",
22 | ]
23 |
24 |
def get_IO_for_existing_dataset(accessor, encoder_options=None):
    """Create an object for accessing a pyramid with an existing *info*.

    :param Accessor accessor: a low-level accessor
    :param dict encoder_options: extrinsic encoder options
    :rtype: PrecomputedIO
    :raises DataAccessError: if the *info* file cannot be retrieved
    :raises InvalidInfoError: if the *info* file is not valid JSON
    :raises NotImplementedError: if the accessor is unable to read files
    """
    # A None default avoids the shared-mutable-default-argument pitfall.
    if encoder_options is None:
        encoder_options = {}
    info_bytes = accessor.fetch_file("info")
    info_str = info_bytes.decode("utf-8")
    try:
        info = json.loads(info_str)
    except ValueError as exc:
        # BUG FIX: the message used to contain a literal "{0}" placeholder
        # that was never filled in; include the actual JSON error instead.
        raise InvalidInfoError(f"Invalid JSON: {exc}") from exc
    return PrecomputedIO(info, accessor, encoder_options=encoder_options)
42 |
43 |
def get_IO_for_new_dataset(info, accessor, overwrite_info=False,
                           encoder_options=None):
    """Create a new pyramid and store the provided *info*.

    :param dict info: the *info* of the new pyramid
    :param Accessor accessor: a low-level accessor
    :param bool overwrite_info: allow overwriting an existing *info* file
    :param dict encoder_options: extrinsic encoder options
    :rtype: PrecomputedIO
    :raises DataAccessError: if the *info* file cannot be stored
    :raises NotImplementedError: if the accessor is unable to write files
    """
    # A None default avoids the shared-mutable-default-argument pitfall.
    if encoder_options is None:
        encoder_options = {}
    info_str = json.dumps(info, separators=(",", ":"), sort_keys=True)
    info_bytes = info_str.encode("utf-8")
    accessor.store_file("info", info_bytes,
                        mime_type="application/json",
                        overwrite=overwrite_info)
    return PrecomputedIO(info, accessor, encoder_options=encoder_options)
60 |
61 |
class PrecomputedIO:
    """Object for high-level access to a Neuroglancer precomputed dataset.

    An object of this class provides access to chunk data in terms of NumPy
    arrays. It handles the reading/writing of files through the provided
    ``accessor``, as well as the encoding/decoding of chunks.

    The structure of the dataset (*info*) is stored in the PrecomputedIO
    instance and **must not change** during its lifetime. If you need to change
    the *info* of a dataset, use :func:`get_IO_for_new_dataset` to store the
    new *info* and create a new PrecomputedIO object.

    :param dict info: description of the dataset's structure (see :ref:`info`).

    :param Accessor accessor: an object providing low-level access to the
        dataset's files (see
        :func:`neuroglancer_scripts.accessor.get_accessor_for_url`).
    :param dict encoder_options: extrinsic encoder options (see
        :func:`neuroglancer_scripts.chunk_encoding.get_encoder`).
    """
    def __init__(self, info, accessor, encoder_options=None):
        # A None default avoids sharing one mutable dict between instances.
        if encoder_options is None:
            encoder_options = {}
        self._info = info
        self.accessor = accessor
        # Index the per-scale info and encoders by each scale's "key" for
        # constant-time lookup in read_chunk / write_chunk.
        self._scale_info = {
            scale_info["key"]: scale_info for scale_info in info["scales"]
        }
        self._encoders = {
            scale_info["key"]: chunk_encoding.get_encoder(info, scale_info,
                                                          encoder_options)
            for scale_info in info["scales"]
        }

    @property
    def info(self):
        """The precomputed dataset's *info* dictionary."""
        return self._info

    def scale_info(self, scale_key):
        """The *info* for a given scale.

        :param str scale_key: the *key* property of the chosen scale.
        :return: ``info["scales"][i]`` where ``info["scales"][i]["key"]
            == scale_key``
        :rtype: dict
        """
        return self._scale_info[scale_key]

    def scale_is_lossy(self, scale_key):
        """Test if the scale is using a lossy encoding.

        :param str scale_key: the *key* attribute of the scale
        :returns: True if the scale is using a lossy encoding
        :rtype: bool
        :raises KeyError: if the ``scale_key`` is not a valid scale of this
            dataset
        """
        return self._encoders[scale_key].lossy

    def validate_chunk_coords(self, scale_key, chunk_coords):
        """Validate the coordinates of a chunk.

        :returns: True if the chunk coordinates are valid according to the
            dataset's *info*
        :rtype: bool
        """
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        scale_info = self.scale_info(scale_key)
        xs, ys, zs = scale_info["size"]
        if scale_info["voxel_offset"] != [0, 0, 0]:
            raise NotImplementedError("voxel_offset is not supported")
        # A chunk is valid if it is aligned on one of the declared chunk
        # sizes and extends to the full chunk size, clamped at the volume
        # boundary.
        for chunk_size in scale_info["chunk_sizes"]:
            xcs, ycs, zcs = chunk_size
            if (xmin % xcs == 0 and (xmax == min(xmin + xcs, xs))
                    and ymin % ycs == 0 and (ymax == min(ymin + ycs, ys))
                    and zmin % zcs == 0 and (zmax == min(zmin + zcs, zs))):
                return True
        return False

    def read_chunk(self, scale_key, chunk_coords):
        """Read a chunk from the dataset.

        The chunk coordinates **must** be compatible with the dataset's *info*.
        This can be checked with :meth:`validate_chunk_coords`.

        :param str scale_key: the *key* attribute of the scale
        :param tuple chunk_coords: the chunk coordinates ``(xmin, xmax, ymin,
            ymax, zmin, zmax)``
        :returns: chunk data contained in a 4-D NumPy array (C, Z, Y, X)
        :rtype: numpy.ndarray
        :raises DataAccessError: if the chunk's file cannot be accessed
        :raises InvalidFormatError: if the chunk cannot be decoded
        :raises AssertionError: if the chunk coordinates are incompatible with
            the dataset's *info*
        """
        assert self.validate_chunk_coords(scale_key, chunk_coords)
        xmin, xmax, ymin, ymax, zmin, zmax = chunk_coords
        buf = self.accessor.fetch_chunk(scale_key, chunk_coords)
        encoder = self._encoders[scale_key]
        chunk = encoder.decode(buf, (xmax - xmin, ymax - ymin, zmax - zmin))
        return chunk

    def write_chunk(self, chunk, scale_key, chunk_coords):
        """Write a chunk into the dataset.

        The chunk coordinates **must** be compatible with the dataset's *info*.
        This can be checked with :meth:`validate_chunk_coords`.

        :param numpy.ndarray chunk: chunk data contained in a 4-D NumPy array
            (C, Z, Y, X)
        :param str scale_key: the *key* attribute of the scale
        :param tuple chunk_coords: the chunk coordinates ``(xmin, xmax, ymin,
            ymax, zmin, zmax)``
        :raises DataAccessError: if the chunk's file cannot be accessed
        :raises AssertionError: if the chunk coordinates are incompatible with
            the dataset's *info*
        """
        assert self.validate_chunk_coords(scale_key, chunk_coords)
        encoder = self._encoders[scale_key]
        buf = encoder.encode(chunk)
        self.accessor.store_chunk(
            buf, scale_key, chunk_coords,
            mime_type=encoder.mime_type
        )
185 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | """Scripts for converting images to the Neuroglancer pre-computed format.
7 | """
8 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/compute_scales.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 |
9 | import sys
10 |
11 | import neuroglancer_scripts.accessor
12 | import neuroglancer_scripts.chunk_encoding
13 | import neuroglancer_scripts.downscaling
14 | import neuroglancer_scripts.dyadic_pyramid
15 | from neuroglancer_scripts import precomputed_io
16 |
17 |
def compute_scales(work_dir=".", downscaling_method="average", options=None):
    """Generate lower scales following an input info file.

    :param str work_dir: working directory or URL of the dataset
    :param str downscaling_method: name of the downscaling method
    :param dict options: options forwarded to the accessor, the chunk
        encoder, and the downscaler
    """
    # A None default avoids the shared-mutable-default-argument pitfall.
    if options is None:
        options = {}
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        work_dir, options
    )
    pyramid_io = precomputed_io.get_IO_for_existing_dataset(
        accessor, encoder_options=options
    )
    downscaler = neuroglancer_scripts.downscaling.get_downscaler(
        downscaling_method, pyramid_io.info, options
    )
    neuroglancer_scripts.dyadic_pyramid.compute_dyadic_scales(
        pyramid_io, downscaler
    )
32 |
33 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param list argv: the full argument vector (``argv[0]`` is skipped)
    :returns: the parsed arguments namespace
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Create lower scales in Neuroglancer precomputed format

The list of scales is read from a file named "info" in the working directory.
All the lower resolutions are computed from the highest resolution (first
scale in the info file). Only downscaling by a factor of 2 is supported (any
pyramid scheme created by generate_scales_info.py is appropriate).
""")
    parser.add_argument("work_dir", help="working directory or URL")

    neuroglancer_scripts.accessor.add_argparse_options(parser)
    neuroglancer_scripts.downscaling.add_argparse_options(parser)
    neuroglancer_scripts.chunk_encoding.add_argparse_options(parser,
                                                             allow_lossy=False)

    args = parser.parse_args(argv[1:])
    return args
55 |
56 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    status = compute_scales(args.work_dir,
                            args.downscaling_method,
                            options=vars(args))
    return status or 0


if __name__ == "__main__":
    sys.exit(main())
69 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/convert_chunks.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, 2018 Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import logging
9 | import sys
10 |
11 | import numpy as np
12 | from tqdm import tqdm
13 |
14 | import neuroglancer_scripts.accessor
15 | import neuroglancer_scripts.chunk_encoding
16 | from neuroglancer_scripts import data_types, precomputed_io
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
def convert_chunks_for_scale(chunk_reader,
                             dest_info, chunk_writer, scale_index,
                             chunk_transformer):
    """Convert all the chunks of one scale to the destination encoding."""
    scale_info = dest_info["scales"][scale_index]
    key = scale_info["key"]
    size = scale_info["size"]
    dest_dtype = np.dtype(dest_info["data_type"]).newbyteorder("<")

    if chunk_reader.scale_is_lossy(key):
        logger.warning("Using data stored in a lossy format as an input for "
                       "conversion (for scale %s)", key)

    for chunk_size in scale_info["chunk_sizes"]:
        # Number of chunks along each axis (ceiling division).
        chunk_range = tuple((s - 1) // cs + 1
                            for s, cs in zip(size, chunk_size))
        progress_iter = tqdm(
            np.ndindex(chunk_range), total=np.prod(chunk_range),
            unit="chunk",
            desc=f"converting scale {key}")
        for chunk_idx in progress_iter:
            # Chunk bounds, clamped to the volume size along each axis.
            mins = [cs * i for cs, i in zip(chunk_size, chunk_idx)]
            maxs = [min(cs * (i + 1), s)
                    for cs, i, s in zip(chunk_size, chunk_idx, size)]
            chunk_coords = (mins[0], maxs[0], mins[1], maxs[1],
                            mins[2], maxs[2])

            chunk = chunk_reader.read_chunk(key, chunk_coords)
            chunk = chunk_transformer(chunk, preserve_input=False)
            # TODO add the possibility of data-type conversion (ideally
            # through a command-line flag)
            chunk_writer.write_chunk(
                chunk.astype(dest_dtype, casting="equiv"),
                key, chunk_coords
            )
58 |
59 |
def convert_chunks(source_url, dest_url, copy_info=False,
                   options=None):
    """Convert precomputed chunks between different encodings.

    :param str source_url: URL/directory of the input dataset
    :param str dest_url: URL/directory where the output chunks are written
    :param bool copy_info: copy the source *info* to the destination instead
        of requiring a pre-existing *info* file there
    :param dict options: accessor/encoder options
    """
    # A None default avoids the shared-mutable-default-argument pitfall.
    if options is None:
        options = {}
    source_accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        source_url
    )
    chunk_reader = precomputed_io.get_IO_for_existing_dataset(source_accessor)
    source_info = chunk_reader.info
    dest_accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options
    )
    if copy_info:
        chunk_writer = precomputed_io.get_IO_for_new_dataset(
            source_info, dest_accessor, encoder_options=options
        )
    else:
        chunk_writer = precomputed_io.get_IO_for_existing_dataset(
            dest_accessor, encoder_options=options
        )
    dest_info = chunk_writer.info

    chunk_transformer = data_types.get_chunk_dtype_transformer(
        source_info["data_type"], dest_info["data_type"]
    )
    # Process scales from the last entry of info["scales"] back to the first.
    for scale_index in reversed(range(len(dest_info["scales"]))):
        convert_chunks_for_scale(chunk_reader,
                                 dest_info, chunk_writer, scale_index,
                                 chunk_transformer)
88 |
89 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param list argv: the full argument vector (``argv[0]`` is skipped)
    :returns: the parsed arguments namespace
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Convert Neuroglancer precomputed chunks between different encodings (raw,
compressed_segmentation, or jpeg). The target encoding parameters are
determined by a pre-existing info file in the destination directory (except in
--copy-info mode). You can create such an info file with
generate_scales_info.py.
""")
    parser.add_argument("source_url",
                        help="URL/directory where the input chunks are found")
    parser.add_argument("dest_url", default=".",
                        help="URL/directory where the output chunks will be "
                             "written.")

    parser.add_argument("--copy-info", action="store_true",
                        help="Copy the info file instead of using a "
                             "pre-existing one. The data will be re-encoded "
                             "with the same encoding as the original")

    neuroglancer_scripts.accessor.add_argparse_options(parser)
    neuroglancer_scripts.chunk_encoding.add_argparse_options(parser,
                                                             allow_lossy=True)

    args = parser.parse_args(argv[1:])
    return args
118 |
119 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    status = convert_chunks(args.source_url, args.dest_url,
                            copy_info=args.copy_info,
                            options=vars(args))
    return status or 0


if __name__ == "__main__":
    sys.exit(main())
132 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/generate_scales_info.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 |
9 | import json
10 | import logging
11 | import sys
12 |
13 | import neuroglancer_scripts.accessor
14 | import neuroglancer_scripts.dyadic_pyramid
15 | from neuroglancer_scripts import data_types, precomputed_io
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
def set_info_params(info, dataset_type=None, encoding=None):
    """Fill in and sanity-check "type" and "encoding" of an *info* dict.

    :param dict info: the *info* dictionary to normalize (modified in place)
    :param str dataset_type: "image" or "segmentation"; overrides the value
        inherited from ``info``
    :param str encoding: chunk encoding for the full-resolution scale;
        overrides the value inherited from ``info``
    """
    full_scale_info = info["scales"][0]
    if encoding:
        full_scale_info["encoding"] = encoding
    elif "encoding" not in full_scale_info:
        full_scale_info["encoding"] = "raw"

    if dataset_type:
        info["type"] = dataset_type
    elif "type" not in info:
        if full_scale_info["encoding"] == "compressed_segmentation":
            info["type"] = "segmentation"
        else:
            info["type"] = "image"

    if full_scale_info["encoding"] == "compressed_segmentation":
        if info["type"] != "segmentation":
            logger.warning("using compressed_segmentation encoding but "
                           "'type' is not 'segmentation'")
        if "compressed_segmentation_block_size" not in full_scale_info:
            # TODO clamp block size to chunk size (e.g. thin chunks)
            full_scale_info["compressed_segmentation_block_size"] = [8, 8, 8]
        # compressed_segmentation only supports uint32 or uint64
        if info["data_type"] in ("uint8", "uint16"):
            info["data_type"] = "uint32"
        if info["data_type"] not in ("uint32", "uint64"):
            logger.warning("data type %s is not supported by the "
                           "compressed_segmentation encoding",
                           info["data_type"])

    if (info["type"] == "segmentation"
            and info["data_type"] not in data_types.NG_INTEGER_DATA_TYPES):
        logger.warning('the dataset is of type "segmentation" but has a '
                       'non-integer data_type (%s)', info["data_type"])

    if (info["type"] == "segmentation" and info["num_channels"] != 1):
        # BUG FIX: this warning used to pass info["data_type"] (a str) to
        # the %d placeholder, which logged the wrong value and broke the
        # message formatting; log the actual number of channels instead.
        logger.warning('the dataset is of type "segmentation" but '
                       'num_channels is %d (must be 1)',
                       info["num_channels"])
58 |
59 |
def generate_scales_info(input_fullres_info_filename,
                         dest_url,
                         target_chunk_size=64,
                         dataset_type=None,
                         encoding=None,
                         max_scales=None):
    """Generate a list of scales from an input JSON file."""
    with open(input_fullres_info_filename) as fullres_file:
        info = json.load(fullres_file)
    set_info_params(info, dataset_type=dataset_type, encoding=encoding)
    neuroglancer_scripts.dyadic_pyramid.fill_scales_for_dyadic_pyramid(
        info,
        target_chunk_size=target_chunk_size,
        max_scales=max_scales,
    )
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(dest_url)
    # Creating the IO object writes the dataset's "info" file as a side
    # effect.
    precomputed_io.get_IO_for_new_dataset(info, accessor)
78 |
79 |
def parse_command_line(argv):
    """Parse the script's command line."""
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="""\
Create a list of scales in Neuroglancer "info" JSON file format.

Output is written to a file named "info" at dest_url.
""")
    arg_parser.add_argument("fullres_info",
                            help="JSON file containing the full-resolution "
                            "info")
    arg_parser.add_argument("dest_url",
                            help="directory/URL where the converted "
                            "dataset will be written")
    arg_parser.add_argument("--max-scales", type=int, default=None,
                            help="maximum number of scales to generate"
                            " (default: unlimited)")
    arg_parser.add_argument("--target-chunk-size", type=int, default=64,
                            help="target chunk size (default 64). This size"
                            " will be used for cubic chunks, the size of"
                            " anisotropic chunks will be adapted to contain"
                            " approximately the same number of voxels.")
    arg_parser.add_argument("--type", default=None,
                            choices=("image", "segmentation"),
                            help="Type of dataset (image or segmentation). By"
                            " default this is inherited from the fullres_info"
                            " file, with a fallback to image.")
    arg_parser.add_argument("--encoding", default=None,
                            choices=("raw", "jpeg",
                                     "compressed_segmentation"),
                            help="data encoding (raw, jpeg, or"
                            " compressed_segmentation). By default this is"
                            " inherited from the fullres_info file, with a"
                            " fallback to raw.")

    neuroglancer_scripts.accessor.add_argparse_options(
        arg_parser, write_chunks=False, write_files=False
    )

    return arg_parser.parse_args(argv[1:])
119 |
120 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    status = generate_scales_info(args.fullres_info,
                                  args.dest_url,
                                  target_chunk_size=args.target_chunk_size,
                                  dataset_type=args.type,
                                  encoding=args.encoding,
                                  max_scales=args.max_scales)
    return status or 0


if __name__ == "__main__":
    sys.exit(main())
136 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/link_mesh_fragments.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2018, CEA
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import csv
9 | import json
10 | import logging
11 | import os.path
12 | import sys
13 |
14 | import neuroglancer_scripts.accessor
15 | import neuroglancer_scripts.precomputed_io as precomputed_io
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
def fragment_exists(fragment_name, mesh_dir):
    """Return True if the fragment file exists on disk (plain or gzipped)."""
    candidate = os.path.join(mesh_dir, fragment_name)
    return os.path.isfile(candidate) or os.path.isfile(candidate + ".gz")
25 |
26 |
def make_mesh_fragment_links(input_csv, dest_url, no_colon_suffix=False,
                             options=None):
    """Write JSON files linking mesh fragments to segmentation labels.

    :param str input_csv: CSV file where the first cell of each row is the
        integer label and the remaining cells name the fragment files.
    :param str dest_url: base directory/URL of the output dataset.
    :param bool no_colon_suffix: write ``<label>`` instead of ``<label>:0``
        (needed on filesystems that disallow colons, such as FAT).
    :param dict options: accessor options (see
        ``neuroglancer_scripts.accessor.get_accessor_for_url``).
    :returns: 1 on error, None on success.
    """
    if options is None:
        # Avoid the shared mutable default argument anti-pattern.
        options = {}
    if no_colon_suffix:
        filename_format = "{0}/{1}"
    else:
        filename_format = "{0}/{1}:0"
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options
    )
    info = precomputed_io.get_IO_for_existing_dataset(accessor).info
    if "mesh" not in info:
        logger.critical('The info file is missing the "mesh" key, please '
                        'use mesh-to-precomputed first.')
        return 1
    mesh_dir = info["mesh"]

    with open(input_csv, newline="") as csv_file:
        for line in csv.reader(csv_file):
            numeric_label = int(line[0])
            fragment_list = line[1:]
            # Warn about fragments that are referenced in the CSV but absent
            # from the dataset; the link file is still written.
            for fragment_name in fragment_list:
                if not accessor.file_exists(mesh_dir + "/" + fragment_name):
                    logger.warning("missing fragment %s", fragment_name)
            relative_filename = filename_format.format(mesh_dir, numeric_label)
            json_str = json.dumps({"fragments": fragment_list},
                                  separators=(",", ":"))
            accessor.store_file(relative_filename, json_str.encode("utf-8"),
                                mime_type="application/json")
56 |
57 |
def parse_command_line(argv):
    """Parse the script's command line."""
    import argparse
    description = """\
Create JSON files linking mesh fragments to labels of a segmentation layer.

The input is a CSV file, where each line contains the integer label in the
first cell, followed by an arbitrary number of cells whose contents name the
fragment files corresponding to the given integer label.
"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("input_csv",
                        help="CSV file containing the mapping between integer "
                        "labels and mesh fragment name, in the format "
                        "described above")
    parser.add_argument("dest_url",
                        help="base directory/URL of the output dataset")

    parser.add_argument("--no-colon-suffix",
                        dest="no_colon_suffix", action="store_true",
                        help="omit the :0 suffix in the name of created JSON "
                        "files (e.g. 10 instead of 10:0). This is necessary "
                        "on filesystems that disallow colons, such as FAT.")

    neuroglancer_scripts.accessor.add_argparse_options(parser,
                                                       write_chunks=False)

    return parser.parse_args(argv[1:])
87 |
88 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    exit_code = make_mesh_fragment_links(args.input_csv, args.dest_url,
                                         no_colon_suffix=args.no_colon_suffix,
                                         options=vars(args))
    return exit_code or 0


if __name__ == "__main__":
    sys.exit(main())
101 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/mesh_to_precomputed.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2017, Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import io
9 | import logging
10 | import pathlib
11 | import sys
12 |
13 | import nibabel
14 | import numpy as np
15 |
16 | import neuroglancer_scripts.accessor
17 | import neuroglancer_scripts.mesh
18 | import neuroglancer_scripts.precomputed_io as precomputed_io
19 |
20 | logger = logging.getLogger(__name__)
21 |
22 |
def mesh_file_to_precomputed(input_path, dest_url, mesh_name=None,
                             mesh_dir=None, coord_transform=None,
                             options=None):
    """Convert a mesh read by nibabel to Neuroglancer precomputed format.

    :param input_path: path to the input mesh (any nibabel-readable format,
        e.g. Gifti).
    :param str dest_url: base directory/URL of the output dataset.
    :param str mesh_name: name of the output fragment file (default: stem of
        the input file name).
    :param str mesh_dir: sub-directory for the mesh files; must match the
        "mesh" key of the info file if that key already exists
        (default: "mesh").
    :param coord_transform: optional 3x4 or 4x4 affine matrix applied to the
        vertex coordinates (translation in millimetres).
    :param dict options: accessor options (see
        ``neuroglancer_scripts.accessor.get_accessor_for_url``).
    :returns: 1 on error, None on success.
    """
    if options is None:
        # Avoid the shared mutable default argument anti-pattern.
        options = {}
    input_path = pathlib.Path(input_path)
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options
    )
    info = precomputed_io.get_IO_for_existing_dataset(accessor).info
    if mesh_dir is None:
        mesh_dir = "mesh"  # default value
    if "mesh" not in info:
        info["mesh"] = mesh_dir
        # Write the updated info file
        precomputed_io.get_IO_for_new_dataset(
            info, accessor, overwrite_info=True
        )
    if mesh_dir != info["mesh"]:
        logger.critical("The provided --mesh-dir value does not match the "
                        "value stored in the info file")
        return 1
    # Use .get() so a missing "type" key triggers the warning instead of a
    # KeyError.
    if info.get("type") != "segmentation":
        logger.warning('The dataset has type "image" instead of '
                       '"segmentation", Neuroglancer will not use the meshes.')

    if mesh_name is None:
        mesh_name = input_path.stem

    mesh = nibabel.load(str(input_path))

    points_list = mesh.get_arrays_from_intent("NIFTI_INTENT_POINTSET")
    assert len(points_list) == 1
    points = points_list[0].data

    triangles_list = mesh.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE")
    assert len(triangles_list) == 1
    triangles = triangles_list[0].data

    if coord_transform is not None:
        points_dtype = points.dtype
        points, triangles = neuroglancer_scripts.mesh.affine_transform_mesh(
            points, triangles, coord_transform
        )
        # Convert vertices back to their original type to avoid the warning
        # that save_mesh_as_precomputed prints when downcasting to float32.
        points = points.astype(np.promote_types(points_dtype, np.float32),
                               casting="same_kind")

    # Gifti uses millimetres, Neuroglancer expects nanometres.
    # points can be a read-only array, so we cannot use the *= operator.
    points = 1e6 * points

    io_buf = io.BytesIO()
    neuroglancer_scripts.mesh.save_mesh_as_precomputed(
        io_buf, points, triangles.astype("uint32")
    )
    accessor.store_file(mesh_dir + "/" + mesh_name, io_buf.getvalue(),
                        mime_type="application/octet-stream")
80 |
81 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param list argv: full argument vector (``argv[0]`` is the program name).
    :returns: the parsed arguments namespace, with ``coord_transform``
        converted to a 3x4 or 4x4 NumPy array if it was given.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Convert a mesh to Neuroglancer pre-computed mesh format.

This tool can convert any mesh format that is readable by nibabel, e.g. Gifti.

The resulting files are so-called mesh fragments, which will not be directly
visible by Neuroglancer. The fragments need to be linked to the integer labels
of the associated segmentation image, through small JSON files in the mesh
directory (see the link-mesh-fragments command).
""")
    parser.add_argument("input_mesh", type=pathlib.Path,
                        help="input mesh file to be read by Nibabel")
    parser.add_argument("dest_url",
                        help="base directory/URL of the output dataset")

    parser.add_argument("--mesh-dir", default=None,
                        help='sub-directory of the dataset where the mesh '
                        'file(s) will be written. If given, this value must '
                        'match the "mesh" key of the info file. It will be '
                        'written to the info file if not already present. '
                        '(default: "mesh").')
    parser.add_argument("--mesh-name", default=None,
                        help="name of the precomputed mesh file (default: "
                        "basename of the input mesh file)")
    parser.add_argument("--coord-transform",
                        help="affine transformation to be applied to the"
                        " coordinates, as a 4x4 matrix in homogeneous"
                        " coordinates, with the translation in millimetres,"
                        " in comma-separated row-major order"
                        " (the last row is always 0 0 0 1 and may be omitted)"
                        " (e.g. --coord-transform=1,0,0,0,0,1,0,0,0,0,1,0)")

    neuroglancer_scripts.accessor.add_argparse_options(parser,
                                                       write_chunks=False)

    args = parser.parse_args(argv[1:])
    # TODO factor in library
    if args.coord_transform is not None:
        try:
            # np.fromstring with sep="," is deprecated for text parsing;
            # build the array from the split string instead. This also
            # rejects malformed elements with a ValueError instead of
            # silently truncating the matrix.
            matrix = np.array(args.coord_transform.split(","), dtype=float)
        except ValueError as exc:
            parser.error(f"cannot parse --coord-transform: {exc.args[0]}"
                         )
        if len(matrix) == 12:
            matrix = matrix.reshape(3, 4)
        elif len(matrix) == 16:
            matrix = matrix.reshape(4, 4)
        else:
            parser.error("--coord-transform must have 12 or 16 elements"
                         f" ({len(matrix)} passed)")

        args.coord_transform = matrix

    return args
140 |
141 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    status = mesh_file_to_precomputed(args.input_mesh, args.dest_url,
                                      mesh_name=args.mesh_name,
                                      mesh_dir=args.mesh_dir,
                                      coord_transform=args.coord_transform,
                                      options=vars(args))
    return status or 0


if __name__ == "__main__":
    sys.exit(main())
156 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/scale_stats.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, 2023 Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | # Author: Xiao Gui
6 | #
7 | # This software is made available under the MIT licence, see LICENCE.txt.
8 |
9 | import sys
10 |
11 | import numpy as np
12 |
13 | import neuroglancer_scripts.accessor
14 | from neuroglancer_scripts import precomputed_io
15 | from neuroglancer_scripts.utils import readable_count
16 |
17 |
def show_scales_info(info):
    """Print per-scale statistics of a Neuroglancer "info" dict.

    For every scale and every chunk size, prints the number of chunks, the
    number of directories that an unsharded file layout would use, and the
    raw (uncompressed) data size, followed by a grand total.
    """
    total_size = 0
    total_chunks = 0
    total_directories = 0
    # Chunks are stored little-endian, hence the explicit byte order.
    dtype = np.dtype(info["data_type"]).newbyteorder("<")
    num_channels = info["num_channels"]
    for scale in info["scales"]:
        scale_name = scale["key"]
        size = scale["size"]

        shard_info = "Unsharded"
        shard_spec = scale.get("sharding")
        sharding_num_directories = None
        if shard_spec:
            # NOTE(review): a sharding spec without "shard_bits" would make
            # shard_bits None and raise a TypeError below — assumes the spec
            # always carries that key; TODO confirm.
            shard_bits = shard_spec.get("shard_bits")
            shard_info = f"Sharded: {shard_bits}bits"
            # One directory per shard, plus the scale directory itself.
            sharding_num_directories = 2 ** shard_bits + 1

        for chunk_size in scale["chunk_sizes"]:
            # Ceil division: number of chunks along each axis.
            size_in_chunks = [(s - 1) // cs + 1 for s,
                              cs in zip(size, chunk_size)]
            num_chunks = np.prod(size_in_chunks)
            # Unsharded layouts use one directory per (x, y) column, plus
            # the x-level directories.
            num_directories = (
                sharding_num_directories
                if sharding_num_directories is not None
                else size_in_chunks[0] * (1 + size_in_chunks[1]))
            size_bytes = np.prod(size) * dtype.itemsize * num_channels
            print(f"Scale {scale_name}, {shard_info}, chunk size {chunk_size}:"
                  f" {num_chunks:,d} chunks, {num_directories:,d} directories,"
                  f" raw uncompressed size {readable_count(size_bytes)}B")
            total_size += size_bytes
            total_chunks += num_chunks
            total_directories += num_directories
    print("---")
    print(f"Total: {total_chunks:,d} chunks, {total_directories:,d} "
          f"directories, raw uncompressed size {readable_count(total_size)}B")
54 |
55 |
def show_scale_file_info(url, options=None):
    """Show information about the scales of an existing dataset.

    :param str url: directory/URL containing the "info" file.
    :param dict options: accessor options (see
        ``neuroglancer_scripts.accessor.get_accessor_for_url``).
    """
    if options is None:
        # Avoid the shared mutable default argument anti-pattern.
        options = {}
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(url, options)
    io = precomputed_io.get_IO_for_existing_dataset(accessor)
    info = io.info
    show_scales_info(info)
62 |
63 |
def parse_command_line(argv):
    """Parse the script's command line."""
    import argparse
    description = """\
Show information about a list of scales in Neuroglancer "info" JSON file format
"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("url", default=".",
                        help='directory/URL containing the "info" file')

    neuroglancer_scripts.accessor.add_argparse_options(
        parser, write_chunks=False, write_files=False
    )
    return parser.parse_args(argv[1:])
79 |
80 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    exit_code = show_scale_file_info(args.url, options=vars(args))
    return exit_code or 0


if __name__ == "__main__":
    sys.exit(main())
91 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/volume_to_precomputed.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016, 2017, Forschungszentrum Juelich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import sys
9 |
10 | import neuroglancer_scripts.accessor
11 | import neuroglancer_scripts.chunk_encoding
12 | import neuroglancer_scripts.volume_reader
13 |
14 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param list argv: full argument vector (``argv[0]`` is the program name).
    :returns: the parsed arguments namespace.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Convert a volume from Nifti to Neuroglancer pre-computed format

Chunks are saved with the same data orientation as the input volume.

The image values will be scaled (additionally to any slope/intercept scaling
defined in the file header) if --input-max is specified. If --input-min is
omitted, it is assumed to be zero.
""")
    parser.add_argument("volume_filename",
                        help="source Nifti file containing the data")
    parser.add_argument("dest_url", help="directory/URL where the converted "
                        "dataset will be written")

    parser.add_argument("--generate-info", action="store_true",
                        help="generate an 'info_fullres.json' file containing "
                        "the metadata read for this volume, then exit")

    group = parser.add_argument_group("Option for reading the input file")
    group.add_argument("--ignore-scaling", action="store_true",
                       help="read the values as stored on disk, without "
                       "applying the data scaling (slope/intercept) from the "
                       "volume header")
    group.add_argument("--load-full-volume", action="store_true", default=True,
                       help=argparse.SUPPRESS)
    group.add_argument("--mmap", dest="load_full_volume", action="store_false",
                       help="use memory-mapping to avoid loading the full "
                       "volume in memory. This is useful if the input volume "
                       "is too large to fit memory, but it will slow down "
                       "the conversion significantly.")

    # TODO split into a module
    group = parser.add_argument_group(
        "Options for data type conversion and scaling")
    group.add_argument("--input-min", type=float, default=None,
                       help="input value that will be mapped to the minimum "
                       "output value")
    group.add_argument("--input-max", type=float, default=None,
                       help="input value that will be mapped to the maximum "
                       "output value")

    # Fixed missing space between "encoding" and "bits" in the help text.
    group.add_argument("--sharding", type=str, default=None,
                       help="enable sharding. Value must be int,int,int, "
                       "representing minishard encoding bits, shard encoding "
                       "bits and preshift bits respectively.")

    neuroglancer_scripts.accessor.add_argparse_options(parser)
    neuroglancer_scripts.chunk_encoding.add_argparse_options(parser,
                                                             allow_lossy=False)

    args = parser.parse_args(argv[1:])

    if args.input_max is None and args.input_min is not None:
        parser.error("--input-min cannot be specified if --input-max is "
                     "omitted")

    return args
76 |
77 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    if args.generate_info:
        # Only write the 'info_fullres.json' metadata, then stop.
        status = neuroglancer_scripts.volume_reader.volume_file_to_info(
            args.volume_filename,
            args.dest_url,
            ignore_scaling=args.ignore_scaling,
            input_min=args.input_min,
            input_max=args.input_max,
            options=vars(args)
        )
        return status or 0
    status = neuroglancer_scripts.volume_reader.volume_file_to_precomputed(
        args.volume_filename,
        args.dest_url,
        ignore_scaling=args.ignore_scaling,
        input_min=args.input_min,
        input_max=args.input_max,
        load_full_volume=args.load_full_volume,
        options=vars(args)
    )
    return status or 0


if __name__ == "__main__":
    sys.exit(main())
106 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/scripts/volume_to_precomputed_pyramid.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | #
3 | # Copyright (c) 2016–2018, Forschungszentrum Jülich GmbH
4 | # Author: Yann Leprince
5 | #
6 | # This software is made available under the MIT licence, see LICENCE.txt.
7 |
8 | import json
9 | import logging
10 | import sys
11 |
12 | import nibabel
13 |
14 | import neuroglancer_scripts.accessor
15 | import neuroglancer_scripts.chunk_encoding
16 | import neuroglancer_scripts.downscaling
17 | import neuroglancer_scripts.dyadic_pyramid
18 | import neuroglancer_scripts.scripts.generate_scales_info
19 | from neuroglancer_scripts import precomputed_io, volume_reader
20 |
21 | logger = logging.getLogger(__name__)
22 |
23 |
def volume_to_precomputed_pyramid(volume_filename,
                                  dest_url,
                                  downscaling_method="average",
                                  ignore_scaling=False,
                                  input_min=None,
                                  input_max=None,
                                  load_full_volume=True,
                                  dataset_type=None,
                                  encoding=None,
                                  options=None):
    """Convert a Nifti volume to a full dyadic precomputed pyramid.

    Reads the volume, generates the "info" metadata, writes the full-scale
    chunks, and computes all downscaled levels.

    :param str volume_filename: path of the Nifti file to convert.
    :param str dest_url: directory/URL where the dataset will be written.
    :param str downscaling_method: method passed to
        ``neuroglancer_scripts.downscaling.get_downscaler``.
    :param bool ignore_scaling: read values as stored on disk, without the
        slope/intercept scaling of the volume header.
    :param input_min: input value mapped to the minimum output value.
    :param input_max: input value mapped to the maximum output value.
    :param bool load_full_volume: load the whole volume in memory instead of
        memory-mapping it.
    :param dataset_type: "image" or "segmentation" (optional).
    :param encoding: chunk encoding (optional).
    :param dict options: extra options forwarded to the accessor, the
        downscaler, and the volume reader.
    :returns: 1 on error, None on success.
    """
    if options is None:
        # Avoid the shared mutable default argument anti-pattern.
        options = {}
    img = nibabel.load(volume_filename)
    formatted_info, _, _, _ = volume_reader.nibabel_image_to_info(
        img,
        ignore_scaling=ignore_scaling,
        input_min=input_min,
        input_max=input_max,
        options=options
    )
    info = json.loads(formatted_info)
    accessor = neuroglancer_scripts.accessor.get_accessor_for_url(
        dest_url, options
    )
    neuroglancer_scripts.scripts.generate_scales_info.set_info_params(
        info,
        dataset_type=dataset_type,
        encoding=encoding
    )
    neuroglancer_scripts.dyadic_pyramid.fill_scales_for_dyadic_pyramid(
        info
    )
    try:
        precomputed_writer = precomputed_io.get_IO_for_new_dataset(
            info, accessor
        )
    except neuroglancer_scripts.accessor.DataAccessError as exc:
        logger.error(f"Cannot write info: {exc}")
        return 1
    volume_reader.nibabel_image_to_precomputed(
        img, precomputed_writer,
        ignore_scaling, input_min, input_max,
        load_full_volume, options
    )
    downscaler = neuroglancer_scripts.downscaling.get_downscaler(
        downscaling_method, info, options
    )
    neuroglancer_scripts.dyadic_pyramid.compute_dyadic_scales(
        precomputed_writer, downscaler
    )
72 |
73 |
def parse_command_line(argv):
    """Parse the script's command line.

    :param list argv: full argument vector (``argv[0]`` is the program name).
    :returns: the parsed arguments namespace.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="""\
Convert a volume from Nifti to Neuroglancer pre-computed format.

Chunks are saved with the same data orientation as the input volume.

The image values will be scaled (after any slope/intercept scaling
defined in the file header) if --input-max is specified. If --input-min
is omitted, it is assumed to be zero.
""")
    parser.add_argument("volume_filename",
                        help="source Nifti file containing the data")
    parser.add_argument("dest_url", help="directory/URL where the converted "
                        "dataset will be written")

    group = parser.add_argument_group("Option for reading the input file")
    group.add_argument("--ignore-scaling", action="store_true",
                       help="read the values as stored on disk, without "
                       "applying the data scaling (slope/intercept) from the "
                       "volume header")
    group.add_argument("--load-full-volume", action="store_true", default=True,
                       help=argparse.SUPPRESS)
    group.add_argument("--mmap", dest="load_full_volume", action="store_false",
                       help="use memory-mapping to avoid loading the full "
                       "volume in memory. This is useful if the input volume "
                       "is too large to fit memory, but it will slow down "
                       "the conversion significantly.")

    # TODO split into a module
    group = parser.add_argument_group(
        "Options for data type conversion and scaling")
    group.add_argument("--input-min", type=float, default=None,
                       help="input value that will be mapped to the minimum "
                       "output value")
    group.add_argument("--input-max", type=float, default=None,
                       help="input value that will be mapped to the maximum "
                       "output value")
    # Unlike generate-scales-info, this script takes no fullres_info file:
    # the metadata is generated from the input volume itself, so the help
    # text below does not mention a fullres_info file.
    group.add_argument("--type", default=None,
                       choices=("image", "segmentation"),
                       help="Type of dataset (image or segmentation). By"
                       " default this is deduced from the metadata of the"
                       " input volume, with a fallback to image.")
    group.add_argument("--encoding", default=None,
                       choices=("raw", "jpeg", "compressed_segmentation"),
                       help="data encoding (raw, jpeg, or"
                       " compressed_segmentation). By default this is"
                       " deduced from the metadata of the input volume, with"
                       " a fallback to raw.")

    neuroglancer_scripts.accessor.add_argparse_options(parser)
    neuroglancer_scripts.downscaling.add_argparse_options(parser)
    neuroglancer_scripts.chunk_encoding.add_argparse_options(parser,
                                                             allow_lossy=False)

    args = parser.parse_args(argv[1:])

    if args.input_max is None and args.input_min is not None:
        parser.error("--input-min cannot be specified if --input-max is "
                     "omitted")

    return args
138 |
139 |
def main(argv=sys.argv):
    """The script's entry point."""
    import neuroglancer_scripts.utils
    neuroglancer_scripts.utils.init_logging_for_cmdline()
    args = parse_command_line(argv)
    status = volume_to_precomputed_pyramid(
        args.volume_filename,
        args.dest_url,
        downscaling_method=args.downscaling_method,
        ignore_scaling=args.ignore_scaling,
        input_min=args.input_min,
        input_max=args.input_max,
        load_full_volume=args.load_full_volume,
        dataset_type=args.type,
        encoding=args.encoding,
        options=vars(args)
    )
    return status or 0


if __name__ == "__main__":
    sys.exit(main())
161 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/sharded_http_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023, 2024 Forschungszentrum Juelich GmbH
2 | # Author: Xiao Gui
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import json
7 | from typing import Dict
8 |
9 | import numpy as np
10 | import requests
11 |
12 | import neuroglancer_scripts.http_accessor
13 | from neuroglancer_scripts.sharded_base import (
14 | CMCReadWrite,
15 | ShardCMC,
16 | ShardedAccessorBase,
17 | ShardedIOError,
18 | ShardedScaleBase,
19 | ShardSpec,
20 | ShardVolumeSpec,
21 | )
22 |
23 |
class HttpShard(ShardCMC):
    """Read-only access to a single shard of a sharded dataset over HTTP.

    A shard stores many chunks, addressed by compressed Morton code (CMC)
    through minishard indices (see :class:`ShardCMC`).
    """

    # Legacy shards are stored as two files (.index and .data); the current
    # layout is the concatenation of [.index, .data] into a single .shard
    # file.
    is_legacy = False

    can_read_cmc = True
    can_write_cmc = False

    def __init__(self, base_url, session: requests.Session,
                 shard_key: np.uint64,
                 shard_spec: ShardSpec):
        # The session is shared with the owning accessor so that HTTP
        # connections are reused across requests.
        self._session = session
        self.base_url = base_url.rstrip("/") + "/"
        super().__init__(shard_key, shard_spec)
        # Eagerly load the minishard indices so fetch_cmc_chunk can resolve
        # chunks without further index round-trips.
        self.populate_minishard_dict()
        assert self.can_read_cmc

    def file_exists(self, filepath):
        """Test existence of a file below base_url with an HTTP HEAD request.

        Raises (via ``raise_for_status``) for any status other than 200/404.
        """
        resp = self._session.head(f"{self.base_url}{filepath}")
        if resp.status_code in (200, 404):
            return resp.status_code == 200
        resp.raise_for_status()
        return False

    def read_bytes(self, offset: int, length: int) -> bytes:
        """Read a byte range from the shard using an HTTP Range request.

        :param int offset: offset relative to the start of the shard (i.e.
            relative to the concatenated [.index, .data] layout).
        :param int length: number of bytes to read.
        :raises ShardedIOError: if the shard is not readable, or if the
            server returns a body of unexpected length.
        """
        if not self.can_read_cmc:
            raise ShardedIOError("Shard cannot read")

        file_url = f"{self.base_url}{self.shard_key_str}"
        if self.is_legacy:
            # Legacy layout: offsets below the header length fall in the
            # .index file, the rest in .data (offset re-based accordingly).
            if offset < self.header_byte_length:
                file_url += ".index"
            else:
                file_url += ".data"
                offset = offset - self.header_byte_length
        else:
            file_url += ".shard"

        # HTTP Range is inclusive on both ends.
        range_value = f"bytes={offset}-{offset+length-1}"
        resp = self._session.get(file_url, headers={
            "Range": range_value
        })
        resp.raise_for_status()
        content = resp.content
        # A server ignoring the Range header (or a truncated response) would
        # silently corrupt chunk decoding, so verify the length.
        if len(content) != length:
            raise ShardedIOError(f"Getting {file_url} error. Expecting "
                                 f"{length} bytes (Range: {range_value}), "
                                 f"but got {len(content)}.")
        return content

    def fetch_cmc_chunk(self, cmc: np.uint64):
        """Return the bytes of the chunk with the given compressed Morton
        code, resolved through its minishard."""
        minishard_key = self.get_minishard_key(cmc)
        assert minishard_key in self.minishard_dict
        return self.minishard_dict[minishard_key].fetch_cmc_chunk(cmc)
79 |
80 |
class HttpShardedScale(ShardedScaleBase):
    """One sharded scale served over HTTP, with a cache of opened shards."""

    can_read_cmc = True
    can_write_cmc = False

    def __init__(self, base_url: str, session: requests.Session, key: str,
                 shard_spec: ShardSpec,
                 shard_volume_spec: ShardVolumeSpec):
        super().__init__(key, shard_spec, shard_volume_spec)
        self.base_url = base_url
        self._session = session
        # Shards already opened for this scale, keyed by shard key.
        self.shard_dict: Dict[np.uint64, HttpShard] = {}

    def get_shard(self, shard_key: np.uint64) -> CMCReadWrite:
        # Open shards lazily and memoize them by key.
        try:
            return self.shard_dict[shard_key]
        except KeyError:
            shard = HttpShard(f"{self.base_url}{self.key}/",
                              self._session,
                              shard_key,
                              self.shard_spec)
            self.shard_dict[shard_key] = shard
            return shard
102 |
103 |
class ShardedHttpAccessor(neuroglancer_scripts.http_accessor.HttpAccessor,
                          ShardedAccessorBase):
    """Read chunks of a sharded precomputed dataset over HTTP."""

    def __init__(self, base_url):
        neuroglancer_scripts.http_accessor.HttpAccessor.__init__(self,
                                                                 base_url)
        # Per-scale readers, created lazily and cached by scale key.
        self.shard_scale_dict: Dict[str, HttpShardedScale] = {}
        self.info = json.loads(self.fetch_file("info"))

    def fetch_chunk(self, key, chunk_coords):
        try:
            sharded_scale = self.shard_scale_dict[key]
        except KeyError:
            sharding = self.get_sharding_spec(key)
            scale = self.get_scale(key)
            chunk_sizes, = scale.get("chunk_sizes", [[]])
            sizes = scale.get("size")
            sharded_scale = HttpShardedScale(self.base_url,
                                             self._session,
                                             key,
                                             ShardSpec(**sharding),
                                             ShardVolumeSpec(chunk_sizes,
                                                             sizes))
            self.shard_scale_dict[key] = sharded_scale
        return sharded_scale.fetch_chunk(chunk_coords)
126 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import json
7 |
8 | import numpy as np
9 |
10 | __all__ = [
11 | "nifti_to_neuroglancer_transform",
12 | "matrix_as_compact_urlsafe_json",
13 | ]
14 |
15 |
def nifti_to_neuroglancer_transform(nifti_transformation_matrix, voxel_size):
    """Compensate the half-voxel shift between Neuroglancer and Nifti.

    Nifti specifies that the transformation matrix (legacy, qform, or sform)
    gives the spatial coordinates of the *centre* of a voxel, while the
    Neuroglancer "transform" matrix specifies the *corner* of voxels.

    This function compensates the resulting half-voxel shift by adjusting the
    translation parameters accordingly. The input matrix is not modified; a
    new float array is returned.
    """
    transform = np.array(nifti_transformation_matrix, copy=True, dtype=float)
    half_voxel = 0.5 * np.asarray(voxel_size)
    transform[:3, 3] -= transform[:3, :3] @ half_voxel
    return transform
29 |
30 |
def matrix_as_compact_urlsafe_json(matrix):
    """Serialize a matrix to compact, URL-safe JSON.

    Numbers whose string representation has a trailing ".0" are emitted as
    integers; separators avoid characters that need URL-escaping.
    """
    def _as_compact_number(value):
        if str(value).endswith(".0") and int(value) == value:
            return int(value)
        return value

    rows = [[_as_compact_number(value) for value in row] for row in matrix]
    return json.dumps(rows, indent=None, separators=('_', ':'))
37 |
--------------------------------------------------------------------------------
/src/neuroglancer_scripts/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 |
7 | """Miscellaneous utility functions.
8 | """
9 |
10 | import collections
11 |
12 | import numpy as np
13 |
14 | __all__ = [
15 | "ceil_div",
16 | "permute",
17 | "invert_permutation",
18 | "readable_count",
19 | "LENGTH_UNITS",
20 | "format_length",
21 | ]
22 |
23 |
def ceil_div(a, b):
    """Ceil integer division (``ceil(a / b)`` using integer arithmetic)."""
    # Negated floor division of the negated numerator rounds up.
    return -(-a // b)
27 |
28 |
def permute(seq, p):
    """Permute the elements of a sequence according to a permutation.

    :param seq: a sequence to be permuted
    :param p: a permutation (sequence of integers between ``0`` and
        ``len(seq) - 1``)
    :returns: ``tuple(seq[i] for i in p)``
    :rtype: tuple
    """
    return tuple(map(seq.__getitem__, p))
39 |
40 |
def invert_permutation(p):
    """Invert a permutation.

    :param p: a permutation (sequence of integers between ``0`` and
        ``len(p) - 1``)
    :returns: an array ``s``, where ``s[i]`` gives the index of ``i`` in ``p``
    :rtype: numpy.ndarray
    """
    p = np.asarray(p)
    inverse = np.empty_like(p)
    # Scatter each position to the slot named by its value.
    inverse[p] = np.arange(len(p))
    return inverse
53 |
54 |
_IEC_PREFIXES = [
    (2 ** 10, "ki"),
    (2 ** 20, "Mi"),
    (2 ** 30, "Gi"),
    (2 ** 40, "Ti"),
    (2 ** 50, "Pi"),
    (2 ** 60, "Ei"),
]


def readable_count(count):
    """Format a number to a human-readable string with an IEC binary prefix.

    The resulting string has a minimum of 2 significant digits. It is never
    longer than 6 characters, unless the input exceeds 2**60. You are expected
    to concatenate the result with the relevant unit (e.g. B for bytes):

    >>> readable_count(512) + "B"
    '512 B'
    >>> readable_count(1e10) + "B"
    '9.3 GiB'

    :param int count: number to be converted (must be >= 0)
    :returns: a string representation of the number with an IEC binary prefix
    :rtype: str

    """
    assert count >= 0
    text = format(count, ".0f")
    if len(text) <= 3:
        return text + " "
    for factor, prefix in _IEC_PREFIXES:
        # Keep one decimal place as long as the result still fits in three
        # characters; drop it once the scaled value reaches two digits.
        precision = ".0f" if count > 10 * factor else ".1f"
        text = format(count / factor, precision)
        if len(text) <= 3:
            return f"{text} {prefix}"
    # Fallback: use the last prefix
    factor, prefix = _IEC_PREFIXES[-1]
    return f"{count / factor:,.0f} {prefix}"
96 |
97 |
LENGTH_UNITS = collections.OrderedDict([
    ("km", 1e-12),
    ("m", 1e-9),
    ("mm", 1e-6),
    ("um", 1e-3),
    ("nm", 1.),
    ("pm", 1e3),
])
"""List of physical units of length."""


def format_length(length_nm, unit):
    """Format a length according to the provided unit (input in nanometres).

    :param float length_nm: a length in nanometres
    :param str unit: must be one of ``LENGTH_UNITS.keys``
    :return: the formatted length, rounded to the specified unit (no fractional
        part is printed)
    :rtype: str
    """
    scaled = length_nm * LENGTH_UNITS[unit]
    return f"{scaled:.0f}{unit}"
119 |
120 |
def init_logging_for_cmdline():
    """Set up a sane logging configuration for command-line tools.

    This must be called early in the main function.
    """
    import logging
    log_format = "%(levelname)s: %(message)s"
    logging.basicConfig(format=log_format, level="INFO")
128 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # tox (https://tox.readthedocs.io/) is a tool for running tests
2 | # in multiple virtualenvs. This configuration file will run the
3 | # test suite on all supported python versions. To use it, "pip install tox"
4 | # and then run "tox" from this directory.
5 |
6 | # NOTE: remember to update the classifiers in setup.cfg when Python versions
7 | # are added/removed
8 | [tox]
9 | envlist = py36, py37, py38, py39, py310, py311, py312, codestyle, docs, cov
10 | isolated_build = True
11 |
12 | [gh-actions]
13 | python =
14 | 3.6: py36
15 | 3.7: py37
16 | 3.8: py38
17 | 3.9: py39
18 | 3.10: py310, cov # Update .github/workflows/tox.yaml if cov is moved
19 | 3.11: py311, codestyle
20 | 3.12: py312
20 |
21 | [testenv]
22 | commands = pytest {posargs}
23 | # Remember to keep synchronized with extras_require in setup.cfg
24 | deps =
25 | pytest
26 | requests-mock
27 |
28 | [testenv:docs]
29 | changedir = docs
30 | deps = sphinx
31 | commands = python -m sphinx -W -n -b html -d {envtmpdir}/doctrees . {envtmpdir}/html
32 |
33 | [testenv:cov]
34 | commands = pytest --cov=neuroglancer_scripts --cov-report=xml
35 | deps =
36 | {[testenv]deps}
37 | pytest-cov
38 |
39 | [testenv:codestyle]
40 | # pre-commit needs to clone Git repositories over https
41 | passenv = http_proxy,https_proxy,no_proxy
42 | commands = pre-commit run --all-files
43 | deps =
44 | pre-commit
45 |
46 | [pytest]
47 | filterwarnings =
48 | default
49 | error:::neuroglancer_scripts
50 |
--------------------------------------------------------------------------------
/unit_tests/test_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, 2023 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | # Author: Xiao Gui
4 | #
5 | # This software is made available under the MIT licence, see LICENCE.txt.
6 |
7 | import argparse
8 | import json
9 | import pathlib
10 | from unittest.mock import patch
11 |
12 | import pytest
13 | from neuroglancer_scripts.accessor import (
14 | Accessor,
15 | DataAccessError,
16 | URLError,
17 | add_argparse_options,
18 | convert_file_url_to_pathname,
19 | get_accessor_for_url,
20 | )
21 | from neuroglancer_scripts.file_accessor import FileAccessor
22 | from neuroglancer_scripts.http_accessor import HttpAccessor
23 | from neuroglancer_scripts.sharded_base import ShardedAccessorBase
24 | from neuroglancer_scripts.sharded_file_accessor import ShardedFileAccessor
25 | from neuroglancer_scripts.sharded_http_accessor import ShardedHttpAccessor
26 |
27 |
@pytest.mark.parametrize("accessor_options", [
    {},
    {"gzip": True, "flat": False, "unknown_option": None},
])
def test_get_accessor_for_url(accessor_options):
    """URL dispatch: plain paths, file:// and http:// URLs, bad schemes."""
    assert isinstance(get_accessor_for_url(""), Accessor)

    file_acc = get_accessor_for_url(".", accessor_options)
    assert isinstance(file_acc, FileAccessor)
    assert file_acc.base_path == pathlib.Path(".")

    file_acc = get_accessor_for_url("file:///absolute", accessor_options)
    assert isinstance(file_acc, FileAccessor)
    assert file_acc.base_path == pathlib.Path("/absolute")

    http_acc = get_accessor_for_url("http://example/", accessor_options)
    assert isinstance(http_acc, HttpAccessor)
    assert http_acc.base_url == "http://example/"

    with pytest.raises(URLError, match="scheme"):
        get_accessor_for_url("weird://", accessor_options)
    with pytest.raises(URLError, match="decod"):
        get_accessor_for_url("file:///%ff", accessor_options)
46 | get_accessor_for_url("file:///%ff", accessor_options)
47 |
48 |
# Info for a dataset whose single scale declares neuroglancer sharding.
valid_info_str = json.dumps({
    "scales": [
        {
            "key": "foo",
            "sharding": {"@type": "neuroglancer_uint64_sharded_v1"},
        }
    ]
})
59 |
60 |
@patch.object(ShardedAccessorBase, "info_is_sharded")
@pytest.mark.parametrize("scheme", ["https://", "http://", ""])
@pytest.mark.parametrize("fetch_file_returns, info_is_sharded_returns, exp", [
    (DataAccessError("foobar"), None, False),
    ('mal formed json', None, False),
    (valid_info_str, False, False),
    (valid_info_str, True, True),
])
def test_sharded_accessor_via_info(info_is_sharded_mock, fetch_file_returns,
                                   info_is_sharded_returns, exp, scheme):
    """A sharded accessor is returned iff the info file can be fetched,
    parsed as JSON, and declares sharding."""
    if isinstance(info_is_sharded_returns, Exception):
        info_is_sharded_mock.side_effect = info_is_sharded_returns
    else:
        info_is_sharded_mock.return_value = info_is_sharded_returns

    assert scheme in ("https://", "http://", "file://", "")
    if scheme in ("https://", "http://"):
        plain_cls, sharded_cls = HttpAccessor, ShardedHttpAccessor
    else:
        plain_cls, sharded_cls = FileAccessor, ShardedFileAccessor

    with patch.object(plain_cls, "fetch_file") as fetch_file_mock:
        if isinstance(fetch_file_returns, Exception):
            fetch_file_mock.side_effect = fetch_file_returns
        else:
            fetch_file_mock.return_value = fetch_file_returns

        accessor = get_accessor_for_url(f"{scheme}example/")
        assert isinstance(accessor, sharded_cls if exp else plain_cls)

    if info_is_sharded_returns is None:
        info_is_sharded_mock.assert_not_called()
    else:
        info_is_sharded_mock.assert_called_once()
97 |
98 |
@pytest.mark.parametrize("write_chunks", [True, False])
@pytest.mark.parametrize("write_files", [True, False])
def test_add_argparse_options(write_chunks, write_files):
    """The default option values must be directly usable by the factory."""
    parser = argparse.ArgumentParser()
    add_argparse_options(parser,
                         write_chunks=write_chunks,
                         write_files=write_files)
    parsed_args = parser.parse_args([])
    get_accessor_for_url(".", vars(parsed_args))
109 |
110 |
def test_add_argparse_options_parsing():
    """Explicit command-line flags must parse into the expected values."""
    parser = argparse.ArgumentParser()
    add_argparse_options(parser)
    assert parser.parse_args(["--flat"]).flat is True
    assert parser.parse_args(["--no-gzip"]).gzip is False
119 |
120 |
def test_convert_file_url_to_pathname():
    """Plain paths pass through; file:// URLs are decoded; others raise."""
    for passthrough_path in [
        "",
        "relative/path",
        "relative/../path",
        "/path/with spaces",
        "/absolute/path",
    ]:
        assert convert_file_url_to_pathname(passthrough_path) \
            == passthrough_path

    assert convert_file_url_to_pathname("file:///") == "/"
    with pytest.raises(URLError):
        convert_file_url_to_pathname("http://")
    with pytest.raises(URLError):
        convert_file_url_to_pathname("file://invalid/")
    assert convert_file_url_to_pathname("file:///test") == "/test"
    assert convert_file_url_to_pathname("file://localhost/test") == "/test"
    assert (convert_file_url_to_pathname("file:///with%20space")
            == "/with space")
    assert convert_file_url_to_pathname("precomputed://file:///") == "/"
139 |
--------------------------------------------------------------------------------
/unit_tests/test_data_types.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import numpy as np
7 | import pytest
8 | from neuroglancer_scripts.data_types import (
9 | NG_DATA_TYPES,
10 | NG_INTEGER_DATA_TYPES,
11 | get_chunk_dtype_transformer,
12 | get_dtype,
13 | )
14 |
15 |
def test_data_types_lists():
    """Every advertised type must be a valid NumPy dtype; the integer list
    must contain only integer dtypes."""
    for data_type in NG_DATA_TYPES:
        np.dtype(data_type)  # raises TypeError on an invalid name
    assert all(np.issubdtype(data_type, np.integer)
               for data_type in NG_INTEGER_DATA_TYPES)
21 |
22 |
@pytest.mark.parametrize("float_dtype", [
    np.float32,
    np.float64,
    pytest.param("float96",
                 marks=pytest.mark.skipif(not hasattr(np, "float96"),
                                          reason="no float96 dtype")),
    pytest.param("float128",
                 marks=pytest.mark.skipif(not hasattr(np, "float128"),
                                          reason="no float128 dtype")),
])
def test_dtype_conversion_float_to_int(float_dtype):
    """Floats must be rounded and clamped to the target integer range,
    without modifying the input array in-place."""
    reference_test_data = np.array(
        [-np.inf, -100, 0.4, 0.6, 2**8, 2**16, 2**32, 2**64, np.inf],
        dtype=float_dtype
    )
    test_data = np.copy(reference_test_data)
    expected_per_dtype = {
        "uint8": [0, 0, 0, 1, 255, 255, 255, 255, 255],
        "uint16": [0, 0, 0, 1, 256, 65535, 65535, 65535, 65535],
        "uint32": [0, 0, 0, 1, 256, 65536, 2**32-1, 2**32-1, 2**32-1],
    }
    for target_dtype, expected_values in expected_per_dtype.items():
        t = get_chunk_dtype_transformer(test_data.dtype, target_dtype)
        assert np.array_equal(
            t(test_data),
            np.array(expected_values, dtype=target_dtype)
        )
        # Ensure that the input data was not modified in-place
        assert np.array_equal(test_data, reference_test_data)

    # Use a different test for uint64: tests for 2**64 and +infinity are
    # expected to fail due to a bug in NumPy, see
    # test_dtype_conversion_float_to_uint64 below.
    uint64_test_data = np.array(
        [-np.inf, -100, 0.4, 0.6, 2**63],
        dtype=float_dtype
    )
    t = get_chunk_dtype_transformer(uint64_test_data.dtype, "uint64")
    assert np.array_equal(
        t(uint64_test_data),
        np.array([0, 0, 0, 1, 2**63], dtype="uint64")
    )
75 |
76 |
@pytest.mark.xfail(reason="known bug in NumPy", strict=True)
def test_dtype_conversion_float_to_uint64():
    """Conversion to uint64 loses precision near the top of the range.

    Dtype promotion between a 64-bit (u)int and any float yields float64,
    which cannot represent every integer above 2**53, so clamping values
    such as 2**64 or +inf is inexact (see e.g.
    https://github.com/numpy/numpy/issues/8851).
    """
    test_data = np.array([2**64, np.inf])
    t = get_chunk_dtype_transformer(test_data.dtype, "uint64")
    expected = np.array([2**64 - 1, 2**64 - 1], dtype="uint64")
    assert np.array_equal(t(test_data), expected)
90 |
91 |
@pytest.mark.parametrize("dtype", NG_DATA_TYPES)
def test_dtype_conversion_identity(dtype):
    """Converting to the same dtype must return the data unchanged."""
    if np.issubdtype(dtype, np.integer):
        type_info = np.iinfo(dtype)
        test_data = np.array([type_info.min, 0, type_info.max], dtype=dtype)
    else:
        type_info = np.finfo(dtype)
        test_data = np.array([type_info.min, 0, type_info.max, np.inf],
                             dtype=dtype)
    identity = get_chunk_dtype_transformer(dtype, dtype)
    assert np.array_equal(identity(test_data), test_data)
104 |
105 |
@pytest.mark.parametrize("dtype", NG_INTEGER_DATA_TYPES)
def test_dtype_conversion_integer_upcasting(dtype):
    """Upcasting an integer type to uint64 must preserve all values."""
    iinfo_uint64 = np.iinfo(np.uint64)
    iinfo = np.iinfo(dtype)
    # The test will need to be rewritten if NG_INTEGER_DATA_TYPES ever
    # includes signed data types
    assert iinfo_uint64.max >= iinfo.max
    assert iinfo_uint64.min <= iinfo.min

    test_data = np.array([iinfo.min, iinfo.max], dtype=dtype)
    upcast = get_chunk_dtype_transformer(test_data.dtype, "uint64")
    assert np.array_equal(upcast(test_data), test_data)
117 |
118 |
@pytest.mark.parametrize("dtype", NG_INTEGER_DATA_TYPES)
def test_dtype_conversion_integer_downcasting(dtype):
    """Downcasting from uint64 must clamp values to the target range."""
    iinfo_uint64 = np.iinfo(np.uint64)
    iinfo = np.iinfo(dtype)
    # The test will need to be rewritten if NG_INTEGER_DATA_TYPES ever
    # includes signed data types
    assert iinfo_uint64.max >= iinfo.max
    assert iinfo_uint64.min <= iinfo.min

    test_data = np.array([iinfo_uint64.min, iinfo.min,
                          iinfo.max, iinfo_uint64.max], dtype=np.uint64)
    downcast = get_chunk_dtype_transformer(test_data.dtype, dtype)
    expected = np.array([iinfo.min, iinfo.min, iinfo.max, iinfo.max],
                        dtype=dtype)
    assert np.array_equal(downcast(test_data), expected)
135 |
136 |
def test_unsupported_tupled_dtype():
    """A structured dtype whose fields are not R, G, B must be rejected."""
    rng = np.random.default_rng()
    raw_values = rng.integers(256, size=(3, 3, 3, 3), dtype=np.uint8)
    wrong_type = np.dtype([('A', 'u1'), ('B', 'u1'), ('C', 'u1')])
    structured = raw_values.copy().view(dtype=wrong_type).reshape((3, 3, 3))

    with pytest.raises(NotImplementedError):
        get_dtype(structured.dtype)
145 |
146 |
def test_supported_tupled_dtype():
    """A structured dtype with R, G, B uint8 fields is accepted as RGB."""
    rng = np.random.default_rng()
    raw_values = rng.integers(256, size=(3, 3, 3, 3), dtype=np.uint8)
    rgb_type = np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])
    structured = raw_values.copy().view(dtype=rgb_type).reshape((3, 3, 3))

    dtype, is_rgb = get_dtype(structured.dtype)
    assert dtype.name == 'uint8'
    assert is_rgb
155 |
--------------------------------------------------------------------------------
/unit_tests/test_downscaling.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import numpy as np
7 | import pytest
8 | from neuroglancer_scripts.downscaling import (
9 | AveragingDownscaler,
10 | Downscaler,
11 | MajorityDownscaler,
12 | StridingDownscaler,
13 | add_argparse_options,
14 | get_downscaler,
15 | )
16 |
17 |
@pytest.mark.parametrize("method", ["average", "majority", "stride"])
@pytest.mark.parametrize("options", [
    {},
    {"outside_value": 1.0, "unknown_option": None},
])
def test_get_downscaler(method, options):
    """Every method name must yield a Downscaler, even with unknown options."""
    assert isinstance(get_downscaler(method, options), Downscaler)
26 |
27 |
def test_get_downscaler_auto_image():
    """The 'auto' method must pick averaging for image-typed datasets."""
    downscaler = get_downscaler("auto", info={"type": "image"})
    assert isinstance(downscaler, AveragingDownscaler)
31 |
32 |
def test_get_downscaler_auto_segmentation():
    """The 'auto' method must pick striding for segmentation datasets."""
    downscaler = get_downscaler("auto", info={"type": "segmentation"})
    assert isinstance(downscaler, StridingDownscaler)
36 |
37 |
def test_add_argparse_options():
    """The downscaling command-line options must parse correctly."""
    import argparse
    parser = argparse.ArgumentParser()
    add_argparse_options(parser)

    # The default values must be directly usable
    args = parser.parse_args([])
    get_downscaler(args.downscaling_method, {"type": "image"}, vars(args))
    args = parser.parse_args(["--downscaling-method", "auto"])
    assert args.downscaling_method == "auto"

    # Explicit methods must round-trip through the parser
    args = parser.parse_args(["--downscaling-method", "average"])
    assert args.downscaling_method == "average"
    assert args.outside_value is None
    args = parser.parse_args(["--downscaling-method", "average",
                              "--outside-value", "255"])
    assert args.downscaling_method == "average"
    assert args.outside_value == 255.
    for method in ("majority", "stride"):
        args = parser.parse_args(["--downscaling-method", method])
        assert args.downscaling_method == method
59 |
60 |
@pytest.mark.parametrize("dx", [1, 2])
@pytest.mark.parametrize("dy", [1, 2])
@pytest.mark.parametrize("dz", [1, 2])
def test_dyadic_downscaling(dx, dy, dz):
    """Every downscaler must invert a dyadic upscaling, even when the
    upscaled chunk is truncated by one voxel along the scaled axes."""
    scaling_factors = dx, dy, dz
    lowres_chunk = np.arange(3 * 7 * 4 * 2, dtype="f").reshape(2, 4, 7, 3)
    upscaled_chunk = np.empty((2, dz * 4, dy * 7, dx * 3),
                              dtype=lowres_chunk.dtype)
    for x, y, z in np.ndindex(scaling_factors):
        upscaled_chunk[:, z::dz, y::dy, x::dx] = lowres_chunk

    # Shorten the chunk by 1 voxel in every direction where it was upscaled
    truncation_slicing = (np.s_[:],) + tuple(
        np.s_[:-1] if s == 2 else np.s_[:] for s in reversed(scaling_factors)
    )
    truncated_chunk = upscaled_chunk[truncation_slicing]

    for downscaler_class in (StridingDownscaler,
                             AveragingDownscaler,
                             MajorityDownscaler):
        downscaler = downscaler_class()
        assert np.array_equal(
            downscaler.downscale(upscaled_chunk, scaling_factors),
            lowres_chunk
        )
        assert np.array_equal(
            downscaler.downscale(truncated_chunk, scaling_factors),
            lowres_chunk
        )
95 |
96 |
def test_averaging_downscaler_rounding():
    """An average of 0.75 must round up to 1 for integer chunks."""
    downscaler = AveragingDownscaler()
    chunk = np.array([[1, 1], [1, 0]], dtype="uint8").reshape(1, 2, 2, 1)
    expected = np.array([1], dtype="uint8").reshape(1, 1, 1, 1)
    assert np.array_equal(downscaler.downscale(chunk, (1, 2, 2)), expected)
102 |
--------------------------------------------------------------------------------
/unit_tests/test_dyadic_pyramid.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import logging
7 |
8 | from neuroglancer_scripts.dyadic_pyramid import (
9 | choose_unit_for_key,
10 | fill_scales_for_dyadic_pyramid,
11 | )
12 |
13 |
def test_choose_unit_for_key():
    """Each resolution (in nm) must map to the expected display unit."""
    expected_units = {
        1e-3: "pm",
        1: "nm",
        1e3: "um",
        1e6: "mm",
        1e9: "m",
    }
    for resolution_nm, unit in expected_units.items():
        assert choose_unit_for_key(resolution_nm) == unit
20 |
21 |
def test_fill_scales_for_dyadic_pyramid_small_volume():
    """A single-voxel volume must still yield a valid scale list."""
    info = fill_scales_for_dyadic_pyramid({"scales": [{
        "size": [1, 1, 1],
        "resolution": [1, 1, 1],
    }]})
    scales = info["scales"]
    assert scales
    assert scales[0]["size"] == [1, 1, 1]
    assert scales[0]["resolution"] == [1, 1, 1]
    assert "chunk_sizes" in scales[0]
31 |
32 |
def test_fill_scales_for_dyadic_pyramid_simple_isotropic():
    """An isotropic volume is halved along every axis at each scale."""
    info = fill_scales_for_dyadic_pyramid({"scales": [{
        "size": [256, 256, 256],
        "resolution": [1e6, 1e6, 1e6],
    }]}, target_chunk_size=64)
    first_scale, second_scale = info["scales"][0], info["scales"][1]
    assert first_scale["size"] == [256, 256, 256]
    assert first_scale["resolution"] == [1e6, 1e6, 1e6]
    assert first_scale["chunk_sizes"] == [[64, 64, 64]]
    assert second_scale["size"] == [128, 128, 128]
    assert second_scale["resolution"] == [2e6, 2e6, 2e6]
    assert second_scale["chunk_sizes"] == [[64, 64, 64]]
44 |
45 |
def test_fill_scales_for_dyadic_pyramid_max_scales():
    """The max_scales parameter must cap the number of generated scales."""
    info = fill_scales_for_dyadic_pyramid({"scales": [{
        "size": [1024, 1024, 1024],
        "resolution": [1e6, 1e6, 1e6],
    }]}, target_chunk_size=64, max_scales=2)
    assert len(info["scales"]) == 2
52 |
53 |
def chunk_extent_anisotropy(resolution, chunk_size):
    """Ratio of the largest to the smallest physical chunk extent.

    Axes with a single-voxel chunk size are ignored.
    """
    extents = [res * size
               for res, size in zip(resolution, chunk_size)
               if size > 1]
    return max(extents) / min(extents)
58 |
59 |
def test_fill_scales_for_dyadic_pyramid_simple_anisotropic():
    """Chunks of the anisotropic scale must stay nearly isotropic in
    physical extent; later scales become fully isotropic."""
    info = fill_scales_for_dyadic_pyramid({"scales": [{
        "size": [512, 256, 256],
        "resolution": [1e6, 2e6, 2e6],
    }]}, target_chunk_size=64)
    scales = info["scales"]
    assert scales[0]["size"] == [512, 256, 256]
    assert scales[0]["resolution"] == [1e6, 2e6, 2e6]
    assert chunk_extent_anisotropy(scales[0]["resolution"],
                                   scales[0]["chunk_sizes"][0]) < 2
    assert scales[1]["size"] == [256, 256, 256]
    assert scales[1]["resolution"] == [2e6, 2e6, 2e6]
    assert scales[1]["chunk_sizes"] == [[64, 64, 64]]
    assert scales[2]["size"] == [128, 128, 128]
    assert scales[2]["resolution"] == [4e6, 4e6, 4e6]
    assert scales[2]["chunk_sizes"] == [[64, 64, 64]]
75 |
76 |
def test_fill_scales_for_dyadic_pyramid_extremely_anisotropic():
    """Extreme anisotropy must be compensated by elongated chunks."""
    info = fill_scales_for_dyadic_pyramid({"scales": [{
        "size": [256000000, 256, 256],
        "resolution": [1, 1e6, 1e6],
    }]}, target_chunk_size=64)
    first_scale = info["scales"][0]
    assert first_scale["size"] == [256000000, 256, 256]
    assert first_scale["resolution"] == [1, 1e6, 1e6]
    assert chunk_extent_anisotropy(first_scale["resolution"],
                                   first_scale["chunk_sizes"][0]) < 2
    assert first_scale["chunk_sizes"] == [[64 ** 3, 1, 1]]
87 |
88 |
def test_fill_scales_for_dyadic_pyramid_extra_scales(caplog):
    """Pre-existing extra scales are ignored, and a warning is logged."""
    info = fill_scales_for_dyadic_pyramid({"scales": [
        {
            "size": [256, 256, 256],
            "resolution": [1e6, 1e6, 1e6],
        },
        {
            "size": [64, 64, 128],
            "resolution": [4e6, 4e6, 2e6],
        }]}, target_chunk_size=64)
    # ensure that a warning is printed
    assert any(record.levelno == logging.WARNING
               for record in caplog.records)
    scales = info["scales"]
    assert scales[0]["size"] == [256, 256, 256]
    assert scales[0]["resolution"] == [1e6, 1e6, 1e6]
    assert scales[0]["chunk_sizes"] == [[64, 64, 64]]
    # ensure that the second scale was ignored
    assert scales[1]["size"] == [128, 128, 128]
    assert scales[1]["resolution"] == [2e6, 2e6, 2e6]
    assert scales[1]["chunk_sizes"] == [[64, 64, 64]]
108 |
--------------------------------------------------------------------------------
/unit_tests/test_file_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import pathlib
7 |
8 | import pytest
9 | from neuroglancer_scripts.accessor import (
10 | DataAccessError,
11 | )
12 | from neuroglancer_scripts.file_accessor import FileAccessor
13 |
14 |
@pytest.mark.parametrize("compresslevel", [0, 1, 9])
@pytest.mark.parametrize("flat", [False, True])
@pytest.mark.parametrize("gzip", [False, True])
def test_file_accessor_roundtrip(tmpdir, gzip, flat, compresslevel):
    """Files and chunks must read back exactly as stored, for every
    combination of layout and compression options."""
    accessor = FileAccessor(str(tmpdir), gzip=gzip, flat=flat,
                            compresslevel=compresslevel)
    fake_info = b'{"scales": [{"key": "key"}]}'
    fake_chunk_buf = b"d a t a"

    accessor.store_file("info", fake_info, mime_type="application/json")
    assert accessor.fetch_file("info") == fake_info

    chunk_coords = (0, 1, 0, 1, 0, 1)
    accessor.store_chunk(fake_chunk_buf, "key", chunk_coords,
                         mime_type="application/octet-stream")
    assert accessor.fetch_chunk("key", chunk_coords) == fake_chunk_buf

    chunk_coords2 = (0, 1, 0, 1, 1, 2)
    accessor.store_chunk(fake_chunk_buf, "key", chunk_coords2,
                         mime_type="image/jpeg")
    assert accessor.fetch_chunk("key", chunk_coords2) == fake_chunk_buf
33 |
34 |
def test_file_accessor_file_exists(tmpdir):
    """file_exists must reflect the actual filesystem state."""
    accessor = FileAccessor(str(tmpdir))
    assert accessor.file_exists("nonexistent_file") is False
    with (tmpdir / "real_file").open("w"):
        pass  # create an empty file
    assert accessor.file_exists("real_file") is True
    assert accessor.file_exists("nonexistent_dir/file") is False
42 |
43 |
def test_file_accessor_nonexistent_directory():
    """Every operation under a missing base directory must raise
    DataAccessError."""
    accessor = FileAccessor("/nonexistent/directory")
    chunk_coords = (0, 1, 0, 1, 0, 1)
    with pytest.raises(DataAccessError):
        accessor.fetch_file("info")
    with pytest.raises(DataAccessError):
        accessor.store_file("info", b"")
    with pytest.raises(DataAccessError):
        accessor.fetch_chunk("key", chunk_coords)
    with pytest.raises(DataAccessError):
        accessor.store_chunk(b"", "key", chunk_coords)
55 |
56 |
def test_file_accessor_errors(tmpdir):
    """Filesystem-level failures must surface as DataAccessError, and
    paths escaping the base directory must be rejected with ValueError."""
    # tmpdir from pytest is missing features of pathlib
    tmpdir = pathlib.Path(str(tmpdir))
    accessor = FileAccessor(str(tmpdir))
    chunk_coords = (0, 1, 0, 1, 0, 1)

    # Missing file and chunk
    with pytest.raises(DataAccessError):
        accessor.fetch_file("info")
    with pytest.raises(DataAccessError):
        accessor.fetch_chunk("key", chunk_coords)

    # Unreadable file (mode 000)
    unreadable_file = tmpdir / "inaccessible"
    unreadable_file.touch(mode=0o000, exist_ok=False)
    with pytest.raises(DataAccessError):
        accessor.fetch_file("inaccessible")

    # Unreadable chunk directory (mode 000)
    unreadable_chunk = tmpdir / "inaccessible_key" / "0-1_0-1_0-1"
    unreadable_chunk.parent.mkdir(mode=0o000)
    with pytest.raises(DataAccessError):
        accessor.fetch_chunk("inaccessible_key", chunk_coords)
    with pytest.raises(DataAccessError):
        accessor.store_chunk(b"", "inaccessible_key", chunk_coords)
    with pytest.raises(DataAccessError):
        accessor.file_exists("inaccessible_key/dummy")
    with pytest.raises(DataAccessError):
        accessor.store_file("inaccessible_key/dummy", b"")
    # Allow pytest to remove tmpdir with shutil.rmtree
    unreadable_chunk.parent.chmod(mode=0o755)

    # A .gz file whose contents are not valid gzip data
    invalid_gzip_file = tmpdir / "invalid.gz"
    with invalid_gzip_file.open("w") as f:
        f.write("not gzip compressed")
    with pytest.raises(DataAccessError):
        print(accessor.fetch_file("invalid"))

    # Overwrite protection
    accessor.store_file("existing", b"")
    with pytest.raises(DataAccessError):
        accessor.store_file("existing", b"", overwrite=False)
    accessor.store_file("existing", b"", overwrite=True)

    # Paths that escape the base directory must be refused
    with pytest.raises(ValueError):
        accessor.file_exists("../forbidden")
    with pytest.raises(ValueError):
        accessor.fetch_file("../forbidden")
    with pytest.raises(ValueError):
        accessor.store_file("../forbidden", b"")
104 |
--------------------------------------------------------------------------------
/unit_tests/test_http_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import json
7 |
8 | import pytest
9 | import requests
10 | from neuroglancer_scripts.accessor import (
11 | DataAccessError,
12 | )
13 | from neuroglancer_scripts.http_accessor import HttpAccessor
14 |
15 |
@pytest.mark.parametrize("base_url", [
    "http://h.test/i/",
    "http://h.test/i",
])
def test_http_accessor(base_url, requests_mock):
    """Files and chunks must be fetched from the expected URLs, with or
    without a trailing slash on the base URL."""
    dummy_info = {"scales": [{"key": "key"}]}
    dummy_chunk_buf = b"d a t a"
    chunk_coords = (0, 1, 0, 1, 0, 1)
    accessor = HttpAccessor(base_url)

    requests_mock.get("http://h.test/i/info", json=dummy_info)
    assert json.loads(accessor.fetch_file("info").decode()) == dummy_info

    requests_mock.head("http://h.test/i/info", status_code=200)
    assert accessor.file_exists("info") is True

    requests_mock.head("http://h.test/i/info", status_code=404)
    assert accessor.file_exists("info") is False

    requests_mock.get("http://h.test/i/key/0-1_0-1_0-1",
                      content=dummy_chunk_buf)
    assert accessor.fetch_chunk("key", chunk_coords) == dummy_chunk_buf
40 |
41 |
def test_http_accessor_errors(requests_mock):
    """HTTP error statuses and network failures raise DataAccessError."""
    chunk_coords = (0, 1, 0, 1, 0, 1)
    accessor = HttpAccessor("http://h.test/i/")

    requests_mock.head("http://h.test/i/info", status_code=500)
    with pytest.raises(DataAccessError):
        accessor.file_exists("info")

    requests_mock.get("http://h.test/i/info", status_code=404)
    with pytest.raises(DataAccessError):
        accessor.fetch_file("info")

    requests_mock.get("http://h.test/i/info",
                      exc=requests.exceptions.ConnectTimeout)
    with pytest.raises(DataAccessError):
        accessor.fetch_file("info")

    requests_mock.get("http://h.test/i/key/0-1_0-1_0-1", status_code=404)
    with pytest.raises(DataAccessError):
        accessor.fetch_chunk("key", chunk_coords)
62 |
--------------------------------------------------------------------------------
/unit_tests/test_mesh.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import gzip
7 | import io
8 |
9 | import numpy as np
10 | from neuroglancer_scripts.mesh import (
11 | read_precomputed_mesh,
12 | save_mesh_as_neuroglancer_vtk,
13 | save_mesh_as_precomputed,
14 | )
15 |
16 |
def dummy_mesh(num_vertices=4, num_triangles=3):
    """Build a small deterministic mesh for round-trip tests.

    Vertex coordinates are consecutive floats; triangle indices cycle
    through the vertices so that every index is valid.
    """
    vertices = np.arange(
        3 * num_vertices, dtype=np.float32
    ).reshape(num_vertices, 3)
    triangles = np.arange(
        3 * num_triangles, dtype=np.uint32
    ).reshape(num_triangles, 3) % num_vertices
    return vertices, triangles
27 |
28 |
def test_precomputed_mesh_roundtrip():
    """A mesh written in precomputed format must read back identically."""
    vertices, triangles = dummy_mesh()
    buf = io.BytesIO()
    save_mesh_as_precomputed(buf, vertices, triangles)
    buf.seek(0)
    read_vertices, read_triangles = read_precomputed_mesh(buf)
    assert np.array_equal(vertices, read_vertices)
    assert np.array_equal(triangles, read_triangles)
37 |
38 |
def test_precomputed_mesh_gzip_file_roundtrip():
    """The precomputed mesh format must survive a gzip round-trip."""
    vertices, triangles = dummy_mesh()
    bytes_io = io.BytesIO()
    with gzip.GzipFile(fileobj=bytes_io, mode="wb") as gz_file:
        save_mesh_as_precomputed(gz_file, vertices, triangles)
    compressed = bytes_io.getvalue()
    with gzip.GzipFile(fileobj=io.BytesIO(compressed), mode="rb") as gz_file:
        read_vertices, read_triangles = read_precomputed_mesh(gz_file)
    assert np.array_equal(vertices, read_vertices)
    assert np.array_equal(triangles, read_triangles)
49 |
50 |
def test_write_vtk_mesh():
    """Writing a mesh as neuroglancer VTK must complete without errors."""
    vertices, triangles = dummy_mesh()
    output = io.StringIO()
    save_mesh_as_neuroglancer_vtk(
        output, vertices, triangles,
        vertex_attributes=[{
            "name": "dummy_attribute",
            "values": np.arange(vertices.shape[0]),
        }],
        title="dummy mesh"
    )
62 |
--------------------------------------------------------------------------------
/unit_tests/test_precomputed_io.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import numpy as np
7 | import pytest
8 | from neuroglancer_scripts.accessor import get_accessor_for_url
9 | from neuroglancer_scripts.chunk_encoding import InvalidInfoError
10 | from neuroglancer_scripts.precomputed_io import (
11 | get_IO_for_existing_dataset,
12 | get_IO_for_new_dataset,
13 | )
14 |
# Minimal single-scale raw-encoded image dataset shared by the tests below.
DUMMY_INFO = {
    "type": "image",
    "data_type": "uint16",
    "num_channels": 1,
    "scales": [{
        "key": "key",
        "size": [8, 3, 15],
        "resolution": [1e6, 1e6, 1e6],
        "voxel_offset": [0, 0, 0],
        "chunk_sizes": [[8, 8, 8]],
        "encoding": "raw",
    }],
}
30 |
31 |
def test_precomputed_IO_chunk_roundtrip(tmpdir):
    """A chunk written through the IO layer must read back identically,
    including through a fresh IO object built from the stored info."""
    accessor = get_accessor_for_url(str(tmpdir))
    # Minimal info file
    writer = get_IO_for_new_dataset(DUMMY_INFO, accessor)
    dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
    chunk_coords = (0, 8, 0, 3, 8, 15)
    writer.write_chunk(dummy_chunk, "key", chunk_coords)
    assert np.array_equal(writer.read_chunk("key", chunk_coords),
                          dummy_chunk)

    reader = get_IO_for_existing_dataset(accessor)
    assert reader.info == DUMMY_INFO
    assert np.array_equal(reader.read_chunk("key", chunk_coords),
                          dummy_chunk)
44 |
45 |
def test_precomputed_IO_info_error(tmpdir):
    """An unparseable info file must raise InvalidInfoError."""
    with (tmpdir / "info").open("w") as f:
        f.write("invalid JSON")
    accessor = get_accessor_for_url(str(tmpdir))
    with pytest.raises(InvalidInfoError):
        get_IO_for_existing_dataset(accessor)
52 |
53 |
def test_precomputed_IO_validate_chunk_coords(tmpdir):
    """Chunk coordinates must be validated against the scale's grid."""
    accessor = get_accessor_for_url(str(tmpdir))
    # Minimal info file
    io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
    good_chunk_coords = (0, 8, 0, 3, 0, 8)
    bad_chunk_coords = (0, 8, 1, 4, 0, 8)  # not aligned to the chunk grid
    assert io.validate_chunk_coords("key", good_chunk_coords) is True
    assert io.validate_chunk_coords("key", bad_chunk_coords) is False
62 |
63 |
def test_raw_encoding_lossy_info(tmpdir):
    """The raw encoding must not be reported as lossy."""
    accessor = get_accessor_for_url(str(tmpdir))
    raw_io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
    assert not raw_io.scale_is_lossy("key")
69 |
70 |
def test_compressed_segmentation_encoding_lossy_info(tmpdir):
    """compressed_segmentation encoding must not be reported as lossy."""
    info = {
        "type": "image",
        "data_type": "uint32",
        "num_channels": 1,
        "scales": [
            {
                "key": "key",
                "size": [8, 3, 15],
                "resolution": [1e6, 1e6, 1e6],
                "voxel_offset": [0, 0, 0],
                "chunk_sizes": [[8, 8, 8]],
                "encoding": "compressed_segmentation",
                "compressed_segmentation_block_size": [8, 8, 8],
            }
        ],
    }
    accessor = get_accessor_for_url(str(tmpdir))
    io = get_IO_for_new_dataset(info, accessor)
    assert not io.scale_is_lossy("key")
93 |
94 |
def test_jpeg_encoding_lossy_info(tmpdir):
    """The jpeg encoding must be reported as lossy."""
    info = {
        "type": "image",
        "data_type": "uint8",
        "num_channels": 1,
        "scales": [
            {
                "key": "key",
                "size": [8, 3, 15],
                "resolution": [1e6, 1e6, 1e6],
                "voxel_offset": [0, 0, 0],
                "chunk_sizes": [[8, 8, 8]],
                "encoding": "jpeg",
            }
        ],
    }
    accessor = get_accessor_for_url(str(tmpdir))
    io = get_IO_for_new_dataset(info, accessor)
    assert io.scale_is_lossy("key")
116 |
--------------------------------------------------------------------------------
/unit_tests/test_sharded_http_accessor.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023, 2024 Forschungszentrum Juelich GmbH
2 | # Author: Xiao Gui
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | from unittest.mock import MagicMock, patch
7 |
8 | import numpy as np
9 | import pytest
10 | from neuroglancer_scripts.sharded_http_accessor import (
11 | HttpShard,
12 | HttpShardedScale,
13 | ShardedHttpAccessor,
14 | ShardSpec,
15 | ShardVolumeSpec,
16 | )
17 | from requests import Session
18 | from requests import exceptions as re_exc
19 |
20 | hdr_len = int(2 ** 2 * 16)
21 |
22 |
@pytest.fixture
def shard_spec():
    """ShardSpec(2, 2) shared by the shard fixtures and tests below."""
    spec = ShardSpec(2, 2)
    return spec
26 |
27 |
@pytest.fixture
def shard_key():
    """Shard key as a (numeric key, string form) pair."""
    key = np.uint64(1)
    return key, str(key)
31 |
32 |
# HttpShard
@pytest.mark.parametrize("base_url", [
    "http://test-foo/path/20um/",
    "http://test-foo/path/20um"
])
@pytest.mark.parametrize("get_mock_result, exp_is_legacy, err_flag", [
    # (.index, .data, .shard) existing -> expected is_legacy, expect error.
    # Only .index and .data present: legacy layout.
    ((True, True, False), True, False),
    # Whenever .shard is present the shard is treated as non-legacy.
    ((False, False, True), False, False),
    ((True, False, True), False, False),
    ((False, True, True), False, False),
    ((True, True, True), False, False),

    # None of the files exist: construction must fail.
    ((False, False, False), None, True),
])
@patch.object(HttpShard, "get_minishards_offsets", return_value=[])
def test_http_shard(get_msh_offset_m, base_url, shard_key, shard_spec,
                    requests_mock, get_mock_result, exp_is_legacy, err_flag):
    """HttpShard detects the legacy (.index/.data) vs modern (.shard)
    layout from HEAD requests, and raises when no shard file exists.

    A stray dead ``pass`` statement was removed from the error branch.
    """
    base_url = base_url if base_url.endswith("/") else (base_url + "/")
    shard_key, shard_key_str = shard_key

    # Mock a HEAD response (200 if "existing", else 404) for each file name.
    for mresult, ext in zip(get_mock_result, ["index", "data", "shard"]):
        status = 200 if mresult else 404
        requests_mock.head(f"{base_url}{shard_key_str}.{ext}",
                           status_code=status)

    if err_flag:
        with pytest.raises(Exception):
            HttpShard(base_url, Session(), shard_key, shard_spec)
        # The constructor must fail before reading any minishard offsets.
        get_msh_offset_m.assert_not_called()
        return

    shard = HttpShard(base_url, Session(), shard_key, shard_spec)
    assert shard.is_legacy == exp_is_legacy
68 |
69 |
70 | sc_base_url = "http://test-foo/path/20um/"
71 |
72 |
@pytest.fixture
def legacy_http_shard(shard_key, shard_spec, requests_mock):
    """HttpShard backed by the legacy .index/.data file pair (no .shard)."""
    key, key_str = shard_key
    for ext, status in (("index", 200), ("data", 200), ("shard", 404)):
        requests_mock.head(f"{sc_base_url}{key_str}.{ext}",
                           status_code=status)
    with patch.object(HttpShard, "get_minishards_offsets", return_value=[]):
        yield HttpShard(sc_base_url, Session(), key, shard_spec)
81 |
82 |
@pytest.fixture
def modern_http_shard(shard_key, shard_spec, requests_mock):
    """HttpShard backed by a single modern .shard file."""
    key, key_str = shard_key
    for ext, status in (("index", 404), ("data", 404), ("shard", 200)):
        requests_mock.head(f"{sc_base_url}{key_str}.{ext}",
                           status_code=status)
    with patch.object(HttpShard, "get_minishards_offsets", return_value=[]):
        yield HttpShard(sc_base_url, Session(), key, shard_spec)
91 |
92 |
@pytest.mark.parametrize("f_name", [
    "legacy_http_shard",
    "modern_http_shard",
])
def test_sharded_http_file_exists(f_name, requests_mock,
                                  request):
    """file_exists maps HTTP 200 to True, 404 to False, and raises on a
    server error (500) or a network failure."""
    shard: HttpShard = request.getfixturevalue(f_name)

    requests_mock.head(f"{sc_base_url}exist.txt", status_code=200)
    requests_mock.head(f"{sc_base_url}notexists.txt", status_code=404)
    requests_mock.head(f"{sc_base_url}error.txt", status_code=500)
    requests_mock.head(f"{sc_base_url}networkerr.txt",
                       exc=re_exc.ConnectTimeout)

    assert shard.file_exists("exist.txt")
    assert not shard.file_exists("notexists.txt")
    with pytest.raises(Exception):
        shard.file_exists("error.txt")
    with pytest.raises(Exception):
        shard.file_exists("networkerr.txt")
114 |
115 |
@pytest.mark.parametrize("offsetlen", [
    (0, hdr_len),
    (hdr_len, 5),
])
@pytest.mark.parametrize("f_name", [
    "legacy_http_shard",
    "modern_http_shard",
])
def test_sharded_http_read_bytes(f_name, requests_mock, offsetlen,
                                 request):
    """read_bytes issues a Range request against the right file and raises
    when the server returns a body of the wrong length.

    Fix: the mocked URL was the literal string "(unknown)" (an artifact)
    and the computed ``filename`` was never used; the mock now registers
    ``f"{sc_base_url}{filename}"`` as intended.
    """
    shard: HttpShard = request.getfixturevalue(f_name)
    offset, length = offsetlen

    filename = "1.shard"
    if f_name == "legacy_http_shard":
        # Legacy layout splits the shard: the header lives in 1.index and
        # the payload in 1.data, with offsets relative to the .data file.
        filename = "1.index" if (offset < hdr_len) else "1.data"
        offset = offset if offset < hdr_len else (offset - hdr_len)

    requests_mock.get(f"{sc_base_url}{filename}", request_headers={
        "Range": f"bytes={offset}-{offset + length - 1}"
    }, content=b"\0" * length)

    assert b"\0" * length == shard.read_bytes(*offsetlen)

    with pytest.raises(Exception):
        # Server returns one byte too many: the length mismatch must be
        # detected and raised.
        requests_mock.get(f"{sc_base_url}{filename}", request_headers={
            "Range": f"bytes={offset}-{offset + length - 1}"
        }, content=b"\0" * (length + 1))
        shard.read_bytes(*offsetlen)
145 |
146 |
def test_sharded_http_fetch_cmc(modern_http_shard):
    """fetch_cmc_chunk raises when the minishard key is unknown, and
    otherwise delegates to the matching minishard."""
    minishard_key = "foo-bar"
    cmc = np.uint64(123)
    with patch.object(modern_http_shard, "get_minishard_key",
                      return_value=minishard_key):
        # Empty minishard_dict: the resolved key cannot be found.
        assert len(modern_http_shard.minishard_dict) == 0
        with pytest.raises(Exception):
            modern_http_shard.fetch_cmc_chunk(cmc)

        minishard_mock = MagicMock()
        minishard_mock.fetch_cmc_chunk.return_value = b"foo-bar"
        modern_http_shard.minishard_dict[minishard_key] = minishard_mock

        assert modern_http_shard.fetch_cmc_chunk(cmc) == b"foo-bar"
        minishard_mock.fetch_cmc_chunk.assert_called_once_with(cmc)
162 |
163 |
164 | base_url = "http://test-foo/path"
165 |
166 |
@pytest.fixture
def shard_volume_spec():
    """Shared ShardVolumeSpec built from [64, 64, 64] and [128, 128, 128]
    (presumably chunk size and volume size — confirm with the class)."""
    spec = ShardVolumeSpec([64, 64, 64], [128, 128, 128])
    return spec
170 |
171 |
class DummyCls:
    """Stand-in class that accepts any constructor arguments and does
    nothing; used to patch out HttpShard below."""

    def __init__(self, *args, **kwargs):
        pass
175 |
176 |
# HttpShardedScale
@pytest.mark.parametrize("key_exists", [True, False])
@patch("neuroglancer_scripts.sharded_http_accessor.HttpShard", DummyCls)
def test_http_sharded_scale_get_shard(key_exists, shard_spec,
                                      shard_volume_spec):
    """get_shard stores a new shard only on a cache miss, then always
    returns the cached entry."""
    scale = HttpShardedScale(base_url, Session(), "20um", shard_spec,
                             shard_volume_spec)

    shard_dict = scale.shard_dict = MagicMock()
    shard_dict.__contains__.return_value = key_exists
    shard_dict.__getitem__.return_value = "foo-bar"

    cmc = np.uint64(123)
    result = scale.get_shard(cmc)

    shard_dict.__contains__.assert_called_once_with(cmc)
    if key_exists:
        # Cache hit: nothing is inserted.
        shard_dict.__setitem__.assert_not_called()
    else:
        # Cache miss: exactly one insertion.
        shard_dict.__setitem__.assert_called_once()
    shard_dict.__getitem__.assert_called_once_with(cmc)
    assert result == "foo-bar"
199 |
200 |
@pytest.fixture
def sh_http_accessor(requests_mock):
    """ShardedHttpAccessor against a mocked two-scale sharded dataset."""
    sharding = {
        "@type": "neuroglancer_uint64_sharded_v1",
        "shard_bits": 2,
        "minishard_bits": 2,
    }
    scales = [
        {
            "key": "20mm",
            "size": [256, 256, 256],
            "chunk_sizes": [[64, 64, 64]],
            "sharding": dict(sharding),
        },
        {
            "key": "40mm",
            "size": [128, 128, 128],
            "chunk_sizes": [[64, 64, 64]],
            "sharding": dict(sharding),
        },
    ]
    requests_mock.get(f"{base_url}/info", json={"scales": scales})
    requests_mock.get(f"{base_url}/20mm/0.shard", status_code=200)
    return ShardedHttpAccessor(base_url)
233 |
234 |
@pytest.mark.parametrize("key_exists", [True, False])
def test_sharded_http_accessor(sh_http_accessor, key_exists):
    """fetch_chunk creates the per-scale entry only on a cache miss and
    delegates the actual chunk fetch to it."""
    scale_dict = sh_http_accessor.shard_scale_dict = MagicMock()
    scale_dict.__contains__.return_value = key_exists
    scale_mock = scale_dict.__getitem__.return_value
    scale_mock.fetch_chunk.return_value = b"foo-bar"

    coord = (64, 128, 0, None, 0, ["foo-bar"])
    result = sh_http_accessor.fetch_chunk("20mm", coord)

    scale_dict.__contains__.assert_called_once_with("20mm")
    if key_exists:
        # Cache hit: nothing is inserted.
        scale_dict.__setitem__.assert_not_called()
    else:
        # Cache miss: exactly one insertion.
        scale_dict.__setitem__.assert_called_once()
    scale_dict.__getitem__.assert_called_once_with("20mm")
    assert result == b"foo-bar"
253 |
--------------------------------------------------------------------------------
/unit_tests/test_transform.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 CEA
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import numpy as np
7 | from neuroglancer_scripts.transform import (
8 | matrix_as_compact_urlsafe_json,
9 | nifti_to_neuroglancer_transform,
10 | )
11 |
12 |
def test_matrix_as_compact_urlsafe_json():
    """Matrix entries are serialized with '_' separators (URL-safe JSON)."""
    matrix = np.array([[1, 1.5], [2, 3], [0, -1]])
    result = matrix_as_compact_urlsafe_json(matrix)
    assert result == "[[1_1.5]_[2_3]_[0_-1]]"
16 |
17 |
def test_nifti_to_neuroglancer_transform():
    """An identity NIfTI affine with unit voxels yields a transform whose
    translation is shifted by -0.5 in each axis."""
    nifti_transform = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])
    voxel_size = (1.0, 1.0, 1.0)
    ng_transform = nifti_to_neuroglancer_transform(nifti_transform, voxel_size)
    expected = np.eye(4)
    expected[:3, 3] = -0.5
    assert np.array_equal(ng_transform, expected)
33 |
--------------------------------------------------------------------------------
/unit_tests/test_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 Forschungszentrum Juelich GmbH
2 | # Author: Yann Leprince
3 | #
4 | # This software is made available under the MIT licence, see LICENCE.txt.
5 |
6 | import numpy as np
7 | import pytest
8 | from neuroglancer_scripts.utils import (
9 | ceil_div,
10 | invert_permutation,
11 | permute,
12 | readable_count,
13 | )
14 |
15 |
def test_ceil_div():
    """ceil_div performs integer ceiling division; /0 propagates."""
    cases = [
        ((0, 8), 0),
        ((1, 8), 1),
        ((7, 8), 1),
        ((8, 8), 1),
        ((9, 8), 2),
    ]
    for (num, den), expected in cases:
        assert ceil_div(num, den) == expected
    with pytest.raises(ZeroDivisionError):
        ceil_div(1, 0)
24 |
25 |
def test_permute():
    """permute reorders a sequence according to an index tuple."""
    identity = (0, 1, 2)
    assert permute((1, 2, 3), identity) == (1, 2, 3)
    assert permute((1, 2, 3), (2, 0, 1)) == (3, 1, 2)
29 |
30 |
def test_invert_permutation():
    """invert_permutation returns the inverse index mapping."""
    cases = {
        (0, 1, 2): [0, 1, 2],   # identity is its own inverse
        (2, 1, 0): [2, 1, 0],   # a swap is its own inverse
        (2, 0, 1): [1, 2, 0],
    }
    for perm, inverse in cases.items():
        assert np.array_equal(invert_permutation(perm), inverse)
35 |
36 |
def test_readable_count():
    """readable_count formats counts with binary (Ki/Mi/...) prefixes."""
    expectations = {
        0: "0 ",
        1: "1 ",
        512: "512 ",
        1e10: "9.3 Gi",
        2 ** 70: "1,024 Ei",  # fall-back past the largest unit (Ei)
    }
    for count, formatted in expectations.items():
        assert readable_count(count) == formatted
44 |
--------------------------------------------------------------------------------
/unit_tests/test_volume_reader.py:
--------------------------------------------------------------------------------
1 | import json
2 | from unittest.mock import patch
3 |
4 | import nibabel as nib
5 | import numpy as np
6 | import pytest
7 | from neuroglancer_scripts.volume_reader import (
8 | nibabel_image_to_info,
9 | volume_file_to_precomputed,
10 | )
11 |
12 |
def prepare_nifti_images():
    """Return (nifti_image, expected_num_channels) pairs: one RGB-typed
    image (3 channels) and one plain uint8 image (1 channel), both 3x3x3."""
    rng = np.random.default_rng()

    # Reinterpret random uint8 data as a structured RGB dtype.
    rgb_dtype = np.dtype([("R", "u1"), ("G", "u1"), ("B", "u1")])
    raw = rng.integers(256, size=(3, 3, 3, 3), dtype=np.uint8)
    rgb_data = raw.copy().view(dtype=rgb_dtype).reshape((3, 3, 3))
    rgb_img = nib.Nifti1Image(rgb_data, np.eye(4))

    gray_data = rng.integers(256, size=(3, 3, 3), dtype=np.uint8)
    gray_img = nib.Nifti1Image(gray_data, np.eye(4))

    return [(rgb_img, 3), (gray_img, 1)]
24 |
25 |
@pytest.mark.parametrize("nifti_img,expected_num_channel",
                         prepare_nifti_images())
def test_nibabel_image_to_info(nifti_img, expected_num_channel):
    """The generated info JSON reports the expected number of channels."""
    formatted_info, _, _, _ = nibabel_image_to_info(nifti_img)
    parsed = json.loads(formatted_info)
    assert parsed.get("num_channels") == expected_num_channel
33 |
34 |
@pytest.mark.parametrize("nifti_img,expected_num_channel",
                         prepare_nifti_images())
@patch('neuroglancer_scripts.precomputed_io.get_IO_for_existing_dataset',
       return_value=None)
@patch('neuroglancer_scripts.volume_reader.nibabel_image_to_precomputed')
@patch("nibabel.load")
def test_volume_file_to_precomputed(m_nib_load, m_nib_img_precomp, _,
                                    nifti_img, expected_num_channel):
    """A single-channel image is converted as-is; a multi-channel (RGB)
    image is replaced by a new 4-D image before conversion."""
    m_nib_load.return_value = nifti_img
    m_nib_img_precomp.return_value = "hoho"

    volume_file_to_precomputed("mock_file_name", "./bla")

    assert m_nib_load.called
    assert m_nib_img_precomp.called

    # First positional argument passed to nibabel_image_to_precomputed.
    converted = m_nib_img_precomp.call_args[0][0]
    if expected_num_channel == 1:
        assert converted is nifti_img
    else:
        assert converted is not nifti_img
        assert len(converted.dataobj.shape) == 4
57 |
--------------------------------------------------------------------------------