├── pyapr ├── tests │ ├── __init__.py │ ├── test_files │ │ ├── sphere_1D.apr │ │ ├── sphere_1D.tif │ │ ├── sphere_2D.apr │ │ ├── sphere_2D.tif │ │ ├── sphere_3D.apr │ │ ├── sphere_3D.tif │ │ └── two_objects.apr │ ├── test_gui.py │ ├── test_segmentation.py │ ├── test_restoration.py │ ├── test_iterator.py │ ├── test_reconstruction.py │ ├── test_measure.py │ ├── test_transform.py │ ├── test_converter.py │ ├── test_utils.py │ ├── helpers.py │ ├── test_tree.py │ ├── test_particledata.py │ ├── test_io.py │ ├── test_morphology.py │ └── test_filter.py ├── transform │ ├── __init__.py │ └── projection.py ├── segmentation │ └── __init__.py ├── restoration │ ├── __init__.py │ └── src │ │ └── BindRichardsonLucy.hpp ├── tree │ ├── __init__.py │ ├── tree_ops.py │ ├── fill_tree.py │ └── src │ │ └── BindFillTree.hpp ├── utils │ ├── __init__.py │ └── types.py ├── measure │ ├── __init__.py │ ├── _find_label_volume.py │ ├── _find_objects.py │ ├── _find_label_centers.py │ └── _connected_component.py ├── viewer │ ├── __init__.py │ ├── particleScatterPlot.py │ ├── src │ │ └── BindRaycaster.hpp │ └── compressInteractive.py ├── io │ ├── __init__.py │ └── src │ │ └── BindAPRFile.hpp ├── morphology │ └── __init__.py ├── filter │ ├── __init__.py │ ├── stencils.py │ ├── std.py │ └── rank_filters.py ├── converter │ ├── __init__.py │ └── src │ │ └── BindConverterBatch.hpp ├── reconstruction │ ├── __init__.py │ └── APRSlicer.py ├── data_containers │ ├── src │ │ ├── BindLazyIterator.hpp │ │ ├── BindLazyAccess.hpp │ │ ├── BindPixelData.hpp │ │ ├── BindLazyData.hpp │ │ ├── BindReconPatch.hpp │ │ ├── BindAPR.hpp │ │ ├── BindParameters.hpp │ │ └── BindLinearIterator.hpp │ └── __init__.py ├── _common.py └── __init__.py ├── MANIFEST.in ├── requirements.txt ├── docs ├── images │ ├── apr_file.png │ ├── raycast.png │ ├── view_apr.png │ ├── apr_joined.png │ ├── pix_joined.png │ └── view_level.png ├── source │ ├── install.rst │ ├── pyapr.transform.rst │ ├── pyapr.segmentation.rst │ ├── pyapr.tree.rst │ ├── 
pyapr.utils.rst │ ├── pyapr.measure.rst │ ├── pyapr.restoration.rst │ ├── pyapr.viewer.rst │ ├── pyapr.io.rst │ ├── pyapr.rst │ ├── pyapr.converter.rst │ ├── pyapr.filter.rst │ ├── pyapr.reconstruction.rst │ ├── pyapr.morphology.rst │ ├── pyapr.data_containers.rst │ ├── index.rst │ └── conf.py ├── Makefile └── make.bat ├── travis_windows_setup.sh ├── .gitmodules ├── pyproject.toml ├── demo ├── viewer_demo.py ├── raycast_demo.py ├── numerics_demo.py ├── apr_segmentation_demo.py ├── apr_io_demo.py ├── convolution_demo.py ├── compress_particles_demo.py ├── plot_particle_scatter_demo.py ├── reconstruction_demo.py ├── richardson_lucy_demo.py ├── get_apr_demo.py ├── get_apr_interactive_demo.py ├── apr_iteration_demo.py └── get_apr_by_block_interactive_demo.py ├── external └── maxflow-v3.04.src │ ├── instances.inc │ ├── CHANGES.TXT │ ├── graph.cpp │ └── README.TXT ├── setup.cfg ├── fix_windows_wheel.py ├── .gitignore ├── .travis.yml ├── setup.py ├── INSTALL.md ├── wrappers └── pythonBind.cpp ├── README.md └── .github └── workflows ├── quick-test.yml └── deploy-docs.yml /pyapr/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | recursive-include wrappers * 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scikit-image 3 | tifffile 4 | PyQt5 5 | pyqtgraph 6 | matplotlib 7 | -------------------------------------------------------------------------------- /docs/images/apr_file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/docs/images/apr_file.png 
-------------------------------------------------------------------------------- /docs/images/raycast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/docs/images/raycast.png -------------------------------------------------------------------------------- /docs/images/view_apr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/docs/images/view_apr.png -------------------------------------------------------------------------------- /docs/images/apr_joined.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/docs/images/apr_joined.png -------------------------------------------------------------------------------- /docs/images/pix_joined.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/docs/images/pix_joined.png -------------------------------------------------------------------------------- /docs/images/view_level.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/docs/images/view_level.png -------------------------------------------------------------------------------- /docs/source/install.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | .. 
include:: ../../INSTALL.md 5 | :parser: myst_parser.sphinx_ -------------------------------------------------------------------------------- /pyapr/tests/test_files/sphere_1D.apr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/sphere_1D.apr -------------------------------------------------------------------------------- /pyapr/tests/test_files/sphere_1D.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/sphere_1D.tif -------------------------------------------------------------------------------- /pyapr/tests/test_files/sphere_2D.apr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/sphere_2D.apr -------------------------------------------------------------------------------- /pyapr/tests/test_files/sphere_2D.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/sphere_2D.tif -------------------------------------------------------------------------------- /pyapr/tests/test_files/sphere_3D.apr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/sphere_3D.apr -------------------------------------------------------------------------------- /pyapr/tests/test_files/sphere_3D.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/sphere_3D.tif -------------------------------------------------------------------------------- /pyapr/transform/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .projection import maximum_projection 2 | 3 | __all__ = [ 4 | 'maximum_projection' 5 | ] 6 | -------------------------------------------------------------------------------- /pyapr/tests/test_files/two_objects.apr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdaptiveParticles/pyapr/HEAD/pyapr/tests/test_files/two_objects.apr -------------------------------------------------------------------------------- /pyapr/segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | from .graphcut import graphcut, compute_terminal_costs 2 | 3 | __all__ = [ 4 | 'graphcut', 5 | 'compute_terminal_costs' 6 | ] 7 | -------------------------------------------------------------------------------- /pyapr/restoration/__init__.py: -------------------------------------------------------------------------------- 1 | from .richardson_lucy import richardson_lucy, richardson_lucy_tv, richardson_lucy_cuda 2 | 3 | __all__ = [ 4 | 'richardson_lucy', 5 | 'richardson_lucy_tv', 6 | 'richardson_lucy_cuda' 7 | ] 8 | -------------------------------------------------------------------------------- /travis_windows_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | git clone https://github.com/microsoft/vcpkg 4 | cd vcpkg 5 | ./bootstrap-vcpkg.bat 6 | ./vcpkg.exe install blosc:x64-windows tiff:x64-windows hdf5:x64-windows szip:x64-windows 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/source/pyapr.transform.rst: -------------------------------------------------------------------------------- 1 | transform 2 | ========= 3 | 4 | .. autosummary:: 5 | 6 | pyapr.transform.maximum_projection 7 | 8 | .. 
automodule:: pyapr.transform 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: 12 | -------------------------------------------------------------------------------- /pyapr/tree/__init__.py: -------------------------------------------------------------------------------- 1 | from .fill_tree import fill_tree_mean, fill_tree_min, fill_tree_max 2 | from .tree_ops import sample_from_tree 3 | 4 | __all__ = [ 5 | 'fill_tree_mean', 6 | 'fill_tree_max', 7 | 'fill_tree_min', 8 | 'sample_from_tree' 9 | ] 10 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "pybind11"] 2 | path = external/pybind11 3 | url = https://github.com/pybind/pybind11.git 4 | branch = master 5 | [submodule "external/LibAPR"] 6 | path = external/LibAPR 7 | url = https://github.com/AdaptiveParticles/LibAPR.git 8 | branch = master 9 | 10 | -------------------------------------------------------------------------------- /pyapr/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .filegui import InteractiveIO 2 | from .types import type_to_particles, type_to_lazy_particles, particles_to_type 3 | 4 | __all__ = [ 5 | 'particles_to_type', 6 | 'type_to_particles', 7 | 'type_to_lazy_particles', 8 | 'InteractiveIO' 9 | ] 10 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"] 3 | 4 | [tool.setuptools_scm] 5 | write_to = "pyapr/_version.py" 6 | 7 | [tool.pytest.ini_options] 8 | minversion = "6.0" 9 | addopts = "-ra -q" 10 | testpaths = [ 11 | "pyapr/tests", 12 | ] 13 | -------------------------------------------------------------------------------- /docs/source/pyapr.segmentation.rst: 
-------------------------------------------------------------------------------- 1 | segmentation 2 | ============ 3 | 4 | .. autosummary:: 5 | 6 | pyapr.segmentation.graphcut 7 | pyapr.segmentation.compute_terminal_costs 8 | 9 | 10 | .. automodule:: pyapr.segmentation 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | -------------------------------------------------------------------------------- /docs/source/pyapr.tree.rst: -------------------------------------------------------------------------------- 1 | tree 2 | ==== 3 | 4 | .. autosummary:: 5 | 6 | pyapr.tree.fill_tree_mean 7 | pyapr.tree.fill_tree_max 8 | pyapr.tree.fill_tree_min 9 | pyapr.tree.sample_from_tree 10 | 11 | 12 | .. automodule:: pyapr.tree 13 | :members: 14 | :undoc-members: 15 | :show-inheritance: 16 | -------------------------------------------------------------------------------- /docs/source/pyapr.utils.rst: -------------------------------------------------------------------------------- 1 | utils 2 | ===== 3 | 4 | .. autosummary:: 5 | 6 | pyapr.utils.particles_to_type 7 | pyapr.utils.type_to_particles 8 | pyapr.utils.type_to_lazy_particles 9 | pyapr.utils.InteractiveIO 10 | 11 | 12 | .. automodule:: pyapr.utils 13 | :members: 14 | :undoc-members: 15 | :show-inheritance: 16 | -------------------------------------------------------------------------------- /docs/source/pyapr.measure.rst: -------------------------------------------------------------------------------- 1 | measure 2 | ======= 3 | 4 | .. autosummary:: 5 | 6 | pyapr.measure.connected_component 7 | pyapr.measure.find_objects 8 | pyapr.measure.find_label_centers 9 | pyapr.measure.find_label_volume 10 | 11 | .. 
automodule:: pyapr.measure 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | -------------------------------------------------------------------------------- /docs/source/pyapr.restoration.rst: -------------------------------------------------------------------------------- 1 | restoration 2 | =========== 3 | 4 | .. autosummary:: 5 | 6 | pyapr.restoration.richardson_lucy 7 | pyapr.restoration.richardson_lucy_tv 8 | pyapr.restoration.richardson_lucy_cuda 9 | 10 | 11 | .. automodule:: pyapr.restoration 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | -------------------------------------------------------------------------------- /pyapr/measure/__init__.py: -------------------------------------------------------------------------------- 1 | from ._connected_component import connected_component 2 | from ._find_objects import find_objects 3 | from ._find_label_centers import find_label_centers 4 | from ._find_label_volume import find_label_volume 5 | 6 | __all__ = [ 7 | 'connected_component', 8 | 'find_objects', 9 | 'find_label_centers', 10 | 'find_label_volume' 11 | ] 12 | -------------------------------------------------------------------------------- /docs/source/pyapr.viewer.rst: -------------------------------------------------------------------------------- 1 | viewer 2 | ====== 3 | 4 | .. autosummary:: 5 | 6 | pyapr.viewer.parts_viewer 7 | pyapr.viewer.raycast_viewer 8 | pyapr.viewer.interactive_compression 9 | pyapr.viewer.particle_scatter_plot 10 | pyapr.viewer.APRRaycaster 11 | 12 | 13 | .. 
automodule:: pyapr.viewer 14 | :members: 15 | :undoc-members: 16 | :show-inheritance: 17 | -------------------------------------------------------------------------------- /pyapr/viewer/__init__.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.viewer import APRRaycaster 2 | from .partsViewer import * 3 | from .compressInteractive import * 4 | from .raycastViewer import * 5 | from .particleScatterPlot import particle_scatter_plot 6 | 7 | __all__ = [ 8 | 'parts_viewer', 9 | 'raycast_viewer', 10 | 'interactive_compression', 11 | 'particle_scatter_plot', 12 | 'APRRaycaster' 13 | ] 14 | -------------------------------------------------------------------------------- /demo/viewer_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | 3 | 4 | """ 5 | Read a selected APR from file and display it in the z-slice viewer. 6 | """ 7 | 8 | # Get APR file path from gui 9 | io_int = pyapr.utils.InteractiveIO() 10 | fpath_apr = io_int.get_apr_file_name() 11 | 12 | # Read APR and particles from file 13 | apr, parts = pyapr.io.read(fpath_apr) 14 | 15 | # Launch the by-slice viewer 16 | pyapr.viewer.parts_viewer(apr, parts) 17 | -------------------------------------------------------------------------------- /docs/source/pyapr.io.rst: -------------------------------------------------------------------------------- 1 | io 2 | == 3 | 4 | .. autosummary:: 5 | 6 | pyapr.io.read 7 | pyapr.io.write 8 | pyapr.io.read_apr 9 | pyapr.io.write_apr 10 | pyapr.io.read_particles 11 | pyapr.io.write_particles 12 | pyapr.io.get_particle_names 13 | pyapr.io.get_particle_type 14 | pyapr.io.APRFile 15 | 16 | .. 
automodule:: pyapr.io 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | -------------------------------------------------------------------------------- /pyapr/io/__init__.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.io import APRFile 2 | from .io_api import read, write, read_apr, write_apr, read_particles, write_particles, get_particle_type, \ 3 | get_particle_names 4 | 5 | __all__ = [ 6 | 'read', 7 | 'write', 8 | 'read_apr', 9 | 'write_apr', 10 | 'read_particles', 11 | 'write_particles', 12 | 'get_particle_names', 13 | 'get_particle_type', 14 | 'APRFile' 15 | ] 16 | -------------------------------------------------------------------------------- /demo/raycast_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | 3 | 4 | """ 5 | Read a selected APR from file and visualize it via maximum intensity projection. 6 | 7 | Scroll to zoom 8 | Click and drag to change the view 9 | """ 10 | 11 | # Get input APR file path from gui 12 | io_int = pyapr.utils.InteractiveIO() 13 | fpath_apr = io_int.get_apr_file_name() 14 | 15 | # Read APR and particles from file 16 | apr, parts = pyapr.io.read(fpath_apr) 17 | 18 | # Launch the raycast viewer 19 | pyapr.viewer.raycast_viewer(apr, parts) 20 | -------------------------------------------------------------------------------- /docs/source/pyapr.rst: -------------------------------------------------------------------------------- 1 | pyapr 2 | ===== 3 | 4 | .. automodule:: pyapr 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | .. 
toctree:: 10 | :maxdepth: 1 11 | :caption: Submodules 12 | 13 | pyapr.converter 14 | pyapr.data_containers 15 | pyapr.filter 16 | pyapr.io 17 | pyapr.measure 18 | pyapr.morphology 19 | pyapr.reconstruction 20 | pyapr.restoration 21 | pyapr.segmentation 22 | pyapr.transform 23 | pyapr.tree 24 | pyapr.utils 25 | pyapr.viewer 26 | -------------------------------------------------------------------------------- /pyapr/morphology/__init__.py: -------------------------------------------------------------------------------- 1 | from .morphology import dilation, erosion, opening, closing, tophat, bottomhat, find_perimeter, remove_small_objects, \ 2 | remove_large_objects, remove_edge_objects, remove_small_holes 3 | 4 | __all__ = [ 5 | 'dilation', 6 | 'erosion', 7 | 'opening', 8 | 'closing', 9 | 'tophat', 10 | 'bottomhat', 11 | 'find_perimeter', 12 | 'remove_small_objects', 13 | 'remove_small_holes', 14 | 'remove_edge_objects', 15 | 'remove_large_objects' 16 | ] 17 | -------------------------------------------------------------------------------- /docs/source/pyapr.converter.rst: -------------------------------------------------------------------------------- 1 | converter 2 | ========= 3 | 4 | .. autosummary:: 5 | 6 | pyapr.converter.get_apr 7 | pyapr.converter.get_apr_interactive 8 | pyapr.converter.find_parameters_interactive 9 | pyapr.converter.ByteConverter 10 | pyapr.converter.ShortConverter 11 | pyapr.converter.FloatConverter 12 | pyapr.converter.ByteConverterBatch 13 | pyapr.converter.ShortConverterBatch 14 | pyapr.converter.FloatConverterBatch 15 | 16 | .. automodule:: pyapr.converter 17 | :members: 18 | :undoc-members: 19 | -------------------------------------------------------------------------------- /docs/source/pyapr.filter.rst: -------------------------------------------------------------------------------- 1 | filter 2 | ====== 3 | 4 | .. 
autosummary:: 5 | 6 | pyapr.filter.convolve 7 | pyapr.filter.correlate 8 | pyapr.filter.gradient 9 | pyapr.filter.sobel 10 | pyapr.filter.gradient_magnitude 11 | pyapr.filter.sobel_magnitude 12 | pyapr.filter.std 13 | pyapr.filter.median_filter 14 | pyapr.filter.min_filter 15 | pyapr.filter.max_filter 16 | pyapr.filter.get_gaussian_stencil 17 | 18 | .. automodule:: pyapr.filter 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /pyapr/filter/__init__.py: -------------------------------------------------------------------------------- 1 | from .convolution import convolve, correlate 2 | from .gradient import gradient, gradient_magnitude, sobel, sobel_magnitude 3 | from .std import std 4 | from .rank_filters import median_filter, min_filter, max_filter 5 | from .stencils import get_gaussian_stencil 6 | 7 | __all__ = [ 8 | 'convolve', 9 | 'correlate', 10 | 'gradient', 11 | 'sobel', 12 | 'gradient_magnitude', 13 | 'sobel_magnitude', 14 | 'std', 15 | 'median_filter', 16 | 'min_filter', 17 | 'max_filter', 18 | 'get_gaussian_stencil' 19 | ] 20 | -------------------------------------------------------------------------------- /external/maxflow-v3.04.src/instances.inc: -------------------------------------------------------------------------------- 1 | #include "graph.h" 2 | 3 | #ifdef WIN_COMPILE 4 | #pragma warning(disable: 4661) 5 | #endif 6 | 7 | #ifdef WIN_COMPILE 8 | #define LIBRARY_API __declspec(dllexport) 9 | #else 10 | #define LIBRARY_API 11 | #endif 12 | 13 | //#define LIBRARY_API __declspec(dllexport) 14 | //#define LIBRARY_API 15 | 16 | // Instantiations: 17 | // IMPORTANT: 18 | // flowtype should be 'larger' than tcaptype 19 | // tcaptype should be 'larger' than captype 20 | 21 | template class LIBRARY_API Graph; 22 | -------------------------------------------------------------------------------- /pyapr/converter/__init__.py: 
-------------------------------------------------------------------------------- 1 | from _pyaprwrapper.converter import ByteConverter, ShortConverter, FloatConverter, ByteConverterBatch, \ 2 | ShortConverterBatch, FloatConverterBatch 3 | from .converter_methods import get_apr, get_apr_interactive, find_parameters_interactive 4 | 5 | __all__ = [ 6 | 'get_apr', 7 | 'get_apr_interactive', 8 | 'find_parameters_interactive', 9 | 'ByteConverter', 10 | 'ShortConverter', 11 | 'FloatConverter', 12 | 'ByteConverterBatch', 13 | 'ShortConverterBatch', 14 | 'FloatConverterBatch' 15 | ] 16 | -------------------------------------------------------------------------------- /pyapr/reconstruction/__init__.py: -------------------------------------------------------------------------------- 1 | from .reconstruct import reconstruct_constant, reconstruct_level, reconstruct_smooth, reconstruct_lazy, \ 2 | reconstruct_constant_lazy, reconstruct_level_lazy, reconstruct_smooth_lazy 3 | from .APRSlicer import APRSlicer 4 | from .LazySlicer import LazySlicer 5 | 6 | __all__ = [ 7 | 'reconstruct_constant', 8 | 'reconstruct_level', 9 | 'reconstruct_smooth', 10 | 'APRSlicer', 11 | 'LazySlicer', 12 | 'reconstruct_lazy', 13 | 'reconstruct_constant_lazy', 14 | 'reconstruct_level_lazy', 15 | 'reconstruct_smooth_lazy' 16 | ] 17 | -------------------------------------------------------------------------------- /docs/source/pyapr.reconstruction.rst: -------------------------------------------------------------------------------- 1 | reconstruction 2 | ============== 3 | 4 | .. 
autosummary:: 5 | 6 | pyapr.reconstruction.reconstruct_constant 7 | pyapr.reconstruction.reconstruct_level 8 | pyapr.reconstruction.reconstruct_smooth 9 | pyapr.reconstruction.APRSlicer 10 | pyapr.reconstruction.LazySlicer 11 | pyapr.reconstruction.reconstruct_lazy 12 | pyapr.reconstruction.reconstruct_constant_lazy 13 | pyapr.reconstruction.reconstruct_level_lazy 14 | pyapr.reconstruction.reconstruct_smooth_lazy 15 | 16 | 17 | .. automodule:: pyapr.reconstruction 18 | :members: 19 | :undoc-members: 20 | -------------------------------------------------------------------------------- /docs/source/pyapr.morphology.rst: -------------------------------------------------------------------------------- 1 | morphology 2 | ========== 3 | 4 | .. autosummary:: 5 | 6 | pyapr.morphology.dilation 7 | pyapr.morphology.erosion 8 | pyapr.morphology.opening 9 | pyapr.morphology.closing 10 | pyapr.morphology.tophat 11 | pyapr.morphology.bottomhat 12 | pyapr.morphology.find_perimeter 13 | pyapr.morphology.remove_small_objects 14 | pyapr.morphology.remove_small_holes 15 | pyapr.morphology.remove_edge_objects 16 | pyapr.morphology.remove_large_objects 17 | 18 | 19 | .. automodule:: pyapr.morphology 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindLazyIterator.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by joel on 03.09.21. 
3 | // 4 | 5 | #ifndef PYLIBAPR_LAZYITERATOR_HPP 6 | #define PYLIBAPR_LAZYITERATOR_HPP 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace py = pybind11; 14 | 15 | void AddLazyIterator(pybind11::module &m) { 16 | 17 | py::class_(m, "LazyIterator") 18 | .def(py::init()) 19 | .def(py::init([](LazyAccess& access){ return new LazyIterator(access); })); 20 | } 21 | 22 | #endif //PYLIBAPR_LAZYITERATOR_HPP 23 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /pyapr/data_containers/__init__.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, APRParameters, FloatParticles, ShortParticles, LongParticles, \ 2 | ByteParticles, IntParticles, ReconPatch, PixelDataByte, PixelDataShort, PixelDataFloat, APRPtrVector, LazyAccess, \ 3 | LazyDataByte, LazyDataShort, LazyDataLong, LazyDataFloat, LazyIterator, LinearIterator 4 | 5 | __all__ = [ 6 | 'APR', 7 | 'APRParameters', 8 | 'ByteParticles', 9 | 'ShortParticles', 10 | 'IntParticles', 11 | 'FloatParticles', 12 | 'LongParticles', 13 | 'ReconPatch', 14 | 'LinearIterator', 15 | 'LazyAccess', 16 | 'LazyIterator', 17 | 'LazyDataByte', 18 | 'LazyDataShort', 19 | 'LazyDataFloat', 20 | 'LazyDataLong' 21 | ] 22 | -------------------------------------------------------------------------------- /pyapr/filter/stencils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def get_gaussian_stencil(size, sigma, ndims=3, normalize=False): 5 | """Naively generate a Gaussian stencil.""" 6 | x = np.arange(-(size//2), size//2 + 1) 7 | 8 | vals = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-x**2 / (2 * sigma**2)) 9 | 10 | if normalize: 11 | vals = vals / vals.sum() 12 | 13 | stenc = np.empty((size,)*ndims, dtype=np.float32) 14 | 15 | if ndims == 3: 16 | for i in range(size): 17 | for j in range(size): 18 | for k in range(size): 19 | stenc[i, j, k] = vals[i] * vals[j] * vals[k] 20 | elif ndims == 2: 21 | for i in range(size): 22 | for j in range(size): 23 | stenc[i, j] = vals[i] * vals[j] 24 | 25 | stenc = np.expand_dims(stenc, axis=0) 26 | 27 | return stenc 28 | -------------------------------------------------------------------------------- /demo/numerics_demo.py: 
-------------------------------------------------------------------------------- 1 | import pyapr 2 | 3 | 4 | """ 5 | This demo showcases some of the available numerics functionality on a selected APR 6 | """ 7 | 8 | io_int = pyapr.utils.InteractiveIO() 9 | fpath_apr = io_int.get_apr_file_name() # get APR file path from gui 10 | 11 | # Read from APR file 12 | apr, parts = pyapr.io.read(fpath_apr) 13 | 14 | # Compute gradient along a dimension (central finite differences). 15 | output = pyapr.filter.gradient(apr, parts, dim=0, delta=1.0) 16 | pyapr.viewer.parts_viewer(apr, output) 17 | 18 | # Compute gradient magnitude (Sobel filters) 19 | par = apr.get_parameters() 20 | output = pyapr.filter.sobel_magnitude(apr, parts, deltas=(par.dy, par.dx, par.dz), output=output) 21 | pyapr.viewer.parts_viewer(apr, output) 22 | 23 | # Compute local standard deviation around each particle 24 | pyapr.filter.std(apr, parts, size=5, output=output) 25 | pyapr.viewer.parts_viewer(apr, output) 26 | -------------------------------------------------------------------------------- /docs/source/pyapr.data_containers.rst: -------------------------------------------------------------------------------- 1 | data\_containers 2 | ================ 3 | 4 | These classes are in the top-level module, and accessed e.g. as ``pyapr.APR`` and ```pyapr.ReconPatch``. 5 | 6 | .. autosummary:: 7 | 8 | pyapr.data_containers.APR 9 | pyapr.data_containers.APRParameters 10 | pyapr.data_containers.ByteParticles 11 | pyapr.data_containers.ShortParticles 12 | pyapr.data_containers.FloatParticles 13 | pyapr.data_containers.LongParticles 14 | pyapr.data_containers.ReconPatch 15 | pyapr.data_containers.LinearIterator 16 | pyapr.data_containers.LazyAccess 17 | pyapr.data_containers.LazyIterator 18 | pyapr.data_containers.LazyDataByte 19 | pyapr.data_containers.LazyDataShort 20 | pyapr.data_containers.LazyDataFloat 21 | pyapr.data_containers.LazyDataLong 22 | 23 | 24 | .. 
automodule:: pyapr.data_containers 25 | :members: 26 | :undoc-members: 27 | :show-inheritance: 28 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /pyapr/tests/test_gui.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from pytestqt import qtbot 4 | from pyqtgraph.Qt import QtCore 5 | import pyapr 6 | from .helpers import load_test_apr 7 | 8 | 9 | @pytest.mark.skipif('DISPLAY' not in os.environ, reason='requires display') 10 | def test_viewer(qtbot): 11 | apr, parts = load_test_apr(3) 12 | 13 | # launch viewer 14 | win = pyapr.viewer.MainWindow() 15 | win.add_level_toggle() 16 | win.init_APR(apr, parts) 17 | qtbot.add_widget(win) 18 | 19 | # change LUT and view mode 20 | qtbot.keyClicks(win.comboBox, "magma") 21 | qtbot.mouseClick(win.level_toggle, QtCore.Qt.LeftButton) 22 | 23 | # 
change z-slice 24 | z_prev = win.slider.value() 25 | qtbot.mouseClick(win.slider, QtCore.Qt.LeftButton, pos=win.slider.rect().center() + QtCore.QPoint(10, 0)) 26 | assert win.slider.value() != z_prev 27 | 28 | # try to trigger mouse hover event (does not seem to work) 29 | qtbot.mouseMove(win.pg_win, win.pg_win.rect().center(), delay=100) 30 | -------------------------------------------------------------------------------- /demo/apr_segmentation_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | 3 | 4 | """ 5 | This demo performs graph cut segmentation using maxflow-v3.04 (http://pub.ist.ac.at/~vnk/software.html) 6 | by Yuri Boykov and Vladimir Kolmogorov. 7 | 8 | The graph is formed by linking each particle to its face-side neighbours in each dimension. 9 | Terminal edge costs are set based on a smoothed local minimum and the local standard deviation, while 10 | neighbour edge costs are set based on intensity difference, resolution level and local standard deviation. 11 | 12 | Note: experimental! 
13 | """ 14 | 15 | io_int = pyapr.utils.InteractiveIO() 16 | fpath_apr = io_int.get_apr_file_name() # get APR file path from gui 17 | 18 | # Read from APR file 19 | apr, parts = pyapr.io.read(fpath_apr) 20 | 21 | # Compute graphcut segmentation (note that changing the parameters may greatly affect the result) 22 | mask = pyapr.segmentation.graphcut(apr, parts, intensity_threshold=100, min_std=10, num_levels=3) 23 | 24 | # Display the result 25 | pyapr.viewer.parts_viewer(apr, mask) 26 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = pyapr 3 | author = Joel Jonsson, Bevan Cheeseman 4 | author_email = jonsson@mpi-cbg.de 5 | description = Content-adaptive image processing using the Adaptive Particle Representation 6 | long_description = file: README.md 7 | long_description_content_type = text/markdown 8 | url = https://github.com/AdaptiveParticles/pyapr 9 | license = Apache-2.0 10 | classifiers = 11 | Development Status :: 4 - Beta 12 | Intended Audience :: Developers 13 | License :: OSI Approved :: Apache Software License 14 | Programming Language :: Python :: 3 15 | Programming Language :: Python :: 3.8 16 | Programming Language :: Python :: 3.9 17 | Programming Language :: Python :: 3.10 18 | Programming Language :: Python :: 3.11 19 | Topic :: Scientific/Engineering 20 | Topic :: Scientific/Engineering :: Image Processing 21 | 22 | [options] 23 | zip_safe = False 24 | packages = find: 25 | install_requires = 26 | numpy 27 | scikit-image 28 | PyQt5 29 | pyqtgraph 30 | tifffile 31 | matplotlib 32 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. pyapr documentation master file, created by 2 | sphinx-quickstart on Thu May 5 14:19:16 2022. 
3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Documentation for pyapr 7 | ======================= 8 | 9 | Content-adaptive storage and processing of volumetric images in Python. 10 | 11 | `pyapr` is a collection of tools and algorithms to convert images to and from the 12 | Adaptive Particle Representation (APR), as well as to manipulate and view APR 13 | images. The base namespace `pyapr` holds a number of data container classes 14 | (see data_containers), while functions for generating, viewing and processing APR 15 | images are imported via submodules. 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | :caption: Usage 20 | 21 | install 22 | 23 | 24 | .. toctree:: 25 | :maxdepth: 2 26 | :caption: Submodules 27 | 28 | pyapr 29 | 30 | 31 | Indices and tables 32 | ================== 33 | 34 | * :ref:`genindex` 35 | * :ref:`modindex` 36 | * :ref:`search` 37 | -------------------------------------------------------------------------------- /pyapr/measure/_find_label_volume.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ShortParticles, LongParticles, ByteParticles 2 | import _pyaprwrapper.measure as _measure 3 | from .._common import _check_input 4 | import numpy as np 5 | from typing import Union 6 | 7 | __allowed_types__ = (ByteParticles, ShortParticles, LongParticles) 8 | 9 | 10 | def find_label_volume(apr: APR, 11 | labels: Union[ByteParticles, ShortParticles, LongParticles]) -> np.ndarray: 12 | """ 13 | Return the volume (in voxels) of each unique input label. 14 | 15 | Parameters 16 | ---------- 17 | apr: APR 18 | Input APR data structure. 19 | labels: ByteParticles, ShortParticles or LongParticles 20 | Input (object) labels. 21 | 22 | Returns 23 | ------- 24 | volume: numpy.ndarray 25 | Array of shape `(labels.max() + 1,)` containing the label volumes. 
26 | """ 27 | _check_input(apr, labels, __allowed_types__) 28 | max_label = labels.max() 29 | volume = np.zeros((max_label+1), dtype=np.uint64) 30 | _measure.find_label_volume(apr, labels, volume) 31 | return volume 32 | -------------------------------------------------------------------------------- /demo/apr_io_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | import numpy as np 3 | from skimage import io as skio 4 | 5 | 6 | """ 7 | This demo converts a selected TIFF image to an APR, writes the result to file and then reads the file. 8 | """ 9 | 10 | io_int = pyapr.utils.InteractiveIO() 11 | 12 | # Read in an image 13 | fpath = io_int.get_tiff_file_name() 14 | img = skio.imread(fpath) 15 | 16 | # convert image to APR (with default parameters) 17 | apr, parts = pyapr.converter.get_apr(img) 18 | 19 | # Compute and display the computational ratio 20 | numParts = apr.total_number_particles() 21 | numPix = img.size 22 | cr = numPix / numParts 23 | print('Input image size: {} pixels, APR size: {} particles --> Computational Ratio = {}'.format(numPix, numParts, cr)) 24 | 25 | # Save the APR to file 26 | fpath_apr = io_int.save_apr_file_name() # get save path from gui 27 | pyapr.io.write(fpath_apr, apr, parts) # write apr and particles to file 28 | 29 | # Read the newly written file 30 | apr2, parts2 = pyapr.io.read(fpath_apr) 31 | 32 | # check that particles are equal at a single, random index 33 | ri = np.random.randint(0, numParts-1) 34 | assert parts[ri] == parts2[ri] 35 | 36 | # check some APR properties 37 | assert apr.total_number_particles() == apr2.total_number_particles() 38 | assert apr.level_max() == apr2.level_max() 39 | assert apr.level_min() == apr2.level_min() 40 | -------------------------------------------------------------------------------- /pyapr/tests/test_segmentation.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pyapr 3 
| from .helpers import load_test_apr_obj 4 | import numpy as np 5 | import platform 6 | 7 | PARTICLE_TYPES = [ 8 | pyapr.ShortParticles, 9 | pyapr.FloatParticles 10 | ] 11 | 12 | 13 | @pytest.mark.skipif(platform.system() == 'Darwin', reason='see issue #63') 14 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 15 | @pytest.mark.parametrize("constant_neighbor_scale", [True, False]) 16 | @pytest.mark.parametrize("z_block_size", [None, 16]) 17 | def test_graphcut(parts_type, constant_neighbor_scale, z_block_size): 18 | apr, parts = load_test_apr_obj() 19 | parts = parts_type(parts) 20 | 21 | # this image is trivially segmented by thresholding 22 | gt_mask = parts > 100 23 | 24 | # test graphcut 25 | mask = pyapr.segmentation.graphcut(apr, parts, intensity_threshold=101, beta=3.0, z_block_size=z_block_size, 26 | z_ghost_size=32, push_depth=1, constant_neighbor_scale=constant_neighbor_scale) 27 | assert mask == gt_mask 28 | 29 | # run compute_terminal_costs 30 | foreground, background = pyapr.segmentation.compute_terminal_costs(apr, parts) 31 | 32 | with pytest.raises(TypeError): 33 | # unsupported output type 34 | mask = pyapr.segmentation.graphcut(apr, parts, output=pyapr.FloatParticles()) 35 | -------------------------------------------------------------------------------- /demo/convolution_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | from time import time 3 | 4 | """ 5 | This demo reads an APR, applies a convolution operation and displays the result 6 | """ 7 | 8 | io_int = pyapr.utils.InteractiveIO() 9 | fpath_apr = io_int.get_apr_file_name() # get APR file path from gui 10 | 11 | # Read from APR file 12 | apr, parts = pyapr.io.read(fpath_apr) 13 | 14 | # Stencil and output must be float32 15 | stencil = pyapr.filter.get_gaussian_stencil(size=5, sigma=1, ndims=3, normalize=True) 16 | out = pyapr.FloatParticles() 17 | 18 | # Convolve using CPU: 19 | t0 = time() 20 | out = 
pyapr.filter.convolve(apr, parts, stencil, output=out, method='slice') 21 | print('convolve (method \'slice\') took {} seconds'.format(time()-t0)) 22 | 23 | 24 | # Alternative CPU convolution algorithm: 25 | t0 = time() 26 | out = pyapr.filter.convolve(apr, parts, stencil, output=out, method='pencil') 27 | print('convolve (method \'pencil\') took {} seconds'.format(time()-t0)) 28 | 29 | 30 | # Convolve using GPU (stencil must be of shape 3x3x3 or 5x5x5): 31 | if pyapr.cuda_enabled() and stencil.shape in [(3, 3, 3), (5, 5, 5)]: 32 | t0 = time() 33 | out = pyapr.filter.convolve(apr, parts, stencil, output=out, method='cuda') 34 | print('convolve (method \'cuda\') took {} seconds'.format(time()-t0)) 35 | 36 | # Display the result 37 | pyapr.viewer.parts_viewer(apr, out) 38 | -------------------------------------------------------------------------------- /pyapr/tests/test_restoration.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | import pyapr 4 | from .helpers import load_test_apr 5 | import numpy as np 6 | 7 | PARTICLE_TYPES = [ 8 | pyapr.ByteParticles, 9 | pyapr.ShortParticles, 10 | pyapr.FloatParticles 11 | ] 12 | 13 | 14 | @pytest.mark.filterwarnings('ignore:richardson_lucy_cuda') 15 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 16 | @pytest.mark.parametrize("ndim", [1, 2, 3]) 17 | def test_richardson_lucy(parts_type, ndim): 18 | apr, parts = load_test_apr(ndim) 19 | parts = parts_type(parts) 20 | 21 | psf = pyapr.filter.get_gaussian_stencil(size=5, sigma=0.8, ndims=ndim, normalize=True) 22 | niter = 10 23 | 24 | rl_out = pyapr.restoration.richardson_lucy(apr, parts, psf, num_iter=niter) 25 | 26 | if ndim == 3: 27 | rl_cuda = pyapr.restoration.richardson_lucy_cuda(apr, parts, psf, num_iter=niter) 28 | # should give the same result as richardson_lucy on cpu 29 | assert np.allclose(np.array(rl_out), np.array(rl_cuda)) 30 | 31 | rl_out = pyapr.restoration.richardson_lucy_tv(apr, 
parts, psf, num_iter=niter, resume=True, output=rl_out) 32 | 33 | with pytest.raises(ValueError): 34 | # resume with wrongly initialized output 35 | pyapr.restoration.richardson_lucy(apr, parts, psf, num_iter=niter, resume=True, output=pyapr.FloatParticles(10)) 36 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindLazyAccess.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by joel on 03.09.21. 3 | // 4 | 5 | #ifndef PYLIBAPR_LAZYACCESS_HPP 6 | #define PYLIBAPR_LAZYACCESS_HPP 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | namespace py = pybind11; 13 | 14 | void AddLazyAccess(pybind11::module &m, const std::string &modulename) { 15 | 16 | using namespace pybind11::literals; 17 | 18 | py::class_(m, modulename.c_str()) 19 | .def(py::init()) 20 | .def("__repr__", [](LazyAccess &acc) { 21 | return "LazyAccess(shape [" + std::to_string(acc.aprInfo.org_dims[2]) + ", " + 22 | std::to_string(acc.aprInfo.org_dims[1]) + ", " + std::to_string(acc.aprInfo.org_dims[0]) + 23 | "], " + std::to_string(acc.aprInfo.total_number_particles) + " particles)"; }) 24 | .def("init", &LazyAccess::init, "initialize LazyAccess from an open APRFile", "aprFile"_a) 25 | .def("init_tree", &LazyAccess::init_tree, "initialize LazyAccess for tree data from an open APRFile", "aprFile"_a) 26 | .def("open", &LazyAccess::open, "open file for reading") 27 | .def("close", &LazyAccess::close, "close file") 28 | .def("org_dims", &LazyAccess::org_dims, "original image dimensions"); 29 | 30 | } 31 | 32 | #endif //PYLIBAPR_LAZYACCESS_HPP 33 | -------------------------------------------------------------------------------- /pyapr/tests/test_iterator.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | from .helpers import load_test_apr 3 | import numpy as np 4 | import math 5 | 6 | 7 | def test_iterator_vs_slicer(): 8 | apr, parts = 
load_test_apr(3) 9 | it = apr.iterator() 10 | 11 | for level_delta in [0, -1, -2]: 12 | slicer = pyapr.reconstruction.APRSlicer(apr, parts, level_delta=level_delta) 13 | level = it.level_max() + level_delta 14 | for z in range(5, 13): 15 | for x in range(1, 9): 16 | recon_row = slicer[z, x] 17 | for idx in range(it.begin(level, z, x), it.end()): 18 | assert parts[idx] == recon_row[it.y(idx)] 19 | 20 | 21 | def test_iterator_find_x(): 22 | apr, parts = load_test_apr(3) 23 | it = apr.iterator() 24 | 25 | _shape = apr.shape() 26 | z_coords = [0] + list(np.random.randint(1, _shape[0]-1, size=4)) + [_shape[0]-1] 27 | 28 | for z in z_coords: 29 | for x in range(_shape[1]): 30 | for y in range(_shape[2]): 31 | # find particle at z, x, y 32 | idx = it.find_particle(z, x, y) 33 | 34 | # find coordinates of particle 35 | level, z_l, x_l, y_l = it.find_coordinates(idx) 36 | size_factor = 2 ** (it.level_max() - level) 37 | assert z_l == (z // size_factor) 38 | assert x_l == (x // size_factor) 39 | assert y_l == (y // size_factor) 40 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindPixelData.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Joel Jonsson on 29.06.18. 3 | // 4 | 5 | #ifndef PYLIBAPR_PYPIXELDATA_HPP 6 | #define PYLIBAPR_PYPIXELDATA_HPP 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "data_structures/Mesh/PixelData.hpp" 13 | 14 | namespace py = pybind11; 15 | 16 | 17 | // Currently only using this to return PixelData objects to python as arrays without copy. It could be made more 18 | // complete with constructors and methods, but I don't think it is necessary. 
19 | 20 | template 21 | void AddPyPixelData(pybind11::module &m, const std::string &aTypeString) { 22 | using PixelDataType = PixelData; 23 | std::string typeStr = "PixelData" + aTypeString; 24 | py::class_(m, typeStr.c_str(), py::buffer_protocol()) 25 | .def(py::init()) 26 | .def_buffer([](PixelDataType &a) -> py::buffer_info{ 27 | return py::buffer_info( 28 | a.mesh.get(), 29 | sizeof(DataType), 30 | py::format_descriptor::format(), 31 | 3, 32 | {a.z_num, a.x_num, a.y_num}, 33 | {sizeof(DataType) * a.x_num * a.y_num, sizeof(DataType) * a.y_num, sizeof(DataType)} 34 | ); 35 | }); 36 | } 37 | 38 | #endif //PYLIBAPR_PYPIXELDATA_HPP 39 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindLazyData.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by joel on 03.09.21. 3 | // 4 | 5 | #ifndef PYLIBAPR_LAZYDATA_HPP 6 | #define PYLIBAPR_LAZYDATA_HPP 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | namespace py = pybind11; 13 | 14 | template 15 | void AddLazyData(pybind11::module &m, const std::string &aTypeString) { 16 | 17 | using namespace pybind11::literals; 18 | using LazyDataType = LazyData; 19 | std::string typeStr = "LazyData" + aTypeString; 20 | 21 | py::class_(m, typeStr.c_str()) 22 | .def(py::init()) 23 | .def("__repr__", [typeStr](LazyDataType& p) { return typeStr + "(size " + std::to_string(p.dataset_size()) + ")"; }) 24 | .def("init", &LazyDataType::init, "initialize dataset from an open APRFile", 25 | "aprFile"_a, "particles_name"_a, "t"_a=0, "channel_name"_a="t") 26 | .def("init_tree", &LazyDataType::init_tree, "initialize tree dataset from an open APRFile", 27 | "aprFile"_a, "particles_name"_a, "t"_a=0, "channel_name"_a="t") 28 | .def("open", &LazyDataType::open, "open dataset") 29 | .def("close", &LazyDataType::close, "close dataset") 30 | .def("dataset_size", &LazyDataType::dataset_size, "return the number of elements in the open 
dataset"); 31 | } 32 | 33 | #endif //PYLIBAPR_LAZYDATA_HPP 34 | -------------------------------------------------------------------------------- /demo/compress_particles_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pyapr 3 | 4 | 5 | """ 6 | This demo applies lossy compression to the particle intensities, with the background level and quantization factor 7 | set interactively. 8 | """ 9 | 10 | io_int = pyapr.utils.InteractiveIO() 11 | fpath_apr = io_int.get_apr_file_name() # get APR file path from gui 12 | 13 | # Read APR and particles from file 14 | apr, parts = pyapr.io.read(fpath_apr) 15 | 16 | # Interactive WNL compression 17 | pyapr.viewer.interactive_compression(apr, parts) 18 | 19 | # Write compressed APR to file 20 | fpath_apr_save = io_int.save_apr_file_name() # get file path from gui 21 | pyapr.io.write(fpath_apr_save, apr, parts) 22 | 23 | # Size of original and compressed APR files in MB 24 | original_file_size = os.path.getsize(fpath_apr) * 1e-6 25 | compressed_file_size = os.path.getsize(fpath_apr_save) * 1e-6 26 | 27 | # Uncompressed pixel image size (assuming 16-bit datatype) 28 | original_image_size = 2e-6 * apr.x_num(apr.level_max()) * apr.y_num(apr.level_max()) * apr.z_num(apr.level_max()) 29 | 30 | print("Original APR File Size: {:7.2f} MB".format(original_file_size)) 31 | print("Lossy Compressed APR File Size: {:7.2f} MB".format(compressed_file_size)) 32 | 33 | # compare uncompressed pixel image size to compressed APR file sizes 34 | print("Original Memory Compression Ratio: {:7.2f} ".format(original_image_size/original_file_size)) 35 | print("Lossy Memory Compression Ratio: {:7.2f} ".format(original_image_size/compressed_file_size)) 36 | -------------------------------------------------------------------------------- /fix_windows_wheel.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | import sys 4 | 
import glob 5 | import shutil 6 | 7 | # 8 | # This script fixes wheels in this folder for windows builds, copying files from the release directory 9 | # then repacking the wheel and then using delvewheel to fix dependencies. 10 | # 11 | 12 | def main(wheel_file, dest_dir): 13 | wheel_dir = os.path.dirname(wheel_file) 14 | wheel_name = os.path.basename(wheel_file) 15 | os.chdir(wheel_dir) 16 | 17 | #unpack the wheel 18 | subprocess.check_call(['wheel', 'unpack', wheel_name]) 19 | 20 | folder = glob.glob('pyapr*/')[0] # there should be only one 21 | 22 | # copy files out of the Release subdirectory 23 | files_2_copy = glob.glob(os.path.join(folder, 'Release', '*')) 24 | for fc in files_2_copy: 25 | print('copying file ', fc, ' to ', folder) 26 | shutil.copy(fc,folder) 27 | 28 | # remove the Release folder and its contents 29 | shutil.rmtree(os.path.join(folder, 'Release')) 30 | 31 | # repack the wheel 32 | subprocess.check_call(['wheel', 'pack', folder]) 33 | 34 | # remove the unpacked directory 35 | shutil.rmtree(folder) 36 | 37 | # repair wheel 38 | subprocess.check_call(['delvewheel', 'repair', '--ignore-in-wheel', wheel_name]) 39 | 40 | # copy repaired wheel to destination directory 41 | shutil.copy(wheel_name, dest_dir) 42 | 43 | 44 | if __name__ == '__main__': 45 | _, wheel_file, dest_dir = sys.argv 46 | main(wheel_file, dest_dir) 47 | -------------------------------------------------------------------------------- /demo/plot_particle_scatter_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | 3 | 4 | """ 5 | Read a selected APR from file and display (a rectangular region of) a given z-slice as a point scatter. 6 | """ 7 | 8 | # Get APR file path from gui 9 | io_int = pyapr.utils.InteractiveIO() 10 | fpath_apr = io_int.get_apr_file_name() 11 | 12 | # Read APR and particles from file 13 | apr, parts = pyapr.io.read(fpath_apr) 14 | 15 | display = True # display the result as a python plot? 
16 | save = False # save resulting plot as an image? 17 | if save: 18 | save_path = io_int.save_tiff_file_name() 19 | 20 | z = None # which slice to display? (default None -> display center slice) 21 | base_markersize = 1 22 | markersize_scale_factor = 2 # markersize = base_markersize * particle_size ** markersize_scale_factor 23 | figsize = None # figure size in inches (default None -> determined by xrange, yrange and dpi) 24 | dpi = 50 # dots per inch (output image dimensions will be dpi*figsize) 25 | xrange = (400, 800) # range of x values to be plotted 26 | yrange = (400, 800) # range of y values to be plotted (if None or out of bounds, the entire range is used) 27 | 28 | pyapr.viewer.particle_scatter_plot(apr, parts, z=z, markersize_scale_factor=markersize_scale_factor, 29 | base_markersize=base_markersize, figsize=figsize, dpi=dpi, 30 | save_path=save_path if save else None, xrange=xrange, yrange=yrange, 31 | display=display, cmap='viridis') 32 | -------------------------------------------------------------------------------- /demo/reconstruction_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | from skimage import io as skio 3 | from pyapr.reconstruction import reconstruct_constant, reconstruct_smooth, reconstruct_level 4 | 5 | 6 | """ 7 | This demo illustrates three different pixel image reconstruction methods: 8 | 9 | constant each pixel takes the value of the particle whose cell contains the pixel 10 | smooth additionally smooths regions of coarser resolution to reduce 'blockiness' 11 | level each pixel takes the value of the resolution level of the particle cell it belongs to 12 | """ 13 | 14 | # get input APR file path from gui 15 | io_int = pyapr.utils.InteractiveIO() 16 | fpath_apr = io_int.get_apr_file_name() 17 | 18 | # Read APR and particles from file 19 | apr, parts = pyapr.io.read(fpath_apr) 20 | 21 | pc_recon = reconstruct_constant(apr, parts) # piecewise constant reconstruction 22 | 
smooth_recon = reconstruct_smooth(apr, parts) # smooth reconstruction 23 | level_recon = reconstruct_level(apr) # level reconstruction 24 | 25 | # Save the results 26 | file_name = '.'.join(fpath_apr.split('/')[-1].split('.')[:-1]) 27 | 28 | save_path = io_int.save_tiff_file_name(file_name + '_reconstruct_const.tif') 29 | if save_path: 30 | skio.imsave(save_path, pc_recon, check_contrast=False) 31 | 32 | save_path = io_int.save_tiff_file_name(file_name + '_reconstruct_smooth.tif') 33 | if save_path: 34 | skio.imsave(save_path, smooth_recon, check_contrast=False) 35 | 36 | save_path = io_int.save_tiff_file_name(file_name + '_reconstruct_level.tif') 37 | if save_path: 38 | skio.imsave(save_path, level_recon, check_contrast=False) 39 | -------------------------------------------------------------------------------- /pyapr/_common.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ByteParticles, ShortParticles, FloatParticles, LongParticles 2 | from typing import Union, Optional, List, Tuple 3 | 4 | 5 | def _check_input(apr: APR, 6 | parts: Union[ByteParticles, ShortParticles, FloatParticles, LongParticles], 7 | allowed_types: Optional[Union[List, Tuple]] = None): 8 | if allowed_types: 9 | if not isinstance(parts, tuple(allowed_types)): 10 | raise TypeError(f'Input particles must be of type {allowed_types}, got {type(parts)}.') 11 | if apr.total_number_particles() == 0: 12 | raise ValueError(f'Input APR {apr} is not initialized.') 13 | if len(parts) != apr.total_number_particles(): 14 | raise ValueError(f'Size mismatch between input APR: {apr} and particles: {parts}.') 15 | 16 | 17 | def _check_input_tree(apr: APR, 18 | tree_parts: Union[ByteParticles, ShortParticles, FloatParticles, LongParticles], 19 | allowed_types: Optional[Union[List, Tuple]] = None): 20 | if allowed_types: 21 | if not isinstance(tree_parts, tuple(allowed_types)): 22 | raise TypeError(f'Input tree particles must 
be of type {allowed_types}, got {type(tree_parts)}.') 23 | if apr.total_number_particles() == 0: 24 | raise ValueError(f'Input APR {apr} is not initialized.') 25 | if len(tree_parts) != apr.total_number_tree_particles(): 26 | raise ValueError(f'Size mismatch between input APR ({apr.total_number_tree_particles()} tree particles) and ' 27 | f'input tree particles: {tree_parts}.') 28 | -------------------------------------------------------------------------------- /demo/richardson_lucy_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | from time import time 3 | 4 | """ 5 | Read a selected APR from file and apply Richardson-Lucy deconvolution 6 | """ 7 | 8 | # Get input APR file path from gui 9 | io_int = pyapr.utils.InteractiveIO() 10 | fpath_apr = io_int.get_apr_file_name() 11 | 12 | # Read from APR file 13 | apr, parts = pyapr.io.read(fpath_apr) 14 | 15 | # Copy particles to float 16 | parts = pyapr.FloatParticles(parts) 17 | 18 | # Add a small offset to the particle values to avoid division by 0 19 | offset = 1e-5 * parts.max() 20 | parts += offset 21 | 22 | # Specify the PSF and number of iterations 23 | psf = pyapr.filter.get_gaussian_stencil(size=5, sigma=1, ndims=3, normalize=True) 24 | 25 | # Richardson-lucy deconvolution 26 | t0 = time() 27 | output = pyapr.restoration.richardson_lucy(apr, parts, psf, num_iter=10) 28 | print('RL took {} seconds'.format(time()-t0)) 29 | 30 | # Using total variation regularization 31 | t0 = time() 32 | output_tv = pyapr.restoration.richardson_lucy_tv(apr, parts, psf, reg_param=1e-2, num_iter=10) 33 | print('RLTV took {} seconds'.format(time()-t0)) 34 | 35 | # if pyapr is built with CUDA enabled and psf is of size (3, 3, 3) or (5, 5, 5) 36 | cuda = False 37 | if pyapr.cuda_enabled() and psf.shape in [(3, 3, 3), (5, 5, 5)]: 38 | t0 = time() 39 | output_cuda = pyapr.restoration.richardson_lucy_cuda(apr, parts, psf, num_iter=10) 40 | print('RL cuda took {} 
seconds'.format(time()-t0)) 41 | cuda = True 42 | 43 | 44 | # Display the results 45 | pyapr.viewer.parts_viewer(apr, output) 46 | pyapr.viewer.parts_viewer(apr, output_tv) 47 | if cuda: 48 | pyapr.viewer.parts_viewer(apr, output_cuda) 49 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindReconPatch.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by joel on 01.03.21. 3 | // 4 | 5 | #ifndef PYLIBAPR_RECONPATCH_HPP 6 | #define PYLIBAPR_RECONPATCH_HPP 7 | 8 | #include "numerics/APRReconstruction.hpp" 9 | #include 10 | 11 | 12 | void AddReconPatch(pybind11::module &m) { 13 | 14 | using namespace pybind11::literals; 15 | 16 | py::class_(m, "ReconPatch") 17 | .def(py::init()) 18 | .def("__repr__", [](ReconPatch& p) { 19 | return "ReconPatch(z: [" + std::to_string(p.z_begin) + ", " + std::to_string(p.z_end) + ")" + 20 | ", x: [" + std::to_string(p.x_begin) + ", " + std::to_string(p.x_end) + ")" + 21 | ", y: [" + std::to_string(p.y_begin) + ", " + std::to_string(p.y_end) + ")" + 22 | ", level_delta = " + std::to_string(p.level_delta) + ")"; 23 | }) 24 | .def_readwrite("z_begin", &ReconPatch::z_begin) 25 | .def_readwrite("z_end", &ReconPatch::z_end) 26 | .def_readwrite("x_begin", &ReconPatch::x_begin) 27 | .def_readwrite("x_end", &ReconPatch::x_end) 28 | .def_readwrite("y_begin", &ReconPatch::y_begin) 29 | .def_readwrite("y_end", &ReconPatch::y_end) 30 | .def_readwrite("level_delta", &ReconPatch::level_delta) 31 | .def("check_limits", [](ReconPatch& p, APR& apr){ return p.check_limits(apr); }) 32 | .def("check_limits", [](ReconPatch& p, LazyAccess& acc){ return p.check_limits(acc); }) 33 | .def("size", &ReconPatch::size, "return the number of pixels in the patch region"); 34 | } 35 | 36 | 37 | #endif //PYLIBAPR_RECONPATCH_HPP 38 | -------------------------------------------------------------------------------- /pyapr/measure/_find_objects.py: 
-------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ByteParticles, ShortParticles, LongParticles 2 | import _pyaprwrapper.measure as _measure 3 | from .._common import _check_input 4 | import numpy as np 5 | from typing import Union, Tuple 6 | 7 | __allowed_types__ = (ByteParticles, ShortParticles, LongParticles) 8 | 9 | 10 | def find_objects(apr: APR, 11 | labels: Union[ByteParticles, ShortParticles, LongParticles]) -> Tuple[np.ndarray, np.ndarray]: 12 | """ 13 | Find and return tight bounding boxes for each unique input label. Assumes that the labels are 14 | ordered from 0, such that 0 is background and each value > 0 corresponds to a connected component. 15 | 16 | Parameters 17 | ---------- 18 | apr: APR 19 | Input APR data structure. 20 | labels: ByteParticles, ShortParticles or LongParticles 21 | Input (object) labels. 22 | 23 | Returns 24 | ------- 25 | min_coords: numpy.ndarray 26 | Array of shape `(labels.max() + 1, 3)` containing the "lower" corner of each bounding box in z, x and y. 27 | max_coords: numpy.ndarray 28 | Array of shape `(labels.max() + 1, 3)` containing the "upper" corner of each bounding box in z, x and y. 
29 | """ 30 | _check_input(apr, labels, __allowed_types__) 31 | max_label = labels.max() 32 | max_dim = max([apr.org_dims(x) for x in range(3)]) 33 | min_coords = np.full((max_label+1, 3), max_dim+1, dtype=np.int32) 34 | max_coords = np.zeros((max_label+1, 3), dtype=np.int32) 35 | _measure.find_objects(apr, labels, min_coords, max_coords) 36 | 37 | max_coords[0, :] = apr.shape() 38 | min_coords[0, :] = 0 39 | 40 | return min_coords, max_coords 41 | -------------------------------------------------------------------------------- /pyapr/measure/_find_label_centers.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ShortParticles, LongParticles, FloatParticles, ByteParticles 2 | import _pyaprwrapper.measure as _measure 3 | from .._common import _check_input 4 | import numpy as np 5 | from typing import Union, Optional 6 | 7 | __allowed_input_types__ = (ByteParticles, ShortParticles, LongParticles) 8 | __allowed_weight_types__ = (ShortParticles, FloatParticles) 9 | 10 | 11 | def find_label_centers(apr: APR, 12 | labels: Union[ByteParticles, ShortParticles, LongParticles], 13 | weights: Optional[Union[ShortParticles, FloatParticles]] = None) -> np.ndarray: 14 | """ 15 | Compute the volumetric center of each unique input label, optionally weighted by, e.g., image intensity. 16 | 17 | Parameters 18 | ---------- 19 | apr: APR 20 | Input APR data structure. 21 | labels: ByteParticles, ShortParticles or LongParticles 22 | Input (object) labels. 23 | weights: ShortParticles or FloatParticles, optional 24 | Weight for each particle. Normalization is applied internally. (default: None) 25 | 26 | Returns 27 | ------- 28 | coords: numpy.ndarray 29 | Array of shape `(labels.max()+1, 3)` containing the center coordinates. 
30 | """ 31 | _check_input(apr, labels, __allowed_input_types__) 32 | max_label = labels.max() 33 | coords = np.zeros((max_label+1, 3), dtype=np.float64) 34 | if weights is not None: 35 | _check_input(apr, weights, __allowed_weight_types__) 36 | _measure.find_label_centers_weighted(apr, labels, coords, weights) 37 | else: 38 | _measure.find_label_centers(apr, labels, coords) 39 | return coords 40 | -------------------------------------------------------------------------------- /pyapr/measure/_connected_component.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ShortParticles, LongParticles, ByteParticles 2 | from _pyaprwrapper.measure import connected_component as _connected_component 3 | from .._common import _check_input 4 | from typing import Union, Optional 5 | 6 | __allowed_types__ = (ByteParticles, ShortParticles, LongParticles) 7 | 8 | 9 | def connected_component(apr: APR, 10 | mask: Union[ByteParticles, ShortParticles, LongParticles], 11 | output: Optional[Union[ByteParticles, ShortParticles, LongParticles]] = None) \ 12 | -> Union[ByteParticles, ShortParticles, LongParticles]: 13 | """ 14 | Label the connected components of an input particle mask. Two particles are considered connected if they 15 | are face-side neighbors and take non-zero values. 16 | 17 | Parameters 18 | ---------- 19 | apr: APR 20 | Input APR data structure. 21 | mask: ByteParticles, ShortParticles or LongParticles 22 | Input (binary) particle mask. 23 | output: ByteParticles, ShortParticles or LongParticles, optional 24 | Particle object for the output labels. If not provided, a LongParticles (uint64) object is generated 25 | and returned. 
(default: None) 26 | 27 | Returns 28 | ------- 29 | output: ByteParticles, ShortParticles or LongParticles 30 | Particle data containing the connected component labels 31 | """ 32 | _check_input(apr, mask, __allowed_types__) 33 | if output is None: 34 | output = LongParticles() 35 | 36 | if not isinstance(output, __allowed_types__): 37 | raise TypeError(f'output (if provided) must be of type {__allowed_types__}, received {type(output)}.') 38 | 39 | _connected_component(apr, mask, output) 40 | return output 41 | -------------------------------------------------------------------------------- /pyapr/tests/test_reconstruction.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | import pyapr 4 | from .helpers import load_test_apr, get_test_apr_path 5 | import numpy as np 6 | 7 | 8 | RECON_MODES = [ 9 | 'constant', 10 | 'smooth', 11 | 'level' 12 | ] 13 | 14 | 15 | @pytest.mark.parametrize("mode", RECON_MODES) 16 | @pytest.mark.parametrize("ndim", [1, 2, 3]) 17 | def test_reconstruction(mode, ndim): 18 | apr, parts = load_test_apr(ndim) 19 | fpath = get_test_apr_path(ndim) 20 | 21 | slicer = pyapr.reconstruction.APRSlicer(apr, parts, mode=mode) 22 | lazy_slicer = pyapr.reconstruction.LazySlicer(fpath, mode=mode) 23 | 24 | assert slicer.ndim == lazy_slicer.ndim == 3 25 | assert slicer.shape == lazy_slicer.shape == apr.shape() 26 | 27 | for level_delta in (-2, -1, 0, 1): 28 | slicer.set_level_delta(level_delta) 29 | lazy_slicer.set_level_delta(level_delta) 30 | 31 | rc1 = slicer[:] 32 | rc2 = lazy_slicer[:] 33 | assert rc1.shape == rc2.shape 34 | assert np.allclose(rc1, rc2) 35 | 36 | patch = pyapr.ReconPatch() 37 | patch.level_delta = level_delta 38 | rc2 = pyapr.reconstruction.reconstruct_lazy(fpath, patch=patch, mode=mode).squeeze() 39 | assert rc1.shape == rc2.shape 40 | assert np.allclose(rc1, rc2) 41 | 42 | assert np.allclose(slicer[0], lazy_slicer[0]) 43 | assert np.allclose(slicer[0, float(0), :], 
import os
import pyapr
from skimage import io as skio


"""
This demo shows how to convert an image to APR using a fixed set of parameters.
"""

# Read in an image
io_int = pyapr.utils.InteractiveIO()
fpath = io_int.get_tiff_file_name()     # get image file path from gui (data type must be float32 or uint16)
img = skio.imread(fpath)

# Set some parameters
par = pyapr.APRParameters()
par.rel_error = 0.1                     # relative error threshold
par.gradient_smoothing = 3              # b-spline smoothing parameter for gradient estimation
                                        # 0 = no smoothing, higher = more smoothing
par.dx = 1
par.dy = 1                              # voxel size
par.dz = 1
# threshold parameters
par.Ip_th = 0                           # regions below this intensity are regarded as background
par.grad_th = 3                         # gradients below this value are set to 0
par.sigma_th = 10                       # the local intensity scale is clipped from below to this value
par.auto_parameters = True              # if true, 'grad_th' and 'sigma_th' are computed automatically based on histograms

# Compute APR and sample particle values
apr, parts = pyapr.converter.get_apr(img, params=par, verbose=True)

# Display the APR
pyapr.viewer.parts_viewer(apr, parts)

fpath_apr = io_int.save_apr_file_name()  # get path through gui

# Fix: only write (and report file statistics) if a valid output path was selected.
# Previously `pyapr.io.write` was called unconditionally, before the `if fpath_apr`
# guard, and could fail when the save dialog was cancelled.
if fpath_apr:
    print("Writing APR to file ... \n")
    pyapr.io.write(fpath_apr, apr, parts)

    # Display the size of the file
    file_sz = os.path.getsize(fpath_apr)
    print("APR File Size: {:7.2f} MB \n".format(file_sz * 1e-6))

    # Compute compression ratio
    mcr = os.path.getsize(fpath) / file_sz
    print("Memory Compression Ratio: {:7.2f}".format(mcr))
23 | * @param par 24 | */ 25 | void set_parameters(APRParameters &par) { this->par = par; } 26 | 27 | /** 28 | * return current parameter set 29 | * @return 30 | */ 31 | APRParameters get_parameters() { return this->par; } 32 | }; 33 | 34 | template 35 | void AddPyAPRConverterBatch(pybind11::module &m, const std::string &aTypeString) { 36 | using converter = PyAPRConverterBatch; 37 | std::string typeStr = aTypeString + "ConverterBatch"; 38 | py::class_(m, typeStr.c_str()) 39 | .def(py::init()) 40 | .def_readwrite("verbose", &converter::verbose, "print timings and additional information") 41 | .def_readwrite("z_block_size", &converter::z_block_size, "number of z slices to process simultaneously") 42 | .def_readwrite("z_ghost_size", &converter::ghost_z, "number of \'ghost slices\' on each side of each block") 43 | .def("get_apr", &converter::get_apr, "compute APR from an image (input as a numpy array)") 44 | .def("set_parameters", &converter::set_parameters, "set parameters") 45 | .def("get_parameters", &converter::get_parameters, "get parameters"); 46 | } 47 | 48 | 49 | 50 | #endif //PYLIBAPR_PYAPRCONVERTERBATCH_HPP 51 | -------------------------------------------------------------------------------- /pyapr/tests/test_measure.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pyapr 3 | from .helpers import load_test_apr_obj 4 | import numpy as np 5 | 6 | 7 | MASK_TYPES = [ 8 | pyapr.ByteParticles, 9 | pyapr.ShortParticles, 10 | pyapr.LongParticles 11 | ] 12 | 13 | 14 | @pytest.mark.parametrize("mask_type", MASK_TYPES) 15 | def test_measures(mask_type): 16 | # load apr and generate binary mask 17 | apr, parts = load_test_apr_obj() 18 | mask = parts > 101 19 | 20 | # find object labels 21 | cc = pyapr.measure.connected_component(apr, mask, output=mask_type()) 22 | assert cc.max() == 2 23 | 24 | # find bounding boxes around each object/label 25 | min_coords, max_coords = pyapr.measure.find_objects(apr, cc) 
26 | assert min_coords.shape == max_coords.shape == (3, 3) 27 | 28 | # compute the (weighted) volumetric center of each object 29 | obj_centers = pyapr.measure.find_label_centers(apr, cc) 30 | obj_centers_weighted = pyapr.measure.find_label_centers(apr, cc, weights=parts) 31 | assert obj_centers.shape == obj_centers_weighted.shape == (3, 3) 32 | 33 | # check that object centers are within the bounding boxes computed by `find_objects` 34 | for i in (1, 2): 35 | for j in range(3): 36 | assert min_coords[i, j] < obj_centers[i, j] < max_coords[i, j] 37 | assert min_coords[i, j] < obj_centers_weighted[i, j] < max_coords[i, j] 38 | 39 | # compute the volume of each object 40 | vol = pyapr.measure.find_label_volume(apr, cc) 41 | 42 | # compute volumes on reconstructions for comparison 43 | slicer = pyapr.reconstruction.APRSlicer(apr, cc) 44 | for obj in (1, 2): 45 | patch = slicer[min_coords[obj, 0]:max_coords[obj, 0], 46 | min_coords[obj, 1]:max_coords[obj, 1], 47 | min_coords[obj, 2]:max_coords[obj, 2]] 48 | assert vol[obj] == np.sum(patch == obj) 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /pyapr/tests/test_transform.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pyapr 3 | from .helpers import load_test_apr 4 | import numpy as np 5 | 6 | PARTICLE_TYPES = [ 7 | pyapr.ByteParticles, 8 | pyapr.ShortParticles, 9 | pyapr.FloatParticles, 10 | pyapr.LongParticles 11 | ] 12 | 13 | PROJECTION_METHODS = [ 14 | 'direct', 15 | 'pyramid' 16 | ] 17 | 18 | 19 | @pytest.mark.filterwarnings('ignore:max projection') 20 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 21 | @pytest.mark.parametrize("method", PROJECTION_METHODS) 22 | def test_maximum_projection(parts_type, method): 23 | apr, parts = load_test_apr(3) 24 | parts = parts_type(parts) 25 | 26 | for dim in (0, 1, 2): 27 | res = pyapr.transform.maximum_projection(apr, parts, dim, method=method) 28 | 
recon = pyapr.reconstruction.reconstruct_constant(apr, parts) 29 | assert np.allclose(res, np.max(recon, axis=2-dim)) 30 | 31 | patch = pyapr.ReconPatch() 32 | _shape = apr.shape() 33 | patch.z_begin = 1 34 | patch.z_end = _shape[0] // 2 35 | patch.x_begin = 2 36 | patch.x_end = _shape[1] // 2 37 | patch.y_begin = 3 38 | patch.y_end = _shape[2] // 2 39 | 40 | res = pyapr.transform.maximum_projection(apr, parts, dim, patch=patch, method=method) 41 | recon = pyapr.reconstruction.reconstruct_constant(apr, parts, patch=patch) 42 | assert np.allclose(res, np.max(recon, axis=2-dim)) 43 | 44 | with pytest.raises(ValueError): 45 | # invalid dim argument 46 | res = pyapr.transform.maximum_projection(apr, parts, dim=3, method=method) 47 | 48 | with pytest.raises(ValueError): 49 | # invalid patch specification (z_begin > z_end) 50 | patch = pyapr.ReconPatch() 51 | patch.z_begin = 5; patch.z_end = 3 52 | patch.level_delta = -1 53 | res = pyapr.transform.maximum_projection(apr, parts, dim=0, patch=patch, method=method) 54 | 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | .idea/ 9 | 10 | # Distribution / packaging 11 | .Python 12 | cmake-build-debug/ 13 | cmake-build-release/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # Environments 88 | .env 89 | .venv 90 | env/ 91 | venv/ 92 | ENV/ 93 | env.bak/ 94 | venv.bak/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | .DS_Store 109 | 110 | data/ 111 | /processing 112 | pyapr/tests/test_files/ 113 | 114 | # written by setuptools_scm 115 | **/_version.py 116 | 117 | -------------------------------------------------------------------------------- /external/maxflow-v3.04.src/CHANGES.TXT: -------------------------------------------------------------------------------- 1 | List of changes from version 3.03: 2 | 3 | - changed types of node::TS and TIME from int to long (to prevent overflows for very large graphs). Thanks to Alexander Bersenev for the suggestion. 4 | 5 | List of changes from version 3.02: 6 | 7 | - put under GPL license 8 | 9 | List of changes from version 3.01: 10 | 11 | - fixed a bug: using add_node() or add_edge() after the first maxflow() with the reuse_trees option 12 | could have caused segmentation fault (if nodes or arcs are reallocated). 
Thanks to Jan Lellmann for pointing out this bug. 13 | - updated block.h to suppress compilation warnings 14 | 15 | List of changes from version 3.0: 16 | - Moved line 17 | #include "instances.inc" 18 | to the end of cpp files to make it compile under GNU c++ compilers 4.2(?) and above 19 | 20 | List of changes from version 2.2: 21 | 22 | - Added functions for accessing graph structure, residual capacities, etc. 23 | (They are needed for implementing maxflow-based algorithms such as primal-dual algorithm for convex MRFs.) 24 | - Added option of reusing trees. 25 | - node_id's are now integers starting from 0. Thus, it is not necessary to store node_id's in a separate array. 26 | - Capacity types are now templated. 27 | - Fixed bug in block.h. (After Block::Reset, ScanFirst() and ScanNext() did not work properly). 28 | - Implementation with a forward star representation of the graph is no longer supported. (It needs less memory, but slightly slower than adjacency list representation.) If you still wish to use it, download version 2.2. 29 | - Note: version 3.0 is released under a different license than version 2.2. 
from _pyaprwrapper.data_containers import APR, ByteParticles, ShortParticles, FloatParticles, LongParticles
from _pyaprwrapper.filter import local_std
from .._common import _check_input
from typing import Union, Optional, Tuple, List


__allowed_input_types__ = (ByteParticles, ShortParticles, FloatParticles, LongParticles)
ParticleData = Union[ByteParticles, ShortParticles, FloatParticles, LongParticles]


def std(apr: APR,
        parts: ParticleData,
        size: Union[int, Tuple[int, int, int], List[int]],
        output: Optional[FloatParticles] = None) -> FloatParticles:
    """
    Compute the local standard deviation in a neighborhood around each particle.

    Parameters
    ----------
    apr: APR
        Input APR data structure.
    parts: ByteParticles, ShortParticles, FloatParticles or LongParticles
        Input particle values.
    size: int, tuple, list
        Size of the box in which standard deviations are computed. If a single integer is provided,
        considers a box of size ``min(size, apr.shape[dim])`` in each dimension. To use different sizes,
        give a list or tuple of length 3, specifying the size in dimensions (y, x, z)
    output: FloatParticles, optional
        Particle object to which the resulting values are written. If not provided, a new object
        is generated. (default: None)

    Returns
    -------
    output: FloatParticles
        The local standard deviation values

    Raises
    ------
    ValueError
        If ``size`` is a tuple or list whose length is not 3.
    TypeError
        If a provided ``output`` is not a FloatParticles object.
    """
    _check_input(apr, parts, __allowed_input_types__)
    if isinstance(size, int):
        # clamp the box to the image extent in each dimension (y, x, z)
        size = (min(size, apr.org_dims(0)), min(size, apr.org_dims(1)), min(size, apr.org_dims(2)))
    if len(size) != 3:
        raise ValueError(f'size must be a tuple or list of length 3, got {size}')
    if output is None:
        output = FloatParticles()
    elif not isinstance(output, FloatParticles):
        # Fix: a wrong-typed `output` used to be silently replaced by a fresh
        # FloatParticles, discarding the caller's object. Raise instead, consistent
        # with e.g. pyapr.measure.connected_component.
        raise TypeError(f'output (if provided) must be of type {FloatParticles}, received {type(output)}.')
    local_std(apr, parts, output, size)
    return output
apr.shape()) 35 | 36 | # check constant reconstruction error 37 | recon = pyapr.reconstruction.reconstruct_constant(apr, parts).astype(np.float32) 38 | img = img.astype(np.float32) 39 | err = np.divide(np.abs(img - recon), lis) 40 | 41 | # ignore regions below pipeline threshold parameters 42 | par = apr.get_parameters() 43 | err[grad < par.grad_th] = 0 44 | err[img < par.Ip_th] = 0 45 | 46 | # check maximum error 47 | max_error = err.max() 48 | assert max_error < rel_error 49 | 50 | 51 | @pytest.mark.parametrize("dtype", UNSUPPORTED_TYPES) 52 | def test_get_apr_type_error(dtype): 53 | img = IMAGES[1].astype(dtype) 54 | with pytest.raises(TypeError): 55 | apr, parts = pyapr.converter.get_apr(img) 56 | 57 | -------------------------------------------------------------------------------- /pyapr/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pyapr 3 | from .helpers import load_test_apr 4 | import numpy as np 5 | 6 | 7 | def test_utils(): 8 | assert isinstance(pyapr.utils.type_to_particles(np.uint8), pyapr.ByteParticles) 9 | assert isinstance(pyapr.utils.type_to_particles(np.uint16), pyapr.ShortParticles) 10 | assert isinstance(pyapr.utils.type_to_particles(np.int32), pyapr.IntParticles) 11 | assert isinstance(pyapr.utils.type_to_particles(np.uint64), pyapr.LongParticles) 12 | assert isinstance(pyapr.utils.type_to_particles(np.float32), pyapr.FloatParticles) 13 | 14 | assert isinstance(pyapr.utils.type_to_lazy_particles(np.uint8), pyapr.LazyDataByte) 15 | assert isinstance(pyapr.utils.type_to_lazy_particles(np.uint16), pyapr.LazyDataShort) 16 | assert isinstance(pyapr.utils.type_to_lazy_particles(np.uint64), pyapr.LazyDataLong) 17 | assert isinstance(pyapr.utils.type_to_lazy_particles(np.float32), pyapr.LazyDataFloat) 18 | 19 | assert pyapr.utils.particles_to_type(pyapr.ByteParticles()) is np.uint8 20 | assert pyapr.utils.particles_to_type(pyapr.ShortParticles()) is np.uint16 21 | 
import os
import numpy as np

# NOTE: `pyapr` and `skimage` are imported lazily inside the loader functions, so the
# pure-numpy helpers (constant_upsample, expand) stay importable without the compiled
# extension or scikit-image installed.

_TEST_FILE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_files')


def _test_file(name):
    """Return the absolute path of a file in the test data directory."""
    return os.path.join(_TEST_FILE_DIR, name)


def _check_dims(dims):
    """Validate the `dims` argument of the loaders below.

    Fix: previously this was `assert cond, ValueError(...)`, which raises
    AssertionError (with a ValueError instance as message) and is stripped
    entirely under `python -O`. Raise ValueError explicitly instead.
    """
    if dims not in (1, 2, 3):
        raise ValueError('\'dims\' must be 1, 2 or 3')


def constant_upsample(img, output_shape, factor=2):
    """
    Constant (nearest-neighbor) upsampling of a 3D array.

    Each output voxel (i, j, k) takes the value ``img[i//factor, j//factor, k//factor]``.
    Vectorized with cross-product indexing (np.ix_), replacing the original
    O(n^3) python triple loop; dtype is preserved.
    """
    zi = np.arange(output_shape[0]) // factor
    xi = np.arange(output_shape[1]) // factor
    yi = np.arange(output_shape[2]) // factor
    return img[np.ix_(zi, xi, yi)]


def expand(img):
    """Prepend singleton dimensions until `img` has 3 dimensions."""
    while img.ndim < 3:
        img = np.expand_dims(img, axis=0)
    return img


def load_test_apr(dims: int = 3):
    """
    read APR data for testing

    Parameters
    ----------
    dims: int
        dimensionality of the image (1-3)
    """
    import pyapr
    _check_dims(dims)
    return pyapr.io.read(_test_file(f'sphere_{dims}D.apr'))


def get_test_apr_path(dims: int = 3):
    """
    return the path of an APR file

    Parameters
    ----------
    dims: int
        dimensionality of the image (1-3)
    """
    _check_dims(dims)
    return _test_file(f'sphere_{dims}D.apr')


def load_test_image(dims: int = 3):
    """
    read pixel image for testing

    Parameters
    ----------
    dims: int
        dimensionality of the image (1-3)
    """
    from skimage import io as skio
    _check_dims(dims)
    return expand(skio.imread(_test_file(f'sphere_{dims}D.tif')))


def load_test_apr_obj():
    """
    return test APR with two objects
    """
    import pyapr
    return pyapr.io.read(_test_file('two_objects.apr'))
import os
import pyapr
from skimage import io as skio


"""
Interactive APR conversion. Reads in a selected TIFF image for interactive setting of the parameters:
    Ip_th    (intensity threshold)
    sigma_th (local intensity scale threshold)
    grad_th  (gradient threshold)

Use the sliders to control the adaptation. The red overlay shows (approximately) the regions that will be fully
resolved (at pixel resolution).

Once the parameters are set, the final steps of the conversion pipeline are applied to produce the APR and sample
the particle intensities.

Note: The effect of grad_th may hide the effect of the other thresholds. It is thus recommended to keep grad_th
low while setting Ip_th and sigma_th, and then increasing grad_th.
"""

# Read in an image
io_int = pyapr.utils.InteractiveIO()
fpath = io_int.get_tiff_file_name()   # get image file path from gui (data type must be float32 or uint16)
img = skio.imread(fpath)

# Set some parameters (only Ip_th, grad_th and sigma_th are set interactively)
par = pyapr.APRParameters()
par.rel_error = 0.1           # relative error threshold
par.gradient_smoothing = 3    # b-spline smoothing parameter for gradient estimation
                              # 0 = no smoothing, higher = more smoothing
par.dx = 1
par.dy = 1                    # voxel size
par.dz = 1

# Compute APR and sample particle values
apr, parts = pyapr.converter.get_apr_interactive(img, params=par, verbose=True, slider_decimals=1)

# Display the APR
pyapr.viewer.parts_viewer(apr, parts)

fpath_apr = io_int.save_apr_file_name()  # get path through gui

# Fix: only write (and report file statistics) if a valid output path was selected.
# Previously `pyapr.io.write` was called unconditionally, before the `if fpath_apr`
# guard, and could fail when the save dialog was cancelled.
if fpath_apr:
    print("Writing APR to file ... \n")
    pyapr.io.write(fpath_apr, apr, parts)

    # Display the size of the file
    file_sz = os.path.getsize(fpath_apr)
    print("APR File Size: {:7.2f} MB \n".format(file_sz * 1e-6))

    # Compute compression ratio
    mcr = os.path.getsize(fpath) / file_sz
    print("Memory Compression Ratio: {:7.2f}".format(mcr))
Thus, ``num_levels`` sets the maximum resolution of the sampling. (Default: 1) 30 | in_place: bool 31 | If True, both ``parts`` and ``tree_parts`` are modified in-place. (Default: False) 32 | 33 | Returns 34 | ------- 35 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 36 | The resampled particle values. 37 | """ 38 | _check_input(apr, parts) 39 | _check_input_tree(apr, tree_parts, (FloatParticles, type(parts))) 40 | parts = parts if in_place else parts.copy() 41 | 42 | if num_levels <= 0: 43 | return parts 44 | 45 | tree_parts = tree_parts if in_place else tree_parts.copy() 46 | _sample_from_tree(apr, parts, tree_parts, num_levels-1) 47 | return parts 48 | -------------------------------------------------------------------------------- /pyapr/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Content-adaptive storage and processing of volumetric images in Python. 3 | 4 | `pyapr` is a collection of tools and algorithms to convert images to and from the 5 | Adaptive Particle Representation (APR), as well as to manipulate and view APR 6 | images. The base namespace `pyapr` holds a number of data container classes 7 | (see data_containers), while functions for generating, viewing and processing APR 8 | images are imported via submodules: 9 | 10 | converter 11 | Conversion from pixel images to APR. 12 | data_containers 13 | Base classes used by the package. 14 | filter 15 | Spatial convolution and filters for APR images. 16 | io 17 | Reading and writing APR images. 18 | measure 19 | Measurement of object properties, mainly using label images. 20 | morphology 21 | Morphological operations, e.g. dilation and erosion, removing small objects or holes. 22 | reconstruction 23 | Reconstruction of pixel values from APR images, in all or parts of the volume and at different resolutions. 24 | restoration 25 | Restoration algorithms for APR images (currently only deconvolution). 
26 | segmentation 27 | Segmentation algorithms for APR images (currently only graphcut). 28 | transform 29 | Transforms for APR images (currently only maximum projection) 30 | tree 31 | Computing interior tree values from APR particles, used in many multi-resolution functions (e.g. viewers). 32 | utils 33 | Utility functions for handling files and data types of APR classes. 34 | viewer 35 | Visualization methods for APR images, e.g. slice viewer and raycast rendering. 36 | """ 37 | 38 | try: 39 | from ._version import version as __version__ 40 | except ImportError: 41 | __version__ = "not-installed" 42 | 43 | from .data_containers import * 44 | from . import converter 45 | from . import filter 46 | from . import io 47 | from . import measure 48 | from . import morphology 49 | from . import reconstruction 50 | from . import restoration 51 | from . import segmentation 52 | from . import transform 53 | from . import tree 54 | from . import utils 55 | from . import viewer 56 | 57 | 58 | try: 59 | from _pyaprwrapper import __cuda_build__ 60 | except ImportError: 61 | __cuda_build__ = False 62 | 63 | 64 | def cuda_enabled() -> bool: 65 | """Returns True if pyapr was built with CUDA support, and False otherwise.""" 66 | return __cuda_build__ 67 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindAPR.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Joel Jonsson on 29.06.18. 
//

#ifndef PYLIBAPR_PYAPR_HPP
#define PYLIBAPR_PYAPR_HPP

// NOTE(review): the include targets below appear to have been stripped by text
// extraction (angle-bracketed paths lost) -- restore from the original header.
#include
#include
#include
#include

#include

namespace py = pybind11;

// NOTE(review): template argument list stripped in this copy -- presumably
// PYBIND11_MAKE_OPAQUE(std::vector<APR*>); confirm against upstream.
PYBIND11_MAKE_OPAQUE(std::vector)

// Register the APR class with the given pybind11 module under `modulename`.
// Exposes size/level queries, iterator factories for particle and tree
// traversal, and the parameters used to create the APR.
void AddAPR(pybind11::module &m, const std::string &modulename) {

    using namespace py::literals;

    // NOTE(review): the template argument of py::class_ was stripped in this
    // copy (should be py::class_<APR, ...>).
    py::class_(m, modulename.c_str())
        .def(py::init())
        // human-readable summary; shape is reported as (z, x, y), i.e.
        // org_dims in reverse dimension order
        .def("__repr__", [](APR& a) {
            return "APR(shape [" + std::to_string(a.org_dims(2)) + ", " + std::to_string(a.org_dims(1)) +
                   ", " + std::to_string(a.org_dims(0)) + "], " + std::to_string(a.total_number_particles()) + " particles)";})
        .def_readwrite("name", &APR::name)
        .def("total_number_particles", &APR::total_number_particles, "return number of particles")
        .def("total_number_tree_particles", &APR::total_number_tree_particles, "return number of interior tree particles")
        .def("level_min", &APR::level_min, "return the minimum resolution level")
        .def("level_max", &APR::level_max, "return the maximum resolution level")
        .def("x_num", &APR::x_num, "Gives the maximum bounds in the x direction for the given level", "level"_a)
        .def("y_num", &APR::y_num, "Gives the maximum bounds in the y direction for the given level", "level"_a)
        .def("z_num", &APR::z_num, "Gives the maximum bounds in the z direction for the given level", "level"_a)
        .def("iterator", &APR::iterator, "Return a linear iterator for APR particles")
        .def("tree_iterator", &APR::tree_iterator, "Return a linear iterator for interior APRTree particles")
        .def("org_dims", &APR::org_dims, "returns the original image size in a specified dimension (y, x, z)", "dim"_a)
        .def("shape", [](APR& self){return py::make_tuple(self.org_dims(2), self.org_dims(1), self.org_dims(0));}, "returns the original pixel image dimensions as a tuple (z, x, y)")
        .def("get_parameters", &APR::get_apr_parameters, "return the parameters used to create the APR")
        .def("computational_ratio", &APR::computational_ratio, "return the computational ratio (number of pixels in original image / number of particles in the APR)");

    // NOTE(review): template argument stripped here as well -- presumably
    // py::bind_vector<std::vector<APR*>>(...); confirm against upstream.
    py::bind_vector>(m, "APRPtrVector", py::module_local(false));
}


#endif //PYLIBAPR_PYAPR_HPP
uninitialized input 47 | with pytest.raises(ValueError): 48 | tree_parts = op(pyapr.APR(), pyapr.ShortParticles()) 49 | 50 | # unsupported output type 51 | with pytest.raises(TypeError): 52 | out = pyapr.ByteParticles() if not isinstance(parts, pyapr.ByteParticles) else pyapr.ShortParticles() 53 | tree_parts = op(apr, parts, output=out) 54 | 55 | 56 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 57 | def test_sample_from_tree(parts_type): 58 | apr, parts = load_test_apr(3) 59 | parts = parts_type(parts) 60 | tree_parts = pyapr.tree.fill_tree_mean(apr, parts) 61 | 62 | res = pyapr.tree.sample_from_tree(apr, parts, tree_parts, num_levels=0) 63 | assert res == parts 64 | 65 | res = pyapr.tree.sample_from_tree(apr, parts, tree_parts, num_levels=2) 66 | 67 | with pytest.raises(TypeError): 68 | # unsupported tree particles type 69 | wrong_type = pyapr.ShortParticles if not isinstance(parts, pyapr.ShortParticles) else pyapr.LongParticles 70 | res = pyapr.tree.sample_from_tree(apr, parts, wrong_type(tree_parts)) 71 | 72 | with pytest.raises(ValueError): 73 | # tree_parts size mismatch 74 | res = pyapr.tree.sample_from_tree(apr, parts, parts) -------------------------------------------------------------------------------- /demo/apr_iteration_demo.py: -------------------------------------------------------------------------------- 1 | import pyapr 2 | import numpy as np 3 | from time import time 4 | 5 | 6 | """ 7 | This demo implements a piecewise constant reconstruction using the wrapped PyLinearIterator. The Python reconstruction 8 | is timed and compared to the internal C++ version. 9 | 10 | Note: The current Python reconstruction is very slow and needs to be improved. For now, this demo is best used as a 11 | coding example of the loop structure to access particles and their spatial properties. 
12 | """ 13 | 14 | io_int = pyapr.utils.InteractiveIO() 15 | fpath_apr = io_int.get_apr_file_name() # get APR file path from gui 16 | 17 | # Read from APR file 18 | apr, parts = pyapr.io.read(fpath_apr) 19 | 20 | # Illustrates the usage of the Python-wrapped linear iterator by computing the piecewise constant reconstruction 21 | start = time() 22 | py_recon = np.empty(apr.shape()) 23 | max_level = apr.level_max() 24 | 25 | apr_it = apr.iterator() # LinearIterator 26 | 27 | # particles at the maximum level coincide with pixels 28 | level = max_level 29 | for z in range(apr_it.z_num(level)): 30 | for x in range(apr_it.x_num(level)): 31 | for idx in range(apr_it.begin(level, z, x), apr_it.end()): 32 | py_recon[z, x, apr_it.y(idx)] = parts[idx] 33 | 34 | # loop over levels up to level_max-1 35 | for level in range(apr_it.level_min(), apr_it.level_max()): 36 | 37 | step_size = 2 ** (max_level - level) # this is the size (in pixels) of the particle cells at level 38 | 39 | for z in range(apr_it.z_num(level)): 40 | for x in range(apr_it.x_num(level)): 41 | for idx in range(apr_it.begin(level, z, x), apr_it.end()): 42 | y = apr_it.y(idx) 43 | 44 | y_start = y * step_size 45 | x_start = x * step_size 46 | z_start = z * step_size 47 | 48 | y_end = min(y_start+step_size, py_recon.shape[2]) 49 | x_end = min(x_start+step_size, py_recon.shape[1]) 50 | z_end = min(z_start+step_size, py_recon.shape[0]) 51 | 52 | py_recon[z_start:z_end, x_start:x_end, y_start:y_end] = parts[idx] 53 | 54 | py_time = time()-start 55 | print('python reconstruction took {} seconds'.format(py_time)) 56 | 57 | # Compare to the c++ reconstruction 58 | start = time() 59 | cpp_recon = pyapr.reconstruction.reconstruct_constant(apr, parts) 60 | cpp_time = time()-start 61 | print('c++ reconstruction took {} seconds'.format(cpp_time)) 62 | print('c++ was {} times faster'.format(py_time / cpp_time)) 63 | 64 | # check that both methods produce the same results (on a subset of the image if it is larger than 128^3 
pixels) 65 | zm = min(apr.org_dims(2), 128) 66 | xm = min(apr.org_dims(1), 128) 67 | ym = min(apr.org_dims(0), 128) 68 | 69 | success = np.allclose(py_recon[:zm, :xm, :ym], cpp_recon[:zm, :xm, :ym]) 70 | if not success: 71 | print('Python and C++ reconstructions seem to give different results...') 72 | 73 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | 13 | import os 14 | import sys 15 | sys.path.insert(0, os.path.abspath('../..')) 16 | from pyapr import __version__ as __pyapr_version__ 17 | 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = 'pyapr' 22 | copyright = '2022, Joel Jonsson' 23 | author = 'Joel Jonsson' 24 | 25 | # The full version, including alpha/beta/rc tags 26 | release = __pyapr_version__ 27 | 28 | 29 | # -- General configuration --------------------------------------------------- 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 
34 | extensions = [ 35 | 'sphinx.ext.autodoc', 36 | 'sphinx.ext.napoleon', 37 | 'sphinx.ext.autosummary', 38 | 'myst_parser' 39 | ] 40 | 41 | # turn on autosummary 42 | autosummary_generate = True 43 | 44 | # autodoc settings 45 | autodoc_member_order = 'bysource' 46 | 47 | # Napoleon settings 48 | napoleon_google_docstring = True 49 | napoleon_numpy_docstring = True 50 | napoleon_include_init_with_doc = True 51 | napoleon_include_private_with_doc = False 52 | napoleon_include_special_with_doc = True 53 | napoleon_use_admonition_for_examples = False 54 | napoleon_use_admonition_for_notes = False 55 | napoleon_use_admonition_for_references = False 56 | napoleon_use_ivar = False 57 | napoleon_use_param = True 58 | napoleon_use_rtype = True 59 | napoleon_preprocess_types = False 60 | napoleon_type_aliases = None 61 | napoleon_attr_annotations = True 62 | 63 | 64 | source_suffix = { 65 | '.rst': 'restructuredtext', 66 | '.md': 'markdown', 67 | } 68 | 69 | # Add any paths that contain templates here, relative to this directory. 70 | templates_path = ['_templates'] 71 | 72 | # List of patterns, relative to source directory, that match files and 73 | # directories to ignore when looking for source files. 74 | # This pattern also affects html_static_path and html_extra_path. 75 | exclude_patterns = [] 76 | 77 | 78 | # -- Options for HTML output ------------------------------------------------- 79 | 80 | # The theme to use for HTML and HTML Help pages. See the documentation for 81 | # a list of builtin themes. 82 | # 83 | html_theme = 'sphinx_rtd_theme' 84 | 85 | # Add any paths that contain custom static files (such as style sheets) here, 86 | # relative to this directory. They are copied after the builtin static files, 87 | # so a file named "default.css" will overwrite the builtin "default.css". 
88 | html_static_path = ['_static'] 89 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindParameters.hpp: -------------------------------------------------------------------------------- 1 | #ifndef PYLIBAPR_PYAPRPARAMETERS_HPP 2 | #define PYLIBAPR_PYAPRPARAMETERS_HPP 3 | 4 | #include 5 | 6 | void AddAPRParameters(pybind11::module &m) { 7 | py::class_(m, "APRParameters") 8 | .def(py::init()) 9 | // Commonly used parameters 10 | .def_readwrite("dz", &APRParameters::dz, "Voxel size in dimension z (shape[0] of image array). Default 1.0") 11 | .def_readwrite("dx", &APRParameters::dx, "Voxel size in dimension x (shape[1] of image array). Default 1.0") 12 | .def_readwrite("dy", &APRParameters::dy, "Voxel size in dimension y (shape[2] of image array). Default 1.0") 13 | .def_readwrite("gradient_smoothing", &APRParameters::lambda, 14 | "Degree of smoothing in the B-spline fit used in internal steps " 15 | "(0 -> no smoothing, higher -> more smoothing). Default 3.0") 16 | .def_readwrite("rel_error", &APRParameters::rel_error, "Relative error threshold of the reconstruction condition") 17 | .def_readwrite("Ip_th", &APRParameters::Ip_th, "Regions below this intensity are ignored. Default 0") 18 | .def_readwrite("sigma_th", &APRParameters::sigma_th, "The local intensity scale is clamped from below to this value") 19 | .def_readwrite("grad_th", &APRParameters::grad_th, "Gradients below this value are set to 0") 20 | .def_readwrite("auto_parameters", &APRParameters::auto_parameters, 21 | "If True, compute sigma_th and grad_th using minimum cross entropy thresholding (Li's algorithm). Default False") 22 | 23 | // Debugging and analysis 24 | .def_readwrite("output_steps", &APRParameters::output_steps, 25 | "If True, intermediate steps are saved as tiff files in the directory specified by output_dir. 
def maximum_projection(apr: "APR",
                       parts: "Union[ByteParticles, ShortParticles, FloatParticles, LongParticles]",
                       dim: int,
                       patch: "Optional[ReconPatch]" = None,
                       method: str = 'auto') -> "np.ndarray":
    """
    Compute the maximum intensity projection along an axis.

    Note: assumes that all particle values are non-negative.

    Parameters
    ----------
    apr: APR
        Input APR data structure.
    parts: ByteParticles, ShortParticles, FloatParticles or LongParticles
        Input particle intensities.
    dim: int
        Dimension along which to compute the projection:

        - ``dim=0``: project along Y to produce a ZX plane
        - ``dim=1``: project along X to produce a ZY plane
        - ``dim=2``: project along Z to produce an XY plane
    patch: ReconPatch, optional
        If provided, projects only within the image region specified by ``patch``. Otherwise projects
        through the entire volume. (default: None)
    method: str
        Projection algorithm: ``'auto'``, ``'direct'`` or ``'pyramid'`` (results are identical,
        but performance may differ). ``'auto'`` selects ``'direct'`` for ``dim=0`` and
        ``'pyramid'`` otherwise.

    Returns
    -------
    out : numpy.ndarray
        The computed maximum intensity projection.

    Raises
    ------
    ValueError
        If ``dim`` or ``method`` is invalid, or if ``patch`` limits are inconsistent with ``apr``.
    """
    if dim not in (0, 1, 2):
        raise ValueError(f'\'dim\' must be 0, 1 or 2 corresponding to projection along y, x or z. Got {dim}')
    # Previously an unrecognized method silently fell through to the 'pyramid'
    # implementation; reject it explicitly instead.
    if method not in ('auto', 'direct', 'pyramid'):
        raise ValueError(f'\'method\' must be \'auto\', \'direct\' or \'pyramid\'. Got {method!r}')
    _check_input(apr, parts)
    args = (apr, parts)

    if patch is not None:
        if patch.level_delta != 0:
            warn('max projection is not yet implemented for patch.level_delta != 0. '
                 'Proceeding with level_delta = 0.', RuntimeWarning)

        # temporarily set level_delta to 0 TODO: make it allow non-zero level delta
        tmp = patch.level_delta
        patch.level_delta = 0
        if not patch.check_limits(apr):
            raise ValueError(f'Invalid patch {patch}')
        patch.level_delta = tmp
        args += (patch,)

    # 'auto' maps to the direct implementation for y-projections and the
    # pyramid ('_alt') implementation for x- and z-projections (matches the
    # historical per-dimension defaults).
    use_direct = method == 'direct' or (method == 'auto' and dim == 0)
    if dim == 0:
        fn = max_projection_y if use_direct else max_projection_y_alt
    elif dim == 1:
        fn = max_projection_x if use_direct else max_projection_x_alt
    else:
        fn = max_projection_z if use_direct else max_projection_z_alt
    return np.array(fn(*args), copy=False).squeeze()
dtype1 44 | assert np.allclose(np.array(res), val) 45 | 46 | def _test_op(p1, p2, op): 47 | dtype1 = pyapr.utils.particles_to_type(p1) 48 | if isinstance(p2, Number): 49 | out_type = dtype1 50 | val = out_type(op(dtype1(p1[0]), p2)) 51 | else: 52 | dtype2 = pyapr.utils.particles_to_type(p2) 53 | # output type should be float32 if one input is FloatParticles, otherwise the largest input integer type 54 | out_type = np.float32 if np.float32 in (dtype1, dtype2) else type(dtype1(p1[0]) + dtype2(p2[0])) 55 | val = out_type(op(dtype1(p1[0]), dtype2(p2[0]))) 56 | res = op(p1, p2) 57 | assert pyapr.utils.particles_to_type(res) is out_type 58 | assert np.allclose(np.array(res), val) 59 | 60 | 61 | @pytest.mark.parametrize("p1_type", PARTICLE_TYPES) 62 | @pytest.mark.parametrize("p2_type", PARTICLE_TYPES) 63 | def test_particle_arithmetic(p1_type, p2_type): 64 | p1 = _generate_particles(p1_type, 17, 5.3) 65 | p2 = _generate_particles(p2_type, 17, 2.9) 66 | 67 | # compare two ParticleData objects 68 | assert p1 != p2 69 | assert p1 == p1.copy() 70 | assert p2 == p2.copy() 71 | 72 | # compare ParticleData to scalar 73 | assert np.all(np.array(p1 > 4)) and not np.any(np.array(p1 > p1_type(5.3))) 74 | assert np.all(np.array(p1 < 7)) and not np.any(np.array(p1 < p1_type(5.3))) 75 | assert np.all(np.array(p1 == p1_type(5.3))) 76 | assert np.all(np.array(p1 >= p1_type(5.3))) 77 | assert np.all(np.array(p1 <= p1_type(5.3))) 78 | assert np.all(np.array(p1 != 13)) 79 | 80 | # in-place arithmetic operations 81 | _test_inplace_op(p1, p2, _add_inplace, lambda x, y: x + y) 82 | _test_inplace_op(p1, p2, _sub_inplace, lambda x, y: x - y) 83 | _test_inplace_op(p1, p2, _mul_inplace, lambda x, y: x * y) 84 | _test_inplace_op(p1, 4.2, _add_inplace, lambda x, y: x + y) 85 | _test_inplace_op(p1, 1.9, _sub_inplace, lambda x, y: x - y) 86 | _test_inplace_op(p1, 1.8, _mul_inplace, lambda x, y: x * y) 87 | 88 | _test_op(p1, p2, lambda x, y: x + y) 89 | _test_op(p1, p2, lambda x, y: x - y) 90 | 
_test_op(p1, p2, lambda x, y: x * y) 91 | _test_op(p1, 6.7, lambda x, y: x + y) 92 | _test_op(p1, 5.9, lambda x, y: x - y) 93 | _test_op(p1, 2.1, lambda x, y: x * y) 94 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | env: 2 | global: 3 | - CIBW_BUILD="cp36-* cp37-* cp38-* cp39-*" 4 | - CIBW_BEFORE_BUILD_LINUX="apt update && apt install -y libtiff5-dev libhdf5-dev" 5 | - CIBW_ARCHS="auto64" 6 | - CIBW_MANYLINUX_X86_64_IMAGE="manylinux_2_24" 7 | - TWINE_USERNAME=__token__ 8 | - CIBW_BUILD_VERBOSITY=3 9 | - CIBW_REPAIR_WHEEL_COMMAND_MACOS="pip uninstall -y delocate && pip install git+https://github.com/Chia-Network/delocate.git && delocate-listdeps {wheel} && delocate-wheel -w {dest_dir} -v {wheel}" 10 | - CIBW_BEFORE_TEST="pip install -r requirements.txt" 11 | - CIBW_TEST_COMMAND="python3 -m unittest" 12 | 13 | matrix: 14 | include: 15 | - language: python 16 | os: linux 17 | sudo: required 18 | python: '3.7' 19 | services: 20 | - docker 21 | install: 22 | - python3 -m pip install cibuildwheel==1.10.0 23 | script: 24 | - python3 -m cibuildwheel --output-dir wheelhouse 25 | after_success: 26 | - python3 -m pip install twine 27 | - python3 -m twine upload --skip-existing --repository testpypi wheelhouse/*.whl 28 | 29 | - os: osx 30 | osx_image: xcode11.4 31 | language: cpp 32 | addons: 33 | homebrew: 34 | packages: 35 | - c-blosc 36 | - cmake 37 | - llvm 38 | - libomp 39 | install: 40 | - brew upgrade cmake 41 | - python3 -m pip install cibuildwheel==1.10.0 42 | script: 43 | - python3 -m cibuildwheel --output-dir wheelhouse 44 | after_success: 45 | - python3 -m pip install twine 46 | - python3 -m twine upload --skip-existing --repository testpypi wheelhouse/*.whl 47 | 48 | - os: windows 49 | language: bash 50 | cache: 51 | directories: 52 | - $HOME/AppData/Local/Temp/chocolatey 53 | - $HOME/AppData/Local/vcpkg/archives 54 | #- 
/C/ProgramData/chocolatey/bin 55 | #- /C/ProgramData/chocolatey/lib 56 | before_install: 57 | - export VCPKG_FEATURE_FLAGS="binarycaching" 58 | - export EXTRA_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=/c/Users/travis/build/AdaptiveParticles/PyLibAPR/vcpkg/scripts/buildsystems/vcpkg.cmake" 59 | install: 60 | - choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' #need cmake > 3.18 61 | - travis_wait 10 choco install visualstudio2019buildtools --params "--add Microsoft.Component.MSBuild --add Microsoft.VisualStudio.Component.VC.Llvm.Clang --add Microsoft.VisualStudio.Component.VC.Llvm.ClangToolset --add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Llvm.Clang --add Microsoft.VisualStudio.Component.Windows10SDK.19041 --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 --add Microsoft.VisualStudio.ComponentGroup.UWP.VC.BuildTools" 62 | - travis_wait 20 sh travis_windows_setup.sh 63 | - choco install python3 -y --version 3.8.6 64 | - export PATH="/c/Python38:/c/Python38/Scripts:$PATH" 65 | - ln -s /c/Python38/python.exe /c/Python38/python3.exe 66 | - python3 -m pip install cibuildwheel==1.10.0 67 | script: 68 | - python3 -m cibuildwheel --output-dir dist 69 | after_success: 70 | - py -m pip install wheel 71 | - py -m pip install delvewheel 72 | - py fix_windows_wheel.py 73 | - py -m pip install twine 74 | - py -m twine upload --skip-existing --repository testpypi dist/wheelhouse/*.whl -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import sys 4 | import subprocess 5 | from setuptools import setup, Extension 6 | from setuptools.command.build_ext import build_ext 7 | 8 | 9 | # Convert distutils Windows platform specifiers to CMake -A arguments 10 | PLAT_TO_CMAKE = { 11 | "win32": "Win32", 12 | "win-amd64": "x64", 13 | "win-arm32": "ARM", 14 | "win-arm64": "ARM64", 15 | } 16 | 17 | 18 | # A 
class CMakeBuild(build_ext):
    """build_ext command that delegates compilation of the extension to CMake.

    Extra configure-time options can be injected via the ``EXTRA_CMAKE_ARGS``
    environment variable (space-separated ``-D...`` flags). On MSVC a vcpkg
    toolchain file must be provided that way
    (``-DCMAKE_TOOLCHAIN_FILE=.../vcpkg/scripts/buildsystems/vcpkg.cmake``).
    """

    def build_extension(self, ext):
        """Configure and build a single CMakeExtension into the staging directory."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))

        # required for auto-detection of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        build_type = "Debug" if self.debug else "Release"
        build_args = ['--config', build_type]

        # NOTE: the CMAKE_GENERATOR environment variable (set e.g. by
        # Conda-Build) is honoured by CMake itself; no need to read it here.

        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
            "-DPython_EXECUTABLE={}".format(sys.executable),
            "-DCMAKE_BUILD_TYPE={}".format(build_type),  # not used on MSVC, but no harm
        ]

        # Pass additional CMake arguments via the environment variable 'EXTRA_CMAKE_ARGS'
        cmake_args.extend(x for x in os.environ.get('EXTRA_CMAKE_ARGS', '').split(' ') if x)

        if self.compiler.compiler_type == "msvc":
            # Must provide -DCMAKE_TOOLCHAIN_FILE=C:/path/to/vcpkg/scripts/buildsystems/vcpkg.cmake
            # via environment variable EXTRA_CMAKE_ARGS
            cmake_args += [
                "-A", PLAT_TO_CMAKE[self.plat_name],
                "-DVCPKG_TARGET_TRIPLET=x64-windows",
                "-T", "ClangCL",
            ]

        os.makedirs(self.build_temp, exist_ok=True)

        # Single concise status line (replaces leftover debug banner prints).
        print("cmake configure: source={} build={} args={}".format(
            ext.sourcedir, self.build_temp, cmake_args))

        subprocess.check_call(
            ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
        )
        subprocess.check_call(
            ["cmake", "--build", ".", "--parallel", "4"] + build_args, cwd=self.build_temp
        )
pyapr.FloatParticles, 15 | pyapr.LongParticles 16 | ] 17 | 18 | 19 | @pytest.mark.parametrize("ndim", [1, 2, 3]) 20 | def test_read_write(tmp_path, ndim: int): 21 | # read data 22 | apr, parts = load_test_apr(ndim) 23 | assert apr.shape() == APR_SHAPES[ndim-1] 24 | assert len(parts) == apr.total_number_particles() 25 | 26 | tree_parts = pyapr.FloatParticles(apr.total_number_tree_particles()) 27 | tree_parts.fill(1) 28 | 29 | # write data to temporary file 30 | fpath = str(tmp_path / "tmp.apr") 31 | pyapr.io.write(fpath, apr, parts, tree_parts=tree_parts) 32 | 33 | # read newly written data 34 | apr2, parts2 = pyapr.io.read(fpath) 35 | tree_parts2 = pyapr.io.read_particles(fpath, apr, tree_parts, tree=True) 36 | 37 | assert apr2.shape() == apr.shape() 38 | assert apr2.total_number_particles() == apr.total_number_particles() 39 | assert parts2 == parts 40 | assert tree_parts2 == tree_parts 41 | 42 | # read/write APR only 43 | pyapr.io.write_apr(fpath, apr) 44 | apr2 = pyapr.io.read_apr(fpath) 45 | assert apr2.shape() == apr.shape() 46 | assert apr2.total_number_particles() == apr.total_number_particles() 47 | 48 | # empty path -> nothing written 49 | pyapr.io.write('', apr, parts) 50 | pyapr.io.write_apr('', apr) 51 | 52 | with pytest.raises(ValueError): 53 | pyapr.io.write(fpath, pyapr.APR(), parts) 54 | 55 | with pytest.raises(ValueError): 56 | pyapr.io.write(fpath, apr, pyapr.FloatParticles()) 57 | 58 | with pytest.raises(ValueError): 59 | pyapr.io.read('file-does-not-exist.apr') 60 | 61 | with pytest.raises(ValueError): 62 | pyapr.io.read_apr('file-does-not-exist.apr') 63 | 64 | 65 | 66 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 67 | def test_read_write_particles(tmp_path, parts_type): 68 | # generate particle data 69 | parts = parts_type(100) 70 | parts.fill(113) 71 | parts2 = parts + 15 72 | 73 | # write to file 74 | fpath = str(tmp_path / "tmp.apr") 75 | pyapr.io.write_particles(fpath, parts, append=False) 76 | pyapr.io.write_particles(fpath, 
def type_to_particles(typespec: Union[str, type]) -> "ParticleData":
    """
    Returns a ParticleData object of the specified type.

    Parameters
    ----------
    typespec: str or type
        Specification of the data type. Valid specifications are ``'uint8'``, ``'uint16'``,
        ``'uint64'``, ``'int'``/``'int32'`` and ``'float'``/``'float32'``, the builtin
        types ``int`` and ``float``, or the corresponding numpy types.

    Returns
    -------
    parts: ByteParticles, ShortParticles, FloatParticles, LongParticles or IntParticles
        ParticleData of the specified type (if valid).

    Raises
    ------
    ValueError
        If ``typespec`` does not match any supported type.
    """

    if typespec in ('uint16', np.uint16):
        return ShortParticles()
    # accept the builtin ``float`` for consistency with the builtin ``int``
    # accepted below (previously only the string/numpy forms were recognized)
    if typespec in (float, 'float', 'float32', np.float32):
        return FloatParticles()
    if typespec in ('uint8', np.uint8):
        return ByteParticles()
    if typespec in ('uint64', np.uint64):
        return LongParticles()
    if typespec in (int, 'int', 'int32', np.int32):
        return IntParticles()
    raise ValueError(f'Type {typespec} is currently not supported. Valid types are \'uint8\', \'uint16\', '
                     f'\'uint64\', \'int\' and \'float\'')
Valid types are \'uint8\', \'uint16\', ' 66 | f'\'uint64\' and \'float\' (\'float32\')') 67 | 68 | 69 | def particles_to_type(parts: Union[ParticleData, LazyData]) -> type: 70 | """ 71 | Returns the numpy dtype corresponding to a ParticleData or LazyData object. 72 | 73 | Parameters 74 | ---------- 75 | parts: ByteParticles, ShortParticles, LongParticles, FloatParticles, LazyDataByte, LazyDataShort, LazyDataLong or LazyDataFloat 76 | Particle data object. 77 | 78 | Returns 79 | ------- 80 | output: type 81 | numpy type corresponding to the data type of the input object. 82 | """ 83 | 84 | if isinstance(parts, (ShortParticles, LazyDataShort)): 85 | return np.uint16 86 | if isinstance(parts, (FloatParticles, LazyDataFloat)): 87 | return np.float32 88 | if isinstance(parts, (ByteParticles, LazyDataByte)): 89 | return np.uint8 90 | if isinstance(parts, (LongParticles, LazyDataLong)): 91 | return np.uint64 92 | if isinstance(parts, IntParticles): 93 | return np.int32 94 | raise TypeError(f'Input must be of type {ParticleData} or {LazyData} ({type(parts)} was provided)') 95 | -------------------------------------------------------------------------------- /demo/get_apr_by_block_interactive_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pyapr 3 | import tifffile 4 | 5 | 6 | """ 7 | Interactive APR conversion of large images. Reads in a block of z-slices for interactive setting of the 8 | following parameters: 9 | Ip_th (background intensity level) 10 | sigma_th (local intensity scale threshold) 11 | grad_th (gradient threshold) 12 | 13 | Use the sliders to control the adaptation. The red overlay shows (approximately) the regions that will be fully 14 | resolved (at pixel resolution). 15 | 16 | Once the parameters are set, the entire image is processed in overlapping blocks of z-slices. The size of the 17 | blocks, and the overlap, can be set in the code below to control the memory consumption. 
18 | 19 | Note: The effect of grad_th may hide the effect of the other thresholds. It is thus recommended to keep grad_th 20 | low while setting Ip_th and sigma_th, and then increasing grad_th. 21 | """ 22 | 23 | # Read in an image 24 | io_int = pyapr.utils.InteractiveIO() 25 | fpath = io_int.get_tiff_file_name() # get image file path from gui (data type must be float32 or uint16) 26 | 27 | # Specify the z-range to be used to set the parameters 28 | z_start = 0 29 | z_end = 256 30 | 31 | # Read slice range into numpy array 32 | with tifffile.TiffFile(fpath) as tif: 33 | img = tif.asarray(key=slice(z_start, z_end)) 34 | 35 | # Set some parameters (only Ip_th, grad_th and sigma_th are set interactively) 36 | par = pyapr.APRParameters() 37 | par.rel_error = 0.1 # relative error threshold 38 | par.gradient_smoothing = 3 # b-spline smoothing parameter for gradient estimation 39 | # 0 = no smoothing, higher = more smoothing 40 | par.dx = 1 41 | par.dy = 1 # voxel size 42 | par.dz = 1 43 | 44 | # Interactively set the threshold parameters using the partial image 45 | par = pyapr.converter.find_parameters_interactive(img, params=par, verbose=True, slider_decimals=1) 46 | 47 | del img # Parameters found, we don't need the partial image anymore 48 | 49 | # par.input_dir + par.input_image_name must be the path to the image file 50 | par.input_dir = '' 51 | par.input_image_name = fpath 52 | 53 | # Initialize the by-block converter 54 | converter = pyapr.converter.ShortConverterBatch() 55 | converter.set_parameters(par) 56 | converter.verbose = True 57 | 58 | # Parameters controlling the memory usage 59 | converter.z_block_size = 256 # number of z-slices to process in each block during APR conversion 60 | converter.z_ghost_size = 32 # number of ghost slices to use on each side of the blocks 61 | block_size_sampling = 256 # block size for sampling of particle intensities 62 | ghost_size_sampling = 128 # ghost size for sampling of particle intensities 63 | 64 | # Compute the APR 
65 | apr = pyapr.APR() 66 | success = converter.get_apr(apr) 67 | 68 | if success: 69 | cr = apr.computational_ratio() 70 | print('APR Conversion successful! Computational ratio (#pixels / #particles) = {}'.format(cr)) 71 | 72 | print('Sampling particle intensity values') 73 | parts = pyapr.ShortParticles() 74 | parts.sample_image_blocked(apr, fpath, block_size_sampling, ghost_size_sampling) 75 | print('Done!') 76 | 77 | # View the result in the by-slice viewer 78 | pyapr.viewer.parts_viewer(apr, parts) 79 | 80 | # Write the resulting APR to file 81 | print("Writing APR to file ... \n") 82 | fpath_apr = io_int.save_apr_file_name() # get path through gui 83 | pyapr.io.write(fpath_apr, apr, parts) 84 | 85 | if fpath_apr: 86 | # Display the size of the file 87 | file_sz = os.path.getsize(fpath_apr) 88 | print("APR File Size: {:7.2f} MB \n".format(file_sz * 1e-6)) 89 | 90 | # Compute compression ratio 91 | mcr = os.path.getsize(fpath) / file_sz 92 | print("Memory Compression Ratio: {:7.2f}".format(mcr)) 93 | 94 | else: 95 | print('Something went wrong...') 96 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | # Installing via pip 2 | 3 | For Windows 10, OSX, and Linux and Python versions 3.6-3.9 direct 4 | installation with OpenMP support should work via [pip]: 5 | ``` 6 | pip install pyapr 7 | ``` 8 | We recommend using a virtual environment (see below). 9 | 10 | CUDA functionality is currently not enabled when installing via pip. 11 | For this, it is necessary to build the package from source. 12 | 13 | # Installing from source 14 | 15 | ## Dependencies 16 | 17 | [LibAPR](https://github.com/AdaptiveParticles/LibAPR) is included as a submodule, and built alongside the wrappers. 
18 | This requires the following packages: 19 | 20 | * HDF5 1.8.20 or higher 21 | * OpenMP > 3.0 (optional, but recommended) 22 | * CMake 3.6 or higher 23 | * LibTIFF 4.0 or higher 24 | 25 | pyapr additionally requires Python 3, and the packages listed in [requirements.txt](requirements.txt). 26 | 27 | ### Installing dependencies on Linux 28 | 29 | On Ubuntu, install the `cmake`, `build-essential`, `libhdf5-dev` and `libtiff5-dev` packages (on other distributions, 30 | refer to the documentation there, the package names will be similar). OpenMP support is provided by the GCC compiler 31 | installed as part of the `build-essential` package. 32 | 33 | ### Installing dependencies on OSX 34 | 35 | On OSX, install the `cmake`, `hdf5` and `libtiff` [homebrew](https://brew.sh) packages and have the 36 | [Xcode command line tools](http://osxdaily.com/2014/02/12/install-command-line-tools-mac-os-x/) installed. 37 | If you want to compile with OpenMP support, also install the `llvm` package (this can also be done using homebrew), 38 | as the clang version shipped by Apple currently does not support OpenMP. 39 | 40 | ### Note for windows users 41 | 42 | Please see [LibAPR](https://github.com/AdaptiveParticles/LibAPR) for the latest windows install instructions. 43 | 44 | ## Building 45 | 46 | The repository requires submodules, and needs to be cloned recursively: 47 | 48 | ``` 49 | git clone --recursive https://github.com/AdaptiveParticles/pyapr.git 50 | ``` 51 | 52 | It is recommended to use a virtual environment, such as `virtualenv`. To set this up, use e.g. 53 | 54 | ``` 55 | pip3 install virtualenv 56 | python3 -m virtualenv myenv 57 | source myenv/bin/activate 58 | ``` 59 | 60 | The required Python packages can be installed via the command 61 | ``` 62 | pip install -r requirements.txt 63 | ``` 64 | 65 | Once the dependencies are installed, pyapr can be built via the setup.py script, 66 | e.g. 
by running 67 | ``` 68 | python setup.py install 69 | ``` 70 | or 71 | ``` 72 | pip install . 73 | ``` 74 | in the root directory of the repository. 75 | 76 | ### CMake build options 77 | 78 | There are two CMake options that can be given to enable or disable OpenMP and CUDA: 79 | 80 | | Option | Description | Default value | 81 | |:--|:--|:--| 82 | | PYAPR_USE_OPENMP | Enable multithreading via OpenMP | ON | 83 | | PYAPR_USE_CUDA | Build available CUDA functionality | OFF | 84 | | PYAPR_PREFER_EXTERNAL_LIBAPR | Use an installed version of LibAPR (if found) rather than building it from submodules | OFF | 85 | 86 | When building via the setup.py script, these options can be set via the environment variable `EXTRA_CMAKE_ARGS`. For example, 87 | ``` 88 | EXTRA_CMAKE_ARGS="-DPYAPR_USE_OPENMP=OFF -DPYAPR_USE_CUDA=OFF" python setup.py install 89 | ``` 90 | will install the package with both OpenMP and CUDA disabled. If building from an installed version of LibAPR in a non-standard 91 | location, help CMake find it, e.g., by passing `-DCMAKE_PREFIX_PATH=/path/to/APR`. Additional compiler options can be 92 | set similarly, e.g. 
via the variables `CMAKE_CXX_FLAGS` and `CMAKE_CUDA_FLAGS` 93 | 94 | ### OpenMP support on OSX 95 | 96 | To use the homebrew-installed clang for OpenMP support on OSX, modify the call above to 97 | ``` 98 | CPPFLAGS="-I/usr/local/opt/llvm/include" LDFLAGS="-L/usr/local/opt/llvm/lib -Wl,-rpath,/usr/local/opt/llvm/lib" CXX="/usr/local/opt/llvm/bin/clang++" CC="/usr/local/opt/llvm/bin/clang" python setup.py install 99 | ``` -------------------------------------------------------------------------------- /pyapr/tests/test_morphology.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pyapr 3 | from .helpers import load_test_apr, load_test_apr_obj 4 | import numpy as np 5 | 6 | PARTICLE_TYPES = [ 7 | pyapr.ByteParticles, 8 | pyapr.ShortParticles, 9 | pyapr.FloatParticles, 10 | pyapr.LongParticles 11 | ] 12 | 13 | MASK_TYPES = [ 14 | pyapr.ByteParticles, 15 | pyapr.ShortParticles, 16 | pyapr.LongParticles 17 | ] 18 | 19 | 20 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 21 | def test_erosion_dilation(parts_type): 22 | apr, parts = load_test_apr_obj() 23 | mask = parts_type(apr.total_number_particles()) 24 | mask.fill(0) 25 | 26 | indices = np.random.randint(apr.total_number_particles(), size=20) 27 | it = apr.iterator() 28 | 29 | for idx in indices: 30 | # set a single particle to 1 and dilate 31 | mask[idx] = 1 32 | tmp = pyapr.morphology.dilation(apr, mask, binary=True, inplace=False) 33 | 34 | # get coordinates to find neighbors 35 | level, z_l, x_l, y_l = it.find_coordinates(idx) 36 | level_size = 2**(it.level_max()-level) 37 | z = z_l * level_size 38 | x = x_l * level_size 39 | y = y_l * level_size 40 | 41 | # ensure neighboring values are 1 42 | if z > 0: 43 | assert tmp[it.find_particle(z - 1, x, y)] == 1 44 | if x > 0: 45 | assert tmp[it.find_particle(z, x - 1, y)] == 1 46 | if y > 0: 47 | assert tmp[it.find_particle(z, x, y - 1)] == 1 48 | if z + level_size < apr.org_dims(2): 49 | assert 
tmp[it.find_particle(z + level_size, x, y)] == 1 50 | if x + level_size < apr.org_dims(1): 51 | assert tmp[it.find_particle(z, x + level_size, y)] == 1 52 | if y + level_size < apr.org_dims(0): 53 | assert tmp[it.find_particle(z, x, y + level_size)] == 1 54 | 55 | # after eroding the result, only the original particle should be 1 56 | tmp = pyapr.morphology.erosion(apr, tmp, binary=True, inplace=True) 57 | assert tmp[idx] == 1 58 | assert np.sum(np.array(tmp)) == 1 59 | mask[idx] = 0 60 | 61 | 62 | @pytest.mark.parametrize("parts_type", PARTICLE_TYPES) 63 | @pytest.mark.parametrize("binary", [True, False]) 64 | def test_run_morphology_ops(parts_type, binary): 65 | apr, parts = load_test_apr_obj() 66 | parts = parts_type(parts > 101) if binary else parts_type(parts) 67 | 68 | out = pyapr.morphology.opening(apr, parts, binary=binary, radius=2) 69 | out = pyapr.morphology.closing(apr, parts, binary=binary, radius=2) 70 | out = pyapr.morphology.tophat(apr, parts, binary=binary, radius=2) 71 | out = pyapr.morphology.bottomhat(apr, parts, binary=binary, radius=2) 72 | out = pyapr.morphology.find_perimeter(apr, parts) 73 | 74 | 75 | @pytest.mark.parametrize("mask_type", MASK_TYPES) 76 | def test_remove_x_ops(mask_type): 77 | apr, parts = load_test_apr_obj() 78 | mask = parts > 101 79 | cc = pyapr.measure.connected_component(apr, mask, output=mask_type()) 80 | num_obj = cc.max() 81 | labels = set(range(num_obj+1)) 82 | 83 | vol = pyapr.measure.find_label_volume(apr, cc) 84 | threshold = np.min(vol[1:]) + 1 85 | min_label = np.argmin(vol[1:]) + 1 86 | 87 | out = pyapr.morphology.remove_small_objects(apr, cc, min_volume=threshold) 88 | assert min_label not in out 89 | labels.remove(min_label) 90 | max_label = max(labels) 91 | assert max_label in out 92 | 93 | out = pyapr.morphology.remove_large_objects(apr, cc, max_volume=threshold) 94 | assert min_label in out 95 | assert max_label not in out 96 | 97 | out = pyapr.morphology.remove_small_holes(apr, cc, 50) 98 | assert out 
== cc # there are no holes in the input mask 99 | 100 | out = pyapr.morphology.remove_edge_objects(apr, cc) 101 | assert out == cc # there are no objects on edges in the input mask 102 | 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /pyapr/tree/fill_tree.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.tree import fill_tree_mean as _fill_tree_mean, \ 2 | fill_tree_min as _fill_tree_min, \ 3 | fill_tree_max as _fill_tree_max 4 | from _pyaprwrapper.data_containers import APR, ByteParticles, ShortParticles, FloatParticles, LongParticles 5 | from .._common import _check_input 6 | from typing import Union, Optional 7 | 8 | 9 | ParticleData = Union[ByteParticles, ShortParticles, FloatParticles, LongParticles] 10 | 11 | 12 | def _check_output_type(parts: ParticleData, output: ParticleData) -> ParticleData: 13 | if output is None: 14 | return type(parts)() 15 | if not isinstance(output, (FloatParticles, type(parts))): 16 | raise TypeError(f'\'output\' must be None, FloatParticles or the same type as the input particles ({type(parts)})') 17 | return output 18 | 19 | 20 | def fill_tree_mean(apr: APR, 21 | parts: ParticleData, 22 | output: Optional[ParticleData] = None) -> ParticleData: 23 | """ 24 | Compute the values of all tree nodes (parent nodes of APR particles) by average downsampling. 25 | 26 | Parameters 27 | ---------- 28 | apr: APR 29 | Input APR data structure. 30 | parts: ByteParticles, ShortParticles, FloatParticles or LongParticles 31 | Input APR particle values. 32 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles, optional 33 | Particle data object for the output values. If provided, the type must either be FloatParticles 34 | or the same type as ``parts``. If None, generates a new object of the same type as ``parts``. 
(default: None) 35 | 36 | Returns 37 | ------- 38 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 39 | The computed tree values. 40 | """ 41 | _check_input(apr, parts) 42 | output = _check_output_type(parts, output) 43 | _fill_tree_mean(apr, parts, output) 44 | return output 45 | 46 | 47 | def fill_tree_max(apr: APR, 48 | parts: ParticleData, 49 | output: Optional[ParticleData] = None) -> ParticleData: 50 | """ 51 | Compute the values of all tree nodes (parent nodes of APR particles) by max downsampling. 52 | 53 | Parameters 54 | ---------- 55 | apr: APR 56 | Input APR data structure. 57 | parts: ByteParticles, ShortParticles, FloatParticles or LongParticles 58 | Input APR particle values. 59 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles, optional 60 | Particle data object for the output values. If provided, the type must either be FloatParticles 61 | or the same type as ``parts``. If None, generates a new object of the same type as ``parts``. (default: None) 62 | 63 | Returns 64 | ------- 65 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 66 | The computed tree values. 67 | """ 68 | _check_input(apr, parts) 69 | output = _check_output_type(parts, output) 70 | _fill_tree_max(apr, parts, output) 71 | return output 72 | 73 | 74 | def fill_tree_min(apr: APR, 75 | parts: ParticleData, 76 | output: Optional[ParticleData] = None) -> ParticleData: 77 | """ 78 | Compute the values of all tree nodes (parent nodes of APR particles) by min downsampling. 79 | 80 | Parameters 81 | ---------- 82 | apr: APR 83 | Input APR data structure. 84 | parts: ByteParticles, ShortParticles, FloatParticles or LongParticles 85 | Input APR particle values. 86 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles, optional 87 | Particle data object for the output values. If provided, the type must either be FloatParticles 88 | or the same type as ``parts``. 
If None, generates a new object of the same type as ``parts``. (default: None) 89 | 90 | Returns 91 | ------- 92 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 93 | The computed tree values. 94 | """ 95 | _check_input(apr, parts) 96 | output = _check_output_type(parts, output) 97 | _fill_tree_min(apr, parts, output) 98 | return output 99 | -------------------------------------------------------------------------------- /pyapr/data_containers/src/BindLinearIterator.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Joel Jonsson on 07.05.20. 3 | // 4 | 5 | #ifndef PYLIBAPR_PYLINEARITERATOR_HPP 6 | #define PYLIBAPR_PYLINEARITERATOR_HPP 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "data_structures/APR/APR.hpp" 14 | 15 | namespace py = pybind11; 16 | 17 | auto _find_particle= [](LinearIterator& it, const int z, const int x, const int y) -> uint64_t { 18 | if (z < 0 || z >= it.z_num(it.level_max()) || 19 | x < 0 || x >= it.x_num(it.level_max()) || 20 | y < 0 || y >= it.y_num(it.level_max())) { 21 | throw std::invalid_argument("LinearIterator::find_particle : coordinates (" + std::to_string(z) + 22 | ", " + std::to_string(x) + ", " + std::to_string(y) + ") out of bounds"); 23 | } 24 | for(int level = it.level_min(); level <= it.level_max(); ++level) { 25 | int z_l = z / it.level_size(level); 26 | int x_l = x / it.level_size(level); 27 | int y_l = y / it.level_size(level); 28 | for(it.begin(level, z_l, x_l); it < it.end(); ++it) { 29 | if(it.y() == y_l) { 30 | return it.global_index(); 31 | } 32 | } 33 | } 34 | throw std::runtime_error("no particle found at (" + std::to_string(z) + ", " + std::to_string(x) + 35 | ", " + std::to_string(y) + ")"); 36 | }; 37 | 38 | 39 | auto _find_coordinates = [](LinearIterator& it, const uint64_t idx) -> py::tuple { 40 | 41 | if(idx >= it.total_number_particles()) { 42 | throw std::invalid_argument("index " + 
std::to_string(idx) + " is out of bounds"); 43 | } 44 | 45 | int level = it.level_min(), z=0, x=0, y=0; 46 | 47 | while(it.particles_level_end(level) <= idx) { 48 | level++; 49 | } 50 | 51 | const int z_num = it.z_num(level); 52 | const int x_num = it.x_num(level); 53 | 54 | it.begin(level, z, x_num-1); 55 | while(it.end() <= idx && z < z_num) { 56 | it.begin(level, ++z, x_num-1); 57 | } 58 | 59 | it.begin(level, z, x); 60 | while(it.end() <= idx && x < x_num) { 61 | it.begin(level, z, ++x); 62 | } 63 | 64 | y = it.get_y(idx); 65 | return py::make_tuple(level, z, x, y); 66 | }; 67 | 68 | 69 | // -------- wrapper ------------------------------------------------- 70 | void AddLinearIterator(pybind11::module &m) { 71 | 72 | using namespace py::literals; 73 | 74 | py::class_(m, "LinearIterator") 75 | .def(py::init()) 76 | .def("total_number_particles", &LinearIterator::total_number_particles, 77 | "return number of particles up to level (by default level = 0 -> all levels)", "level"_a=0) 78 | .def("level_min", &LinearIterator::level_min, "return the minimum resolution level") 79 | .def("level_max", &LinearIterator::level_max, "return the maximum resolution level") 80 | .def("x_num", &LinearIterator::x_num, "Gives the maximum bounds in the x direction for the given level", "level"_a) 81 | .def("y_num", &LinearIterator::y_num, "Gives the maximum bounds in the y direction for the given level", "level"_a) 82 | .def("z_num", &LinearIterator::z_num, "Gives the maximum bounds in the z direction for the given level", "level"_a) 83 | .def("y", &LinearIterator::get_y, "returns the y-coordinate of a given particle index", "idx"_a) 84 | .def("begin", &LinearIterator::begin, 85 | "returns the index of the first particle in the sparse row (level, z, x)", "level"_a, "z"_a, "x"_a) 86 | .def("end", &LinearIterator::end, 87 | "returns the (exclusive) end index of the current sparse row (level, z, x) opened using 'begin'") 88 | .def("find_particle", _find_particle, 89 | "return the 
particle index corresponding to a given pixel location", "z"_a, "x"_a, "y"_a) 90 | .def("find_coordinates", _find_coordinates, 91 | "return the location of the particle at a given index as a tuple (level, z, x, y)", "idx"_a); 92 | } 93 | 94 | #endif //PYLIBAPR_PYLINEARITERATOR_HPP 95 | -------------------------------------------------------------------------------- /pyapr/viewer/particleScatterPlot.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ByteParticles, ShortParticles, FloatParticles, LongParticles 2 | from _pyaprwrapper.viewer import get_points 3 | from .._common import _check_input 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | import io 7 | from PIL import Image 8 | from typing import Union, Optional, Tuple, List, Any 9 | 10 | 11 | def particle_scatter_plot(apr: APR, 12 | parts: Union[ByteParticles, ShortParticles, FloatParticles, LongParticles], 13 | z: Optional[int] = None, 14 | base_markersize: int = 1, 15 | markersize_scale_factor: int = 1, 16 | save_path: Optional[str] = None, 17 | figsize: Optional[Any] = None, 18 | dpi: int = 100, 19 | xrange: Optional[Union[Tuple, List]] = None, 20 | yrange: Optional[Union[Tuple, List]] = None, 21 | display: bool = False, 22 | cmap: str = 'viridis'): 23 | """ 24 | Plot particles in a z-slice (sub-) region as dots colored by intensity and sized according to particle size. 25 | Uses matplotlib for plotting. 26 | 27 | Parameters 28 | ---------- 29 | apr: APR 30 | Input APR data structure. 31 | parts: ParticleData 32 | Input particle intensity values 33 | z: int, optional 34 | Index of the z-slice to plot. If `None`, the center slice of the volume is taken. (default: None) 35 | base_markersize: int 36 | Marker size of the finest dots to plot. 37 | markersize_scale_factor: int 38 | Grow dot size exponentially according to `base_markersize * particle_side_length ** markersize_scale_factor`. 
39 | save_path: str, optional 40 | If provided, the resulting figure is saved to this path. 41 | figsize: Any, optional 42 | Size specification of the matplotlib window. 43 | dpi: int 44 | Figure resolution in dots-per-inch. 45 | xrange: tuple or list, optional 46 | Specify the range to plot in the x dimension. If `None`, plots the entire image range. (default: None) 47 | yrange: tuple or list, optional 48 | Specify the range to plot in the y dimension. If `None`, plots the entire image range. (default: None) 49 | display: bool 50 | If `True`, calls matplotlib.pyplot.show() to display the figure. 51 | cmap: str 52 | Matplotlib color map to use. 53 | """ 54 | _check_input(apr, parts) 55 | if z is None: 56 | z = apr.z_num(apr.level_max())//2 57 | 58 | arr = np.array(get_points(apr, parts, z), copy=False).squeeze() 59 | # arr is an array of size (4, num_particles), where the rows are: [x, y, particle size in pixels, intensity] 60 | 61 | # x and y are inverted in the APR 62 | xsize = apr.y_num(apr.level_max()) 63 | ysize = apr.x_num(apr.level_max()) 64 | 65 | if isinstance(xrange, (tuple, list)) and len(xrange) >= 2: 66 | if 0 <= xrange[0] < xrange[1] < xsize: 67 | arr = arr[:, xrange[0] <= arr[0]] 68 | arr = arr[:, arr[0] < xrange[1]] 69 | xsize = xrange[1] - xrange[0] 70 | 71 | if isinstance(yrange, (tuple, list)) and len(yrange) >= 2: 72 | if 0 <= yrange[0] < yrange[1] < ysize: 73 | arr = arr[:, yrange[0] <= arr[1]] 74 | arr = arr[:, arr[1] < yrange[1]] 75 | ysize = yrange[1] - yrange[0] 76 | 77 | fig = plt.figure(figsize=figsize if figsize else [xsize/dpi, ysize/dpi], dpi=dpi) 78 | ax = plt.axes([0, 0, 1, 1], frameon=False, figure=fig) 79 | ax.get_xaxis().set_visible(False) 80 | ax.get_yaxis().set_visible(False) 81 | ax.invert_yaxis() 82 | plt.autoscale(tight=True) 83 | plt.scatter(arr[0], arr[1], s=base_markersize*arr[2]**markersize_scale_factor, c=arr[3], marker='.', cmap=cmap) 84 | 85 | if save_path is not None: 86 | if save_path.endswith('.tif'): 87 | png1 = 
io.BytesIO() 88 | plt.savefig(png1, format='png', dpi=dpi, bbox_inches='tight', pad_inches=0) 89 | png2 = Image.open(png1) 90 | png2.save(save_path) 91 | png1.close() 92 | else: 93 | plt.savefig(save_path, dpi=dpi, bbox_inches='tight', pad_inches=0) 94 | 95 | if display: 96 | plt.show() 97 | -------------------------------------------------------------------------------- /external/maxflow-v3.04.src/graph.cpp: -------------------------------------------------------------------------------- 1 | /* graph.cpp */ 2 | 3 | 4 | #include 5 | #include 6 | #include 7 | #include "graph.h" 8 | #include 9 | 10 | /* 11 | special constants for node->parent. Duplicated in maxflow.cpp, both should match! 12 | */ 13 | #define TERMINAL ( (arc *) 1 ) /* to terminal */ 14 | #define ORPHAN ( (arc *) 2 ) /* orphan */ 15 | 16 | template 17 | Graph::Graph(int node_num_max, int edge_num_max, void (*err_function)(const char *)) 18 | : node_num(0), 19 | nodeptr_block(NULL), 20 | error_function(err_function) 21 | { 22 | if (node_num_max < 16) node_num_max = 16; 23 | if (edge_num_max < 16) edge_num_max = 16; 24 | 25 | nodes = (node*) malloc(node_num_max*sizeof(node)); 26 | arcs = (arc*) malloc(2*edge_num_max*sizeof(arc)); 27 | 28 | std::cout << "Allocating " << node_num_max*sizeof(node)*1e-9 << " GB of memory for nodes" << std::endl; 29 | std::cout << "Allocating " << 2*edge_num_max*sizeof(arc)*1e-9 << " GB of memory for edges" << std::endl; 30 | std::cout << "Total memory for nodes and edges: " << node_num_max*sizeof(node)*1e-9 + 2*edge_num_max*sizeof(arc)*1e-9 << " GB" << std::endl; 31 | if (!nodes || !arcs) { 32 | if (error_function) (*error_function)("Not enough memory!"); 33 | exit(1); 34 | } 35 | 36 | node_last = nodes; 37 | node_max = nodes + node_num_max; 38 | arc_last = arcs; 39 | arc_max = arcs + 2*edge_num_max; 40 | 41 | maxflow_iteration = 0; 42 | flow = 0; 43 | } 44 | 45 | template 46 | Graph::~Graph() 47 | { 48 | if (nodeptr_block) 49 | { 50 | delete nodeptr_block; 51 | 
nodeptr_block = NULL; 52 | } 53 | free(nodes); 54 | free(arcs); 55 | } 56 | 57 | template 58 | void Graph::reset() 59 | { 60 | node_last = nodes; 61 | arc_last = arcs; 62 | node_num = 0; 63 | 64 | if (nodeptr_block) 65 | { 66 | delete nodeptr_block; 67 | nodeptr_block = NULL; 68 | } 69 | 70 | maxflow_iteration = 0; 71 | flow = 0; 72 | } 73 | 74 | template 75 | void Graph::reallocate_nodes(int num) 76 | { 77 | int node_num_max = (int)(node_max - nodes); 78 | node* nodes_old = nodes; 79 | 80 | node_num_max += node_num_max / 2; 81 | if (node_num_max < node_num + num) node_num_max = node_num + num; 82 | nodes = (node*) realloc(nodes_old, node_num_max*sizeof(node)); 83 | if (!nodes) { if (error_function) (*error_function)("Not enough memory!"); exit(1); } 84 | 85 | node_last = nodes + node_num; 86 | node_max = nodes + node_num_max; 87 | 88 | if (nodes != nodes_old) 89 | { 90 | node* i; 91 | arc* a; 92 | for (i=nodes; inext) i->next = (node*) ((char*)i->next + (((char*) nodes) - ((char*) nodes_old))); 95 | } 96 | for (a=arcs; ahead = (node*) ((char*)a->head + (((char*) nodes) - ((char*) nodes_old))); 99 | } 100 | } 101 | } 102 | 103 | template 104 | void Graph::reallocate_arcs() 105 | { 106 | int arc_num_max = (int)(arc_max - arcs); 107 | int arc_num = (int)(arc_last - arcs); 108 | arc* arcs_old = arcs; 109 | 110 | arc_num_max += arc_num_max / 2; if (arc_num_max & 1) arc_num_max ++; 111 | arcs = (arc*) realloc(arcs_old, arc_num_max*sizeof(arc)); 112 | std::cout << "Reallocating edges: " << arc_num_max*sizeof(arc)*1e-9 << " GB (arc_num_max = " << arc_num_max << ")" << std::endl; 113 | if (!arcs) { if (error_function) (*error_function)("Not enough memory!"); exit(1); } 114 | 115 | arc_last = arcs + arc_num; 116 | arc_max = arcs + arc_num_max; 117 | 118 | if (arcs != arcs_old) 119 | { 120 | node* i; 121 | arc* a; 122 | for (i=nodes; ifirst) i->first = (arc*) ((char*)i->first + (((char*) arcs) - ((char*) arcs_old))); 125 | if (i->parent && i->parent != ORPHAN && i->parent != 
TERMINAL) i->parent = (arc*) ((char*)i->parent + (((char*) arcs) - ((char*) arcs_old))); 126 | } 127 | for (a=arcs; anext) a->next = (arc*) ((char*)a->next + (((char*) arcs) - ((char*) arcs_old))); 130 | a->sister = (arc*) ((char*)a->sister + (((char*) arcs) - ((char*) arcs_old))); 131 | } 132 | } 133 | } 134 | 135 | #include "instances.inc" 136 | -------------------------------------------------------------------------------- /pyapr/viewer/src/BindRaycaster.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef PYLIBAPR_BINDRAYCASTER_HPP 3 | #define PYLIBAPR_BINDRAYCASTER_HPP 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | #include "data_containers/src/BindParticleData.hpp" 10 | #include "data_structures/Mesh/PixelData.hpp" 11 | #include "numerics/APRRaycaster.hpp" 12 | 13 | #ifndef M_PI 14 | #define M_PI 3.14159265358979323846 15 | #endif 16 | 17 | namespace py = pybind11; 18 | 19 | /** 20 | * @tparam T data type of particle values 21 | */ 22 | 23 | class PyAPRRaycaster { 24 | 25 | APRRaycaster apr_raycaster; 26 | 27 | float current_angle = 0; 28 | 29 | ReconPatch rp; 30 | 31 | float z = 0; 32 | 33 | 34 | public: 35 | 36 | PyAPRRaycaster() { 37 | rp.level_delta = 0; 38 | } 39 | 40 | void set_verbose(bool verboseMode) { apr_raycaster.verbose = verboseMode; } 41 | 42 | void set_angle(float angle){ 43 | current_angle = angle; 44 | } 45 | 46 | void set_phi(float angle){ 47 | //apr_raycaster.phi = -3.14/2 + fmod(angle-3.14f/2.0f,(3.14f)); 48 | apr_raycaster.phi = angle; 49 | } 50 | 51 | void increment_angle(float angle){ 52 | current_angle += angle; 53 | } 54 | 55 | void increment_phi(float angle){ 56 | // this is required due to the fixed domain of phi (from -pi/2 to pi/2) I used a cos instead of sin. 
57 | 58 | float new_angle = apr_raycaster.phi_s + angle; 59 | if(new_angle > M_PI/2){ 60 | double diff = new_angle - M_PI/2; 61 | diff = std::min(diff, M_PI); 62 | apr_raycaster.phi_s = -M_PI/2 + diff; 63 | apr_raycaster.phi += angle; 64 | } else if (new_angle < -M_PI/2){ 65 | 66 | double diff = -new_angle - M_PI/2; 67 | diff = std::min(diff, M_PI); 68 | 69 | apr_raycaster.phi = M_PI/2 - diff; 70 | 71 | apr_raycaster.phi_s = M_PI/2 - diff; 72 | apr_raycaster.phi += angle; 73 | } else { 74 | apr_raycaster.phi_s += angle; 75 | apr_raycaster.phi += angle; 76 | } 77 | } 78 | 79 | void set_level_delta(int level_delta){ 80 | this->rp.level_delta = level_delta; 81 | } 82 | 83 | void set_z_anisotropy(float aniso){ 84 | apr_raycaster.scale_z = aniso; 85 | } 86 | 87 | void set_radius(float radius){ 88 | apr_raycaster.radius_factor = radius; 89 | } 90 | 91 | float get_radius(){ 92 | return apr_raycaster.radius_factor; 93 | } 94 | 95 | 96 | void get_view(APR& apr, PyParticleData& particles, PyParticleData& particles_tree, py::array_t& input) { 97 | 98 | py::gil_scoped_acquire acquire; 99 | 100 | auto buf = input.request(); 101 | 102 | auto *ptr = static_cast(buf.ptr); 103 | 104 | PixelData input_img; 105 | 106 | input_img.init_from_mesh(buf.shape[0], buf.shape[1], 1, ptr); // may lead to memory issues 107 | 108 | apr_raycaster.theta_0 = current_angle; //start 109 | apr_raycaster.theta_final = 0; //stop radians 110 | apr_raycaster.theta_delta = (apr_raycaster.theta_final - apr_raycaster.theta_0); //steps 111 | 112 | apr_raycaster.scale_down = pow(2,rp.level_delta); 113 | 114 | apr_raycaster.perform_raycast_patch(apr,particles,particles_tree,input_img,rp,[] (const uint16_t& a,const uint16_t& b) {return std::max(a,b);}); 115 | 116 | py::gil_scoped_release release; 117 | } 118 | 119 | 120 | }; 121 | 122 | void AddRaycaster(pybind11::module &m, const std::string &modulename) { 123 | 124 | py::class_(m, modulename.c_str()) 125 | .def(py::init()) 126 | .def("set_verbose", 
&PyAPRRaycaster::set_verbose, "set verbose mode for projection timer") 127 | .def("set_angle",&PyAPRRaycaster::set_angle, "demo") 128 | .def("set_level_delta",&PyAPRRaycaster::set_level_delta, "demo") 129 | .def("set_z_anisotropy",&PyAPRRaycaster::set_z_anisotropy, "demo") 130 | .def("set_radius",&PyAPRRaycaster::set_radius, "demo") 131 | .def("set_phi",&PyAPRRaycaster::set_phi, "demo") 132 | .def("get_radius",&PyAPRRaycaster::get_radius, "demo") 133 | .def("increment_phi",&PyAPRRaycaster::increment_phi, "demo") 134 | .def("increment_angle",&PyAPRRaycaster::increment_angle, "demo") 135 | .def("get_view", &PyAPRRaycaster::get_view, "demo"); 136 | } 137 | 138 | #endif //PYLIBAPR_BINDRAYCASTER_HPP 139 | -------------------------------------------------------------------------------- /pyapr/filter/rank_filters.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.filter.rank_filters import * 2 | from _pyaprwrapper.data_containers import APR, ByteParticles, ShortParticles, FloatParticles, LongParticles 3 | from .._common import _check_input 4 | from typing import Tuple, Union 5 | 6 | ParticleData = Union[ByteParticles, ShortParticles, FloatParticles, LongParticles] 7 | 8 | __allowed_sizes_median__ = [(x, x, x) for x in [3, 5, 7, 9, 11]] + [(1, x, x) for x in [3, 5, 7, 9, 11]] 9 | __allowed_sizes_min__ = __allowed_sizes_median__ 10 | __allowed_sizes_max__ = __allowed_sizes_median__ 11 | __allowed_input_types__ = (ByteParticles, ShortParticles, FloatParticles, LongParticles) 12 | 13 | 14 | def _check_size(size, allowed_sizes): 15 | if size not in allowed_sizes: 16 | raise ValueError(f'Invalid size {size}. Allowed values are {allowed_sizes}') 17 | 18 | 19 | def median_filter(apr: APR, 20 | parts: ParticleData, 21 | size: Tuple[int, int, int] = (5, 5, 5)): 22 | """ 23 | Apply median filter to an APR image and return a new set of particle values. 
24 | Each output particle is the median of an isotropic neighborhood of the given size 25 | in the input image. At coarse resolutions, neighboring values at finer resolution 26 | are average downsampled. 27 | 28 | Parameters 29 | ---------- 30 | apr: APR 31 | APR data structure. 32 | parts: ByteParticles, ShortParticles, FloatParticles or LongParticles 33 | Input particle values. 34 | size: (int, int, int) 35 | Size of the neighborhood in (z, x, y) dimensions. 36 | Allowed sizes are (x, x, x) and (1, x, x) for x in [3, 5, 7, 9, 11]. Default: (5, 5, 5) 37 | 38 | Returns 39 | ------- 40 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 41 | Median filtered particle values of the same type as the input particles. 42 | """ 43 | _check_input(apr, parts, __allowed_input_types__) 44 | _check_size(size, __allowed_sizes_median__) 45 | fname = 'median_filter_{}{}{}'.format(*size) 46 | output = type(parts)() 47 | globals()[fname](apr, parts, output) 48 | return output 49 | 50 | 51 | def min_filter(apr: APR, 52 | parts: ParticleData, 53 | size: Tuple[int, int, int] = (5, 5, 5)): 54 | """ 55 | Apply minimum filter to an APR image and return a new set of particle values. 56 | Each output particle is the minimum of an isotropic neighborhood of the given size 57 | in the input image. At coarse resolutions, neighboring values at finer resolution 58 | are minimum downsampled. 59 | 60 | Parameters 61 | ---------- 62 | apr: APR 63 | APR data structure. 64 | parts: ByteParticles, ShortParticles, FloatParticles or LongParticles 65 | Input particle values. 66 | size: (int, int, int) 67 | Size of the neighborhood in (z, x, y) dimensions. 68 | Allowed values are (x, x, x) and (1, x, x) for x in [3, 5, 7, 9, 11]. Default: (5, 5, 5) 69 | 70 | Returns 71 | ------- 72 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 73 | Minimum filtered particle values of the same type as the input particles. 
74 | """ 75 | _check_input(apr, parts, __allowed_input_types__) 76 | _check_size(size, __allowed_sizes_min__) 77 | fname = 'min_filter_{}{}{}'.format(*size) 78 | output = type(parts)() 79 | globals()[fname](apr, parts, output) 80 | return output 81 | 82 | 83 | def max_filter(apr: APR, 84 | parts: ParticleData, 85 | size: Tuple[int, int, int] = (5, 5, 5)): 86 | """ 87 | Apply maximum filter to an APR image and return a new set of particle values. 88 | Each output particle is the minimum of an isotropic neighborhood of the given size 89 | in the input image. At coarse resolutions, neighboring values at finer resolution 90 | are maximum downsampled. 91 | 92 | Parameters 93 | ---------- 94 | apr: APR 95 | APR data structure. 96 | parts: ByteParticles, ShortParticles, FloatParticles or LongParticles 97 | Input particle values 98 | size: (int, int, int) 99 | Size of the neighborhood in (z, x, y) dimensions. 100 | Allowed values are (x, x, x) and (1, x, x) for x in [3, 5, 7, 9, 11]. Default: (5, 5, 5) 101 | 102 | Returns 103 | ------- 104 | output: ByteParticles, ShortParticles, FloatParticles or LongParticles 105 | Maximum filtered particle values of the same type as the input particles. 
106 | """ 107 | _check_input(apr, parts, __allowed_input_types__) 108 | _check_size(size, __allowed_sizes_max__) 109 | fname = 'max_filter_{}{}{}'.format(*size) 110 | output = type(parts)() 111 | globals()[fname](apr, parts, output) 112 | return output 113 | -------------------------------------------------------------------------------- /pyapr/restoration/src/BindRichardsonLucy.hpp: -------------------------------------------------------------------------------- 1 | 2 | #ifndef PYLIBAPR_BINDRICHARDSONLUCY_HPP 3 | #define PYLIBAPR_BINDRICHARDSONLUCY_HPP 4 | 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include "data_structures/APR/APR.hpp" 11 | #include "data_containers/src/BindParticleData.hpp" 12 | #include "numerics/APRReconstruction.hpp" 13 | #include "numerics/APRFilter.hpp" 14 | #include "numerics/APRNumerics.hpp" 15 | #include "numerics/APRStencil.hpp" 16 | 17 | #ifdef PYAPR_USE_CUDA 18 | #include "numerics/APRNumericsGPU.hpp" 19 | #endif 20 | 21 | namespace py = pybind11; 22 | using namespace py::literals; 23 | 24 | 25 | namespace PyAPRRL { 26 | 27 | template 28 | void 29 | richardson_lucy_cpu(APR &apr, PyParticleData &input_parts, PyParticleData &output_parts, 30 | py::array_t &stencil, int niter, bool use_stencil_downsample, 31 | bool normalize_stencil, bool resume) { 32 | 33 | auto stencil_buf = stencil.request(); 34 | auto *stencil_ptr = static_cast(stencil_buf.ptr); 35 | PixelData psf; 36 | psf.init_from_mesh(stencil_buf.shape[2], stencil_buf.shape[1], stencil_buf.shape[0], stencil_ptr); 37 | 38 | APRNumerics::richardson_lucy(apr, input_parts, output_parts, psf, niter, use_stencil_downsample, normalize_stencil, resume); 39 | } 40 | 41 | 42 | template 43 | void richardson_lucy_tv_cpu(APR &apr, PyParticleData &input_parts, 44 | PyParticleData &output_parts, 45 | py::array_t &stencil, int niter, float reg_factor, 46 | bool use_stencil_downsample, 47 | bool normalize_stencil, bool resume) { 48 | 49 | auto stencil_buf = stencil.request(); 50 | auto 
*stencil_ptr = static_cast(stencil_buf.ptr); 51 | PixelData psf; 52 | psf.init_from_mesh(stencil_buf.shape[2], stencil_buf.shape[1], stencil_buf.shape[0], stencil_ptr); 53 | 54 | APRNumerics::richardson_lucy_tv(apr, input_parts, output_parts, psf, niter, reg_factor, 55 | use_stencil_downsample, normalize_stencil, resume); 56 | } 57 | 58 | 59 | #ifdef PYAPR_USE_CUDA 60 | 61 | template 62 | void richardson_lucy_cuda(APR& apr, PyParticleData& input_parts, PyParticleData& output_parts, 63 | py::array_t& stencil, int niter, bool use_stencil_downsample, bool normalize_stencil, 64 | bool resume) { 65 | 66 | auto stencil_buf = stencil.request(); 67 | auto* stencil_ptr = static_cast(stencil_buf.ptr); 68 | PixelData stencil_pd; 69 | stencil_pd.init_from_mesh(stencil_buf.shape[0], stencil_buf.shape[1], stencil_buf.shape[2], stencil_ptr); 70 | 71 | auto access = apr.gpuAPRHelper(); 72 | auto tree_access = apr.gpuTreeHelper(); 73 | 74 | APRNumericsGPU::richardson_lucy(access, tree_access, input_parts.data, output_parts.data, stencil_pd, niter, 75 | use_stencil_downsample, normalize_stencil, resume); 76 | } 77 | #endif 78 | } 79 | 80 | 81 | template 82 | void bindRichardsonLucy(py::module& m) { 83 | m.def("richardson_lucy", &PyAPRRL::richardson_lucy_cpu, "APR RL deconvolution", 84 | "apr"_a, "input_parts"_a, "output_parts"_a, "stencil"_a, "niter"_a, "use_stencil_downsample"_a=true, 85 | "normalize_stencil"_a=false, "resume"_a=false); 86 | 87 | m.def("richardson_lucy_tv", &PyAPRRL::richardson_lucy_tv_cpu, 88 | "APR RL deconvolution with total variation regularization", 89 | "apr"_a, "input_parts"_a, "output_parts"_a, "stencil"_a, "niter"_a, "reg_factor"_a, 90 | "use_stencil_downsample"_a=true, "normalize_stencil"_a=false, "resume"_a=false); 91 | 92 | #ifdef PYAPR_USE_CUDA 93 | m.def("richardson_lucy_cuda", &PyAPRRL::richardson_lucy_cuda, "APR RL deconvolution on GPU", 94 | "apr"_a, "input_parts"_a, "output_parts"_a, "stencil"_a, "niter"_a, "use_stencil_downsample"_a=true, 95 | 
"normalize_stencil"_a=false, "resume"_a=false); 96 | #endif 97 | } 98 | 99 | 100 | 101 | void AddRichardsonLucy(py::module &m) { 102 | 103 | bindRichardsonLucy(m); 104 | bindRichardsonLucy(m); 105 | bindRichardsonLucy(m); 106 | } 107 | 108 | #endif //PYLIBAPR_BINDRICHARDSONLUCY_HPP 109 | 110 | -------------------------------------------------------------------------------- /wrappers/pythonBind.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Krzysztof Gonciarz on 5/7/18. 3 | // Modified by Joel Jonsson on 2/5/19. 4 | // 5 | 6 | #include 7 | #include 8 | 9 | #include "converter/src/BindConverter.hpp" 10 | #include "converter/src/BindConverterBatch.hpp" 11 | 12 | #include "data_containers/src/BindPixelData.hpp" 13 | #include "data_containers/src/BindAPR.hpp" 14 | #include "data_containers/src/BindParameters.hpp" 15 | #include "data_containers/src/BindParticleData.hpp" 16 | #include "data_containers/src/BindReconPatch.hpp" 17 | #include "data_containers/src/BindLinearIterator.hpp" 18 | #include "data_containers/src/BindLazyAccess.hpp" 19 | #include "data_containers/src/BindLazyData.hpp" 20 | #include "data_containers/src/BindLazyIterator.hpp" 21 | 22 | #include "filter/src/BindFilter.hpp" 23 | #include "io/src/BindAPRFile.hpp" 24 | #include "measure/src/BindMeasure.hpp" 25 | #include "morphology/src/BindMorphology.hpp" 26 | #include "reconstruction/src/BindReconstruction.hpp" 27 | #include "restoration/src/BindRichardsonLucy.hpp" 28 | #include "segmentation/src/BindGraphCut.hpp" 29 | #include "transform/src/BindProjection.hpp" 30 | #include "tree/src/BindFillTree.hpp" 31 | 32 | #include "viewer/src/BindRaycaster.hpp" 33 | #include "viewer/src/BindViewerHelpers.hpp" 34 | 35 | 36 | #ifdef PYAPR_USE_CUDA 37 | #define BUILT_WITH_CUDA true 38 | #else 39 | #define BUILT_WITH_CUDA false 40 | #endif 41 | 42 | namespace py = pybind11; 43 | 44 | // -------- Check if properly configured in CMAKE 
----------------------------- 45 | #ifndef APR_PYTHON_MODULE_NAME 46 | #error "Name of APR module (python binding) is not defined!" 47 | #endif 48 | 49 | // -------- Definition of python module --------------------------------------- 50 | PYBIND11_MODULE(APR_PYTHON_MODULE_NAME, m) { 51 | m.doc() = "python binding for LibAPR library"; 52 | m.attr("__version__") = py::str(ConfigAPR::APR_VERSION); 53 | m.attr("__cuda_build__") = BUILT_WITH_CUDA; 54 | 55 | py::module data_containers = m.def_submodule("data_containers"); 56 | 57 | AddAPR(data_containers, "APR"); 58 | AddAPRParameters(data_containers); 59 | AddLinearIterator(data_containers); 60 | AddReconPatch(data_containers); 61 | 62 | //wrap PyPixelData class for different data types 63 | AddPyPixelData(data_containers, "Byte"); 64 | AddPyPixelData(data_containers, "Short"); 65 | AddPyPixelData(data_containers, "Float"); 66 | AddPyPixelData(data_containers, "Long"); 67 | 68 | // wrap PyParticleData class for different data types 69 | AddPyParticleData(data_containers, "Byte"); 70 | AddPyParticleData(data_containers, "Float"); 71 | AddPyParticleData(data_containers, "Short"); 72 | AddPyParticleData(data_containers, "Long"); 73 | AddPyParticleData(data_containers, "Int"); 74 | 75 | // wrap lazy classes 76 | AddLazyAccess(data_containers, "LazyAccess"); 77 | AddLazyIterator(data_containers); 78 | AddLazyData(data_containers, "Byte"); 79 | AddLazyData(data_containers, "Short"); 80 | AddLazyData(data_containers, "Long"); 81 | AddLazyData(data_containers, "Float"); 82 | 83 | 84 | py::module converter = m.def_submodule("converter"); 85 | 86 | // wrap APRConverter 87 | AddPyAPRConverter(converter, "Byte"); 88 | AddPyAPRConverter(converter, "Short"); 89 | AddPyAPRConverter(converter, "Float"); 90 | 91 | // wrap APRConverterBatch (tiled conversion) 92 | AddPyAPRConverterBatch(converter, "Byte"); 93 | AddPyAPRConverterBatch(converter, "Short"); 94 | AddPyAPRConverterBatch(converter, "Float"); 95 | 96 | 97 | py::module filter = 
m.def_submodule("filter"); 98 | AddFilter(filter); 99 | 100 | 101 | py::module io = m.def_submodule("io"); 102 | AddAPRFile(io, "APRFile"); 103 | 104 | 105 | py::module measure = m.def_submodule("measure"); 106 | AddMeasure(measure); 107 | 108 | 109 | py::module morphology = m.def_submodule("morphology"); 110 | AddMorphology(morphology); 111 | 112 | 113 | py::module reconstruction = m.def_submodule("reconstruction"); 114 | AddReconstruction(reconstruction); 115 | 116 | 117 | py::module restoration = m.def_submodule("restoration"); 118 | AddRichardsonLucy(restoration); 119 | 120 | 121 | py::module segmentation = m.def_submodule("segmentation"); 122 | AddGraphcut(segmentation, "graphcut"); 123 | 124 | 125 | py::module transform = m.def_submodule("transform"); 126 | AddProjection(transform); 127 | 128 | 129 | py::module tree = m.def_submodule("tree"); 130 | AddFillTree(tree); 131 | 132 | 133 | py::module viewer = m.def_submodule("viewer"); 134 | AddViewerHelpers(viewer); 135 | AddRaycaster(viewer, "APRRaycaster"); 136 | } 137 | -------------------------------------------------------------------------------- /external/maxflow-v3.04.src/README.TXT: -------------------------------------------------------------------------------- 1 | ################################################################### 2 | # # 3 | # MAXFLOW - software for computing mincut/maxflow in a graph # 4 | # Version 3.04 # 5 | # http://http://pub.ist.ac.at/~vnk/software.html # 6 | # # 7 | # Yuri Boykov (yuri@csd.uwo.ca) # 8 | # Vladimir Kolmogorov (vnk@ist.ac.at) # 9 | # 2001-2006 # 10 | # # 11 | ################################################################### 12 | 13 | 1. Introduction. 14 | 15 | This software library implements the maxflow algorithm described in 16 | 17 | "An Experimental Comparison of Min-Cut/Max-Flow Algorithms for Energy Minimization in Vision." 18 | Yuri Boykov and Vladimir Kolmogorov. 
19 | In IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI), 20 | September 2004 21 | 22 | This algorithm was developed by Yuri Boykov and Vladimir Kolmogorov 23 | at Siemens Corporate Research. To make it available for public use, 24 | it was later reimplemented by Vladimir Kolmogorov based on open publications. 25 | 26 | If you use this software for research purposes, you should cite 27 | the aforementioned paper in any resulting publication. 28 | 29 | ---------------------------------------------------------------------- 30 | 31 | REUSING TREES: 32 | 33 | Starting with version 3.0, there is a also an option of reusing search 34 | trees from one maxflow computation to the next, as described in 35 | 36 | "Efficiently Solving Dynamic Markov Random Fields Using Graph Cuts." 37 | Pushmeet Kohli and Philip H.S. Torr 38 | International Conference on Computer Vision (ICCV), 2005 39 | 40 | If you use this option, you should cite 41 | the aforementioned paper in any resulting publication. 42 | 43 | Tested under windows, Visual C++ 6.0 compiler and unix (SunOS 5.8 44 | and RedHat Linux 7.0, GNU c++ compiler). 45 | 46 | ################################################################## 47 | 48 | 2. License & disclaimer. 49 | 50 | Copyright 2001-2006 Vladimir Kolmogorov (vnk@ist.ac.at), Yuri Boykov (yuri@csd.uwo.ca). 51 | 52 | This software is under the GPL license. 53 | If you require another license, you may consider using version 2.21 54 | (which implements exactly the same algorithm, but does not have the option of reusing search trees). 55 | 56 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 57 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 58 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 59 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 60 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 61 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 62 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 63 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 64 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 65 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 66 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 | 68 | ################################################################## 69 | 70 | 3. Example usage. 71 | 72 | This section shows how to use the library to compute 73 | a minimum cut on the following graph: 74 | 75 | SOURCE 76 | / \ 77 | 1/ \2 78 | / 3 \ 79 | node0 -----> node1 80 | | <----- | 81 | | 4 | 82 | \ / 83 | 5\ /6 84 | \ / 85 | SINK 86 | 87 | /////////////////////////////////////////////////// 88 | 89 | #include 90 | #include "graph.h" 91 | 92 | int main() 93 | { 94 | typedef Graph GraphType; 95 | GraphType *g = new GraphType(/*estimated # of nodes*/ 2, /*estimated # of edges*/ 1); 96 | 97 | g -> add_node(); 98 | g -> add_node(); 99 | 100 | g -> add_tweights( 0, /* capacities */ 1, 5 ); 101 | g -> add_tweights( 1, /* capacities */ 2, 6 ); 102 | g -> add_edge( 0, 1, /* capacities */ 3, 4 ); 103 | 104 | int flow = g -> maxflow(); 105 | 106 | printf("Flow = %d\n", flow); 107 | printf("Minimum cut:\n"); 108 | if (g->what_segment(0) == GraphType::SOURCE) 109 | printf("node0 is in the SOURCE set\n"); 110 | else 111 | printf("node0 is in the SINK set\n"); 112 | if (g->what_segment(1) == GraphType::SOURCE) 113 | printf("node1 is in the SOURCE set\n"); 114 | else 115 | printf("node1 is in the SINK set\n"); 116 | 117 | delete g; 118 | 119 | return 0; 120 | } 121 | 122 | 123 | /////////////////////////////////////////////////// 124 | -------------------------------------------------------------------------------- 
/pyapr/tree/src/BindFillTree.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by joeljonsson on 04.03.21. 3 | // 4 | 5 | #ifndef PYLIBAPR_BINDFILLTREE_HPP 6 | #define PYLIBAPR_BINDFILLTREE_HPP 7 | 8 | #include 9 | #include "data_structures/APR/APR.hpp" 10 | #include "data_containers/src/BindParticleData.hpp" 11 | #include "numerics/APRTreeNumerics.hpp" 12 | 13 | namespace py = pybind11; 14 | using namespace py::literals; 15 | 16 | 17 | namespace PyAPRTreeNumerics { 18 | template 19 | void fill_tree_mean(APR &apr, const PyParticleData& particle_data, PyParticleData& tree_data) { 20 | APRTreeNumerics::fill_tree_mean(apr, particle_data, tree_data); 21 | } 22 | 23 | template 24 | void fill_tree_min(APR &apr, const PyParticleData& particle_data, PyParticleData& tree_data) { 25 | APRTreeNumerics::fill_tree_min(apr, particle_data, tree_data); 26 | } 27 | 28 | template 29 | void fill_tree_max(APR &apr, const PyParticleData& particle_data, PyParticleData& tree_data) { 30 | APRTreeNumerics::fill_tree_max(apr, particle_data, tree_data); 31 | } 32 | 33 | template 34 | void sample_from_tree(APR& apr, 35 | PyParticleData& particle_data, 36 | PyParticleData& tree_data, 37 | const int num_levels) { 38 | APRTreeNumerics::push_down_tree(apr, tree_data, num_levels); 39 | APRTreeNumerics::push_to_leaves(apr, tree_data, particle_data); 40 | } 41 | } 42 | 43 | 44 | template 45 | void bindFillTreeMean(py::module &m) { 46 | m.def("fill_tree_mean", &PyAPRTreeNumerics::fill_tree_mean, 47 | "Compute interior tree particle values by average downsampling", 48 | "apr"_a, "particle_data"_a, "tree_data"_a); 49 | 50 | if(!std::is_same::value) { 51 | m.def("fill_tree_mean", &PyAPRTreeNumerics::fill_tree_mean, 52 | "Compute interior tree particle values by average downsampling", 53 | "apr"_a, "particle_data"_a, "tree_data"_a); 54 | } 55 | } 56 | 57 | 58 | template 59 | void bindFillTreeMin(py::module &m) { 60 | m.def("fill_tree_min", 
&PyAPRTreeNumerics::fill_tree_min, 61 | "Compute interior tree particle values by min downsampling", 62 | "apr"_a, "particle_data"_a, "tree_data"_a); 63 | 64 | if(!std::is_same::value) { 65 | m.def("fill_tree_min", &PyAPRTreeNumerics::fill_tree_min, 66 | "Compute interior tree particle values by min downsampling", 67 | "apr"_a, "particle_data"_a, "tree_data"_a); 68 | } 69 | } 70 | 71 | 72 | template 73 | void bindFillTreeMax(py::module &m) { 74 | m.def("fill_tree_max", &PyAPRTreeNumerics::fill_tree_max, 75 | "Compute interior tree particle values by max downsampling", 76 | "apr"_a, "particle_data"_a, "tree_data"_a); 77 | 78 | if(!std::is_same::value) { 79 | m.def("fill_tree_max", &PyAPRTreeNumerics::fill_tree_max, 80 | "Compute interior tree particle values by max downsampling", 81 | "apr"_a, "particle_data"_a, "tree_data"_a); 82 | } 83 | } 84 | 85 | 86 | template 87 | void bindSampleFromTree(py::module &m) { 88 | m.def("sample_from_tree", &PyAPRTreeNumerics::sample_from_tree, 89 | "Coarsen particle values by resampling from a certain level of the APR tree", 90 | "apr"_a, "particle_data"_a, "tree_data"_a, "num_levels"_a); 91 | 92 | if(!std::is_same::value) { 93 | m.def("sample_from_tree", &PyAPRTreeNumerics::sample_from_tree, 94 | "Coarsen particle values by resampling from a certain level of the APR tree", 95 | "apr"_a, "particle_data"_a, "tree_data"_a, "num_levels"_a); 96 | } 97 | } 98 | 99 | 100 | void AddFillTree(py::module &m) { 101 | 102 | bindFillTreeMean(m); 103 | bindFillTreeMean(m); 104 | bindFillTreeMean(m); 105 | bindFillTreeMean(m); 106 | bindFillTreeMean(m); 107 | 108 | bindFillTreeMin(m); 109 | bindFillTreeMin(m); 110 | bindFillTreeMin(m); 111 | bindFillTreeMin(m); 112 | bindFillTreeMin(m); 113 | 114 | bindFillTreeMax(m); 115 | bindFillTreeMax(m); 116 | bindFillTreeMax(m); 117 | bindFillTreeMax(m); 118 | bindFillTreeMax(m); 119 | 120 | bindSampleFromTree(m); 121 | bindSampleFromTree(m); 122 | bindSampleFromTree(m); 123 | bindSampleFromTree(m); 124 | 
} 125 | 126 | #endif //PYLIBAPR_BINDFILLTREE_HPP 127 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pyapr 2 | 3 | [![build and deploy](https://github.com/AdaptiveParticles/pyapr/actions/workflows/build-deploy.yml/badge.svg)](https://github.com/AdaptiveParticles/pyapr/actions) 4 | [![codecov](https://codecov.io/gh/AdaptiveParticles/pyapr/branch/develop/graph/badge.svg?token=DN63B1DMYK)](https://codecov.io/gh/AdaptiveParticles/pyapr) 5 | [![License](https://img.shields.io/pypi/l/pyapr.svg?color=green)](https://raw.githubusercontent.com/AdaptiveParticles/pyapr/master/LICENSE) 6 | [![Python Version](https://img.shields.io/pypi/pyversions/pyapr.svg?color=blue)](https://python.org) 7 | [![PyPI](https://img.shields.io/pypi/v/pyapr.svg?color=green)](https://pypi.org/project/pyapr/) 8 | [![Downloads](https://static.pepy.tech/badge/pyapr)](https://pepy.tech/project/pyapr) 9 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7304045.svg)](https://doi.org/10.5281/zenodo.7304045) 10 | 11 | Documentation can be found [here](https://adaptiveparticles.github.io/pyapr/index.html). 12 | 13 | Content-adaptive storage and processing of large volumetric microscopy data using the Adaptive Particle Representation (APR). 14 | 15 | The APR is an adaptive image representation designed primarily for large 3D fluorescence microscopy datasets. By replacing pixels with particles positioned according to the image content, it enables orders-of-magnitude compression of sparse image data while maintaining image quality. However, unlike most compression formats, the APR can be used directly in a wide range of processing tasks - even on the GPU!
16 | 17 | | Pixels | APR | 18 | | :--: | :--: | 19 | | ![pixels.png](https://github.com/AdaptiveParticles/pyapr/raw/master/docs/images/pix_joined.png) | ![apr.png](https://github.com/AdaptiveParticles/pyapr/raw/master/docs/images/apr_joined.png) | 20 | | Uniform sampling | Adaptive sampling | 21 | 22 | *[image source](https://bbbc.broadinstitute.org/bbbc/BBBC032), 23 | [illustration source](https://ieeexplore.ieee.org/abstract/document/9796006)* 24 | 25 | 26 | For more detailed information about the APR and its use, see: 27 | - [Adaptive particle representation of fluorescence microscopy images](https://www.nature.com/articles/s41467-018-07390-9) (nature communications) 28 | - [Parallel Discrete Convolutions on Adaptive Particle Representations of Images](https://ieeexplore.ieee.org/abstract/document/9796006) (IEEE Transactions on Image Processing) 29 | 30 | **pyapr** is built on top of the C++ library [LibAPR] using [pybind11]. 31 | 32 | ## Quick start guide 33 | 34 | Convert images to APR using minimal amounts of code (*see [get_apr_demo](demo/get_apr_demo.py) and [get_apr_interactive_demo](demo/get_apr_interactive_demo.py) for additional options*). 35 | 36 | ```python 37 | import pyapr 38 | from skimage import io 39 | 40 | # read image into numpy array 41 | img = io.imread('my_image.tif') 42 | 43 | # convert to APR using default settings 44 | apr, parts = pyapr.converter.get_apr(img) 45 | 46 | # write APR to file 47 | pyapr.io.write('my_image.apr', apr, parts) 48 | ``` 49 | 50 | ![apr_file.png](https://github.com/AdaptiveParticles/pyapr/raw/master/docs/images/apr_file.png) 51 | 52 | To return to the pixel representation: 53 | ```python 54 | # reconstruct pixel image 55 | img = pyapr.reconstruction.reconstruct_constant(apr, parts) 56 | ``` 57 | 58 | 59 | Inspect APRs using our makeshift image viewers (*see [napari-apr-viewer] for less experimental visualization options*). 
60 | 61 | ```python 62 | # read APR from file 63 | apr, parts = pyapr.io.read('my_image.apr') 64 | 65 | # launch viewer 66 | pyapr.viewer.parts_viewer(apr, parts) 67 | ``` 68 | ![view_apr.png](https://github.com/AdaptiveParticles/pyapr/raw/master/docs/images/view_apr.png) 69 | 70 | The `View Level` toggle allows you to see the adaptation (brighter = higher resolution). 71 | 72 | ![view_level.png](https://github.com/AdaptiveParticles/pyapr/raw/master/docs/images/view_level.png) 73 | 74 | Or view the result in 3D using APR-native maximum intensity projection raycast (cpu). 75 | ```python 76 | # launch raycast viewer 77 | pyapr.viewer.raycast_viewer(apr, parts) 78 | ``` 79 | ![raycast.png](https://github.com/AdaptiveParticles/pyapr/raw/master/docs/images/raycast.png) 80 | 81 | See the [demo scripts] for more examples. 82 | 83 | ## Installation 84 | For Windows 10, OSX, and Linux direct installation with OpenMP support should work via [pip]: 85 | ``` 86 | pip install pyapr 87 | ``` 88 | Note: Due to the use of OpenMP, it is encouraged to install as part of a virtualenv. 89 | 90 | See [INSTALL] for manual build instructions. 91 | 92 | 93 | ## License 94 | 95 | **pyapr** is distributed under the terms of the [Apache Software License 2.0]. 96 | 97 | 98 | ## Issues 99 | 100 | If you encounter any problems, please [file an issue] with a short description. 101 | 102 | ## Contact us 103 | 104 | If you have a project or algorithm in which you would like to try using the APR, don't hesitate to get 105 | in touch with us. We would be happy to assist you! 
106 | 107 | 108 | [LibAPR]: https://github.com/AdaptiveParticles/LibAPR 109 | [pybind11]: https://github.com/pybind/pybind11 110 | [pip]: https://pypi.org/project/pip/ 111 | [INSTALL]: INSTALL.md 112 | [demo scripts]: demo 113 | [napari]: https://napari.org 114 | [napari-apr-viewer]: https://github.com/AdaptiveParticles/napari-apr-viewer 115 | [Apache Software License 2.0]: http://www.apache.org/licenses/LICENSE-2.0 116 | [file an issue]: https://github.com/AdaptiveParticles/pyapr/issues 117 | -------------------------------------------------------------------------------- /pyapr/io/src/BindAPRFile.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by Joel Jonsson on 22.05.19. 3 | // 4 | 5 | #ifndef PYLIBAPR_PYAPRFILE_HPP 6 | #define PYLIBAPR_PYAPRFILE_HPP 7 | 8 | #include "io/APRFile.hpp" 9 | #include "data_containers/src/BindParticleData.hpp" 10 | 11 | #include 12 | 13 | namespace py = pybind11; 14 | 15 | class PyAPRFile : public APRFile { 16 | public: 17 | PyAPRFile() : APRFile() {} 18 | 19 | template 20 | bool read_particles_py(APR& apr, std::string particles_name, PyParticleData& particles, 21 | bool apr_or_tree = true, uint64_t t = 0, std::string channel_name = "t") { 22 | return this->read_particles(apr, particles_name, particles, apr_or_tree, t, channel_name); 23 | } 24 | 25 | template 26 | bool read_particles_py2(std::string particles_name, PyParticleData& particles, 27 | bool apr_or_tree = true, uint64_t t = 0, std::string channel_name = "t") { 28 | return this->read_particles(particles_name, particles, apr_or_tree, t, channel_name); 29 | } 30 | }; 31 | 32 | // -------- wrapper ------------------------------------------------- 33 | void AddAPRFile(pybind11::module &m, const std::string &modulename) { 34 | 35 | using namespace py::literals; 36 | 37 | py::class_(m, "APRFile_CPP"); 38 | 39 | py::class_(m, modulename.c_str()) 40 | .def(py::init()) 41 | .def("open", &PyAPRFile::open, "open a file for 
reading and/or writing", 42 | "file_name"_a, "read_write"_a="WRITE") 43 | .def("close", &PyAPRFile::close, "close the file") 44 | .def("set_write_linear_flag", &PyAPRFile::set_write_linear_flag, "write linear access structure?", 45 | "flag"_a) 46 | 47 | .def("get_particles_names", &PyAPRFile::get_particles_names, "return list of field names for stored particle values", 48 | "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 49 | .def("get_channel_names", &PyAPRFile::get_channel_names, "return list of channel names") 50 | 51 | .def("get_particle_type", &PyAPRFile::get_particle_type, "return type (string) of particle dataset", 52 | "particles_name"_a, "apr_or_tree"_a=false, "t"_a=0, "channel_name"_a="t") 53 | 54 | .def("write_apr", &PyAPRFile::write_apr, "write apr to file", 55 | "apr"_a, "t"_a=0, "channel_name"_a="t", "write_tree"_a=true) 56 | .def("read_apr", &PyAPRFile::read_apr, "read an APR from file", 57 | "apr"_a, "t"_a=0, "channel_name"_a="t") 58 | 59 | .def("write_particles", &PyAPRFile::write_particles, "write particles to file", 60 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 61 | .def("write_particles", &PyAPRFile::write_particles, "write particles to file", 62 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 63 | .def("write_particles", &PyAPRFile::write_particles, "write particles to file", 64 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 65 | .def("write_particles", &PyAPRFile::write_particles, "write particles to file", 66 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 67 | 68 | .def("read_particles", &PyAPRFile::read_particles_py, "read particles from file", 69 | "apr"_a, "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 70 | .def("read_particles", &PyAPRFile::read_particles_py, "read particles from file", 71 | "apr"_a, "particles_name"_a, 
"particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 72 | .def("read_particles", &PyAPRFile::read_particles_py, "read particles from file", 73 | "apr"_a, "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 74 | .def("read_particles", &PyAPRFile::read_particles_py, "read particles from file", 75 | "apr"_a, "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 76 | 77 | .def("read_particles", &PyAPRFile::read_particles_py2, "read particles from file", 78 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 79 | .def("read_particles", &PyAPRFile::read_particles_py2, "read particles from file", 80 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 81 | .def("read_particles", &PyAPRFile::read_particles_py2, "read particles from file", 82 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 83 | .def("read_particles", &PyAPRFile::read_particles_py2, "read particles from file", 84 | "particles_name"_a, "particles"_a, "apr_or_tree"_a=true, "t"_a=0, "channel_name"_a="t") 85 | 86 | .def("current_file_size_GB", &PyAPRFile::current_file_size_GB, "get current file size in GB") 87 | .def("current_file_size_MB", &PyAPRFile::current_file_size_MB, "get current file size in MB"); 88 | } 89 | 90 | #endif //PYLIBAPR_PYAPRFILE_HPP 91 | -------------------------------------------------------------------------------- /pyapr/reconstruction/APRSlicer.py: -------------------------------------------------------------------------------- 1 | from _pyaprwrapper.data_containers import APR, ReconPatch, ByteParticles, ShortParticles, FloatParticles, LongParticles, IntParticles 2 | from _pyaprwrapper.tree import fill_tree_mean, fill_tree_max 3 | from ..utils import particles_to_type, type_to_particles 4 | from .._common import _check_input 5 | from . 
class APRSlicer:
    """
    Helper class allowing (3D) slice indexing. Pixel values in the slice range are reconstructed
    on the fly and returned as an array.

    Parameters
    ----------
    apr: APR
        Input APR data structure.
    parts: ByteParticles, ShortParticles, LongParticles, FloatParticles or IntParticles
        Input particle values.
    mode: str
        Reconstruction mode: 'constant', 'smooth' or 'level' (default: 'constant').
    level_delta: int
        Resolution level offset applied to reconstructions (default: 0).
    tree_mode: str
        How interior (tree) particle values are filled: 'mean' or 'max' (default: 'mean').

    Examples
    --------
    >>> import pyapr
    >>> apr, parts = pyapr.io.read('test.apr')
    >>> slicer = pyapr.reconstruction.APRSlicer(apr, parts)
    >>> slicer[15] # reconstruct slice at z=15
    >>> slicer[10:15, 42:45, 57:60] # reconstruct small 3D patch
    """
    def __init__(self,
                 apr: APR,
                 parts: Union[ByteParticles, ShortParticles, LongParticles, FloatParticles, IntParticles],
                 mode: str = 'constant',
                 level_delta: int = 0,
                 tree_mode: str = 'mean'):
        _check_input(apr, parts)
        self.apr = apr
        self.parts = parts
        self.mode = mode
        # 'level' mode reconstructs resolution levels, which fit in uint8; otherwise
        # the output dtype mirrors the particle data type
        self.dtype = np.uint8 if self.mode == 'level' else particles_to_type(parts)

        # patch holds the current reconstruction limits; initialized to a single z-slice
        self.patch = ReconPatch()
        self.patch.level_delta = level_delta
        self.patch.z_end = 1
        self.patch.check_limits(self.apr)
        self.dims = []
        self.update_dims()

        # interior (tree) values consumed by the reconstruction functions below
        self.tree_parts = FloatParticles()

        if tree_mode == 'mean':
            fill_tree_mean(self.apr, self.parts, self.tree_parts)
        elif tree_mode == 'max':
            fill_tree_max(self.apr, self.parts, self.tree_parts)
        else:
            raise ValueError(f'Invalid tree mode {tree_mode}. Allowed values are \'mean\' and \'max\'')

        # pre-allocate the output buffer and bind the reconstruction function once
        self._slice = self.new_empty_slice()
        if self.mode == 'constant':
            self.recon = reconstruct_constant
        elif self.mode == 'smooth':
            self.recon = reconstruct_smooth
        elif self.mode == 'level':
            self.recon = reconstruct_level
        else:
            raise ValueError(f'Invalid mode {mode}. Allowed values are \'constant\', \'smooth\' and \'level\'')

    @property
    def shape(self):
        # full pixel dimensions at the current level_delta, in (z, x, y) order
        return self.dims[2], self.dims[1], self.dims[0]

    @property
    def ndim(self):
        # always exposes a 3D interface, regardless of the original image dimensionality
        return 3

    def __array__(self):
        # allows things like np.max(APRSlicer)
        return np.array(self.parts)

    def astype(self, typespec):
        """Return a new APRSlicer with particle values cast to ``typespec``."""
        parts = type(type_to_particles(typespec))(np.array(self.parts).astype(typespec))
        # integer targets use 'max' tree fill (int is used for e.g. label data); others use 'mean'
        return APRSlicer(self.apr, parts, mode=self.mode, level_delta=self.patch.level_delta, tree_mode='max' if typespec in [int, np.int32, 'int', 'int32'] else 'mean')

    def new_empty_slice(self):
        # allocate an output buffer matching the current patch limits, (z, x, y) order
        return np.zeros((self.patch.z_end-self.patch.z_begin,
                         self.patch.x_end-self.patch.x_begin,
                         self.patch.y_end-self.patch.y_begin),
                        dtype=self.dtype)

    def update_dims(self):
        # pixel image dimensions scaled by 2**level_delta (rounded up)
        self.dims = [int(np.ceil(self.apr.org_dims(x) * pow(2, self.patch.level_delta))) for x in range(3)]

    def set_level_delta(self, level_delta):
        """Change the resolution offset and recompute the image dimensions."""
        self.patch.level_delta = level_delta
        self.update_dims()

    def reconstruct(self):
        """Reconstruct pixel values in the current patch region; returns the squeezed array."""
        if self.mode == 'level':
            # level reconstruction does not depend on particle values
            self._slice = self.recon(self.apr, patch=self.patch, out_arr=self._slice)
        else:
            self._slice = self.recon(self.apr, self.parts, tree_parts=self.tree_parts, patch=self.patch, out_arr=self._slice)
        return self._slice.squeeze()

    def __getitem__(self, item):
        # Translate the index expression into patch limits.
        # NOTE(review): a limit of -1 appears to act as "use the full extent",
        # presumably resolved by patch.check_limits() below -- confirm against the
        # ReconPatch C++ bindings.
        if isinstance(item, slice):
            # a single slice indexes z; x and y take their full ranges
            self.patch.x_begin, self.patch.x_end, self.patch.y_begin, self.patch.y_end = [0, -1, 0, -1]
            self.patch.z_begin = int(item.start) if item.start is not None else -1
            self.patch.z_end = int(item.stop) if item.stop is not None else -1
        elif isinstance(item, tuple):
            # up to three entries in (z, x, y) order; each entry may be a slice or a scalar
            limits = [-1, -1, -1, -1, -1, -1]
            for i in range(len(item)):
                if isinstance(item[i], slice):
                    limits[2*i] = int(item[i].start) if item[i].start is not None else -1
                    limits[2*i+1] = int(item[i].stop) if item[i].stop is not None else -1
                elif isinstance(item[i], Integral):
                    # scalar index -> single plane in that dimension
                    limits[2*i] = item[i]
                    limits[2*i+1] = item[i]+1
                elif isinstance(item[i], float):
                    limits[2*i] = int(item[i])
                    limits[2*i+1] = int(item[i]+1)
            self.patch.z_begin, self.patch.z_end, self.patch.x_begin, self.patch.x_end, self.patch.y_begin, self.patch.y_end = limits
        else:
            # scalar index -> single z-slice, full x and y ranges
            self.patch.x_begin, self.patch.x_end, self.patch.y_begin, self.patch.y_end = [0, -1, 0, -1]
            self.patch.z_begin = int(item)
            self.patch.z_end = int(item+1)
        self.patch.check_limits(self.apr)
        return self.reconstruct()
37 | VCPKG_ROOT: ${{ github.workspace }}/external/LibAPR/vcpkg 38 | EXTRA_CMAKE_ARGS: "-DCMAKE_TOOLCHAIN_FILE='${{ github.workspace }}/external/LibAPR/vcpkg/scripts/buildsystems/vcpkg.cmake'" 39 | steps: 40 | - uses: actions/checkout@v4 41 | with: 42 | fetch-depth: 0 43 | submodules: true 44 | 45 | - name: Submodule recursive 46 | run: git submodule update --init --recursive 47 | 48 | # Setup the build machine with the most recent versions of CMake and Ninja. Both are cached if not already: on subsequent runs both will be quickly restored from GitHub cache service. 49 | - uses: lukka/get-cmake@latest 50 | # Restore both vcpkg and its artifacts from the GitHub cache service. 51 | - name: Restore vcpkg and its artifacts. 52 | uses: actions/cache@v3 53 | with: 54 | # The first path is where vcpkg generates artifacts while consuming the vcpkg.json manifest file. 55 | # The second path is the location of vcpkg (it contains the vcpkg executable and data files). 56 | # The other paths starting with '!' are exclusions: they contain temporary files generated during the build of the installed packages. 57 | path: | 58 | ${{ env.CMAKE_BUILD_DIR }}/vcpkg_installed/ 59 | ${{ env.VCPKG_ROOT }} 60 | !${{ env.VCPKG_ROOT }}/buildtrees 61 | !${{ env.VCPKG_ROOT }}/packages 62 | !${{ env.VCPKG_ROOT }}/downloads 63 | # The key is composed in a way that it gets properly invalidated: this must happen whenever vcpkg's Git commit id changes, or the list of packages changes. In this case a cache miss must happen and a new entry with a new key will be pushed to the GitHub cache service. 64 | # The key includes: hash of the vcpkg.json file, the hash of the vcpkg Git commit id, and the used vcpkg's triplet. The vcpkg's commit id would suffice, but computing a hash out of it does not harm. 65 | # Note: given a key, the cache content is immutable. If a cache entry has been created improperly, in order to recreate the right content the key must be changed as well, and it must be brand new (i.e.
not existing already). 66 | key: | 67 | ${{ hashFiles( '${{ env.VCPKG_ROOT }}/.git/modules/vcpkg/HEAD' )}}-${{ matrix.triplet }}-invalidate 68 | 69 | - name: Show content of workspace after cache has been restored 70 | run: find $RUNNER_WORKSPACE 71 | shell: bash 72 | 73 | # On Windows runners, let's ensure to have the Developer Command Prompt environment setup correctly. As used here the Developer Command Prompt created is targeting x64 and using the default the Windows SDK. 74 | - uses: ilammy/msvc-dev-cmd@v1 75 | 76 | - uses: actions/setup-python@v4 77 | name: Install Python 78 | with: 79 | python-version: '3.9' 80 | 81 | - name: Check file existence 82 | id: check_files 83 | uses: andstor/file-existence-action@v2 84 | with: 85 | files: "${{ env.VCPKG_ROOT }}/vcpkg" 86 | 87 | - name: VCPKG setup 88 | if: steps.check_files.outputs.files_exists == 'false' 89 | run: | 90 | ${{ env.VCPKG_ROOT }}/bootstrap-vcpkg.sh 91 | 92 | - name: VCPKG install package 93 | run: | 94 | ${{ env.VCPKG_ROOT }}/vcpkg install tiff blosc hdf5 szip --triplet=${{ matrix.triplet }} 95 | 96 | # Build with python 97 | - name: Build pyapr wheel 98 | run: | 99 | python -m venv venv 100 | source venv/bin/activate 101 | pip install wheel setuptools setuptools_scm 102 | python3 setup.py bdist_wheel -b ${{ env.CMAKE_BUILD_DIR }} 103 | 104 | - name: Install pyapr from wheel 105 | run: | 106 | pip install dist/pyapr*.whl 107 | 108 | # these libraries, along with pytest-xvfb enable testing Qt on linux 109 | - name: Install Qt libraries 110 | uses: tlambert03/setup-qt-libs@v1 111 | 112 | - name: Run tests 113 | run: | 114 | pip install pytest pytest-cov pytest-qt pytest-xvfb 115 | pytest -vv --cov-report xml --cov=pyapr 116 | 117 | - name: Upload coverage report 118 | uses: codecov/codecov-action@v3 119 | 120 | - name: Build documentation 121 | run: | 122 | pip install sphinx sphinx_rtd_theme myst_parser 123 | cd docs 124 | make html 125 | 
import pytest
import pyapr
from .helpers import load_test_apr
import numpy as np


# Particle container types that each test is parametrized over.
PARTICLE_TYPES = [
    pyapr.ByteParticles,
    pyapr.ShortParticles,
    pyapr.FloatParticles,
    pyapr.LongParticles
]


@pytest.mark.filterwarnings("ignore:Method \'cuda\'")
@pytest.mark.parametrize("parts_type", PARTICLE_TYPES)
@pytest.mark.parametrize("stencil_shape", [(5, 5, 5), (1, 5, 7), (1, 1, 13)])
def test_convolution(parts_type, stencil_shape):
    # The 'pencil', 'slice' and 'cuda' methods of convolve/correlate must agree,
    # and invalid arguments must raise.
    apr, parts = load_test_apr(3)
    parts = parts_type(parts)

    # normalized stencil with distinct entries
    stencil = np.arange(1, np.prod(stencil_shape)+1).reshape(stencil_shape)
    stencil = stencil / np.sum(stencil)

    for op in (pyapr.filter.convolve, pyapr.filter.correlate):
        res1 = op(apr, parts, stencil, method='pencil')

        # 'slice' method writing into a pre-allocated output container
        res2 = pyapr.FloatParticles(apr.total_number_particles())
        res2 = op(apr, parts, stencil, method='slice', output=res2)
        assert np.allclose(np.array(res1, copy=False), np.array(res2, copy=False))

        # 'cuda' method (its warning is filtered above -- presumably a CPU fallback
        # on builds without CUDA support)
        res2 = op(apr, parts, stencil, method='cuda')
        assert np.allclose(np.array(res1, copy=False), np.array(res2, copy=False))

    with pytest.raises(ValueError):
        # unsupported method
        res = op(apr, parts, stencil, method='does-not-exist')

    with pytest.raises(TypeError):
        # unsupported parts type
        res = op(apr, (1, 2, 3), stencil)


@pytest.mark.parametrize("parts_type", PARTICLE_TYPES)
def test_gradient_filters(parts_type):
    # gradient_magnitude / sobel_magnitude must equal the magnitude computed
    # manually from the per-dimension filters with matching deltas.
    apr, parts = load_test_apr(3)
    parts = parts_type(parts)

    dy = pyapr.filter.gradient(apr, parts, dim=0, delta=0.7)
    dx = pyapr.filter.gradient(apr, parts, dim=1, delta=0.9)
    dz = pyapr.filter.gradient(apr, parts, dim=2, delta=1.1)
    gradmag_manual = np.sqrt(np.array(dz*dz + dx*dx + dy*dy))

    gradmag = pyapr.filter.gradient_magnitude(apr, parts, deltas=(0.7, 0.9, 1.1))
    assert np.allclose(np.array(gradmag), gradmag_manual)

    dy = pyapr.filter.sobel(apr, parts, dim=0, delta=1.3)
    dx = pyapr.filter.sobel(apr, parts, dim=1, delta=1.5)
    dz = pyapr.filter.sobel(apr, parts, dim=2, delta=0.9)
    gradmag_manual = np.sqrt(np.array(dz*dz + dx*dx + dy*dy))

    gradmag = pyapr.filter.sobel_magnitude(apr, parts, deltas=(1.3, 1.5, 0.9))
    assert np.allclose(np.array(gradmag), gradmag_manual)

    with pytest.raises(ValueError):
        # invalid dim argument
        pyapr.filter.gradient(apr, parts, dim=3)

    with pytest.raises(ValueError):
        # invalid dim argument
        pyapr.filter.sobel(apr, parts, dim=3)

    with pytest.raises(ValueError):
        # invalid deltas argument (must be length 3)
        pyapr.filter.gradient_magnitude(apr, parts, deltas=(1, 1))

    with pytest.raises(ValueError):
        # invalid deltas argument (must be length 3)
        pyapr.filter.sobel_magnitude(apr, parts, deltas=(2, ))


@pytest.mark.parametrize("parts_type", PARTICLE_TYPES)
@pytest.mark.parametrize("ndim", [1, 2, 3])
def test_gradient_manual(parts_type, ndim):
    # gradient(dim=0) must match an explicit central-difference correlation
    apr, parts = load_test_apr(ndim)
    parts = parts_type(parts)

    # compute y gradient
    grad = pyapr.filter.gradient(apr, parts, dim=0)

    # compute gradient using correlate
    stencil = np.array([-1, 0, 1]).reshape(1, 1, 3) / 2
    grad_manual = pyapr.filter.correlate(apr, parts, stencil, rescale_stencil=True)

    assert np.allclose(np.array(grad), np.array(grad_manual))


@pytest.mark.parametrize("parts_type", PARTICLE_TYPES)
def test_sobel_manual(parts_type):
    # sobel(dim=0) must match correlation with an explicitly constructed
    # (normalized) 3x3x3 Sobel stencil, for every available backend.
    apr, parts = load_test_apr(3)
    parts = parts_type(parts)

    # compute sobel gradient
    grad = pyapr.filter.sobel(apr, parts, dim=0)

    # compute gradient using correlate
    stencil = np.outer(np.outer([1, 2, 1], [1, 2, 1]), [-1, 0, 1]).reshape(3, 3, 3) / 32
    methods = ['slice', 'pencil', 'cuda'] if pyapr.cuda_enabled() else ['slice', 'pencil']
    for method in methods:
        grad_manual = pyapr.filter.correlate(apr, parts, stencil, rescale_stencil=True, method=method)
        assert np.allclose(np.array(grad), np.array(grad_manual))


@pytest.mark.parametrize("parts_type", PARTICLE_TYPES)
@pytest.mark.parametrize("filter_size", [(3, 3, 3), (1, 5, 5)])
def test_rank_filters(parts_type, filter_size):
    # Sanity checks: a median value must occur in the input, and min/max
    # filters preserve the global extremes.
    apr, parts = load_test_apr(3)
    parts = parts_type(parts)

    med_output = pyapr.filter.median_filter(apr, parts, filter_size)
    min_output = pyapr.filter.min_filter(apr, parts, filter_size)
    max_output = pyapr.filter.max_filter(apr, parts, filter_size)

    assert med_output[9123] in parts
    assert min_output.min() == parts.min()
    assert max_output.max() == parts.max()

    with pytest.raises(ValueError):
        # unsupported filter size
        res = pyapr.filter.median_filter(apr, parts, (1, 97, 13))


@pytest.mark.parametrize("parts_type", PARTICLE_TYPES)
@pytest.mark.parametrize("filter_size", [3, (1, 7, 9), [3, 3, 5]])
def test_std_filter(parts_type, filter_size):
    # std accepts an int or a length-3 sequence as the filter size
    apr, parts = load_test_apr(3)
    parts = parts_type(parts)
    output = pyapr.filter.std(apr, parts, filter_size)

    with pytest.raises(ValueError):
        # invalid filter specification (must be int or length 3)
        pyapr.filter.std(apr, parts, (1, 5))
8 | # need write permission to push to gh-pages branch 9 | permissions: 10 | contents: write 11 | 12 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 13 | jobs: 14 | deploy-docs: 15 | name: build and deploy docs 16 | runs-on: ${{ matrix.os }} 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | os: [ubuntu-latest] 21 | include: 22 | - os: ubuntu-latest 23 | triplet: x64-linux 24 | extblosc: OFF 25 | openmp: ON 26 | buildfolder: build/temp.linux-x86_64-3.9 27 | 28 | env: 29 | # Indicates the CMake build directory where project files and binaries are being produced. 30 | CMAKE_BUILD_DIR: ${{ github.workspace }}/${{ matrix.buildfolder }} 31 | # Indicates the location of the vcpkg as a Git submodule of the project repository. 32 | VCPKG_ROOT: ${{ github.workspace }}/external/LibAPR/vcpkg 33 | EXTRA_CMAKE_ARGS: "-DCMAKE_TOOLCHAIN_FILE='${{ github.workspace }}/external/LibAPR/vcpkg/scripts/buildsystems/vcpkg.cmake'" 34 | steps: 35 | - uses: actions/checkout@v4 36 | with: 37 | fetch-depth: 0 38 | submodules: true 39 | 40 | - name: Submodule recursive 41 | run: git submodule update --init --recursive 42 | 43 | # Setup the build machine with the most recent versions of CMake and Ninja. Both are cached if not already: on subsequent runs both will be quickly restored from GitHub cache service. 44 | - uses: lukka/get-cmake@latest 45 | # Restore both vcpkg and its artifacts from the GitHub cache service. 46 | - name: Restore vcpkg and its artifacts. 47 | uses: actions/cache@v3 48 | with: 49 | # The first path is where vcpkg generates artifacts while consuming the vcpkg.json manifest file. 50 | # The second path is the location of vcpkg (it contains the vcpkg executable and data files). 51 | # The other paths starting with '!' are exclusions: they contain termporary files generated during the build of the installed packages. 
52 | path: | 53 | ${{ env.CMAKE_BUILD_DIR }}/vcpkg_installed/ 54 | ${{ env.VCPKG_ROOT }} 55 | !${{ env.VCPKG_ROOT }}/buildtrees 56 | !${{ env.VCPKG_ROOT }}/packages 57 | !${{ env.VCPKG_ROOT }}/downloads 58 | # The key is composed in a way that it gets properly invalidated: this must happen whenever vcpkg's Git commit id changes, or the list of packages changes. In this case a cache miss must happen and a new entry with a new key with be pushed to GitHub the cache service. 59 | # The key includes: hash of the vcpkg.json file, the hash of the vcpkg Git commit id, and the used vcpkg's triplet. The vcpkg's commit id would suffice, but computing an hash out it does not harm. 60 | # Note: given a key, the cache content is immutable. If a cache entry has been created improperly, in order the recreate the right content the key must be changed as well, and it must be brand new (i.e. not existing already). 61 | key: | 62 | ${{ hashFiles( '${{ env.VCPKG_ROOT }}/.git/modules/vcpkg/HEAD' )}}-${{ matrix.triplet }}-invalidate 63 | 64 | - name: Show content of workspace after cache has been restored 65 | run: find $RUNNER_WORKSPACE 66 | shell: bash 67 | 68 | # On Windows runners, let's ensure to have the Developer Command Prompt environment setup correctly. As used here the Developer Command Prompt created is targeting x64 and using the default the Windows SDK. 
69 | - uses: ilammy/msvc-dev-cmd@v1 70 | 71 | - uses: actions/setup-python@v4 72 | name: Install Python 73 | with: 74 | python-version: '3.9' 75 | 76 | - name: Check file existence 77 | id: check_files 78 | uses: andstor/file-existence-action@v2 79 | with: 80 | files: "${{ env.VCPKG_ROOT }}/vcpkg" 81 | 82 | - name: VCPKG setup 83 | if: steps.check_files.outputs.files_exists == 'false' 84 | run: | 85 | ${{ env.VCPKG_ROOT }}/bootstrap-vcpkg.sh 86 | 87 | - name: VCPKG install dependencies 88 | run: | 89 | ${{ env.VCPKG_ROOT }}/vcpkg install tiff blosc hdf5 szip --triplet=${{ matrix.triplet }} 90 | 91 | - name: Build wheel 92 | run: | 93 | python -m venv venv 94 | source venv/bin/activate 95 | pip install wheel setuptools setuptools_scm 96 | python3 setup.py bdist_wheel -b ${{ env.CMAKE_BUILD_DIR }} 97 | 98 | - name: Install pyapr 99 | run: | 100 | pip install dist/pyapr*.whl 101 | 102 | # these libraries, along with pytest-xvfb enable testing Qt on linux 103 | - name: Install Qt libraries 104 | uses: tlambert03/setup-qt-libs@v1 105 | 106 | - name: Run tests 107 | run: | 108 | pip install pytest pytest-qt pytest-xvfb 109 | pytest -vv 110 | 111 | - name: Build documentation 112 | run: | 113 | pip install sphinx sphinx_rtd_theme myst_parser 114 | cd docs 115 | make html 116 | 117 | - name: Get version 118 | id: get_version 119 | run: | 120 | echo "VERSION_TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT 121 | 122 | - name: Deploy documentation 123 | uses: JamesIves/github-pages-deploy-action@v4 124 | with: 125 | branch: gh-pages # The branch the action should deploy to. 126 | folder: docs/build/html # The folder the action should deploy. 
from pyqtgraph.Qt import QtCore, QtWidgets
import pyqtgraph as pg
from _pyaprwrapper.data_containers import APR, ShortParticles
from _pyaprwrapper.viewer import compress_and_fill_slice
from .partsViewer import MainWindow
from .._common import _check_input


class CustomSlider:
    """A labeled horizontal slider paired with a spin box that sets the slider's maximum."""

    def __init__(self, window, label_name):

        self.win_ref = window
        self.label_name = label_name

        self.label = QtWidgets.QLabel(window)

        # spin box controls the upper limit of the slider
        self.maxBox = QtWidgets.QSpinBox(window)
        self.maxBox.setMaximum(64000)
        self.maxBox.setValue(100)
        self.maxBox.valueChanged.connect(self.updateRange)

        self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, window)
        self.slider.valueChanged.connect(self.updateText)
        self.slider.setValue(1)

        # fixed pixel widths used by move()
        self.sz_label = 120
        self.sz_slider = 200
        self.sz_box = 75

    def move(self, loc1, loc2):
        # lay out label, slider and spin box side by side at (loc1, loc2)

        self.label.move(loc1, loc2-5)
        self.label.setFixedWidth(self.sz_label)

        self.slider.move(loc1 + self.sz_label, loc2)
        self.slider.setFixedWidth(self.sz_slider)
        self.maxBox.move(loc1 + self.sz_slider + self.sz_label + 5, loc2-5)
        self.maxBox.setFixedWidth(self.sz_box)

    def updateRange(self):
        # apply the spin box value as the new slider maximum
        max = self.maxBox.value()
        self.slider.setMaximum(max)
        self.slider.setTickInterval(1)

    def connectSlider(self, function):
        # attach an external callback to slider value changes
        self.slider.valueChanged.connect(function)

    def updateText(self):
        # show "<label_name>: <value>" next to the slider
        text_str = self.label_name + ": " + str(self.slider.value())
        self.label.setText(text_str)


class CompressWindow(MainWindow):
    """Particle viewer window extended with interactive compression controls.

    Adds quantization and background sliders, a compression on/off toggle and
    an exit button on top of the base MainWindow viewer.
    """

    def __init__(self):
        super(CompressWindow, self).__init__()

        self.exit_button = QtWidgets.QPushButton('Use Parameters', self)
        self.exit_button.setFixedWidth(300)
        self.exit_button.move(500, 10)
        self.exit_button.clicked.connect(self.exitPressed)

        self.max_label = QtWidgets.QLabel(self)
        self.max_label.setText("Slider Max")
        self.max_label.move(520, 40)

        # quantization factor slider
        self.slider_q = CustomSlider(self, "quantization")
        self.slider_q.move(200, 70)
        self.slider_q.connectSlider(self.valuechangeQ)
        self.slider_q.maxBox.setValue(20)
        # NOTE(review): QSlider.setSingleStep expects an int; a float step of 0.1
        # may be truncated or rejected depending on the Qt bindings -- confirm intent
        self.slider_q.slider.setSingleStep(0.1)

        # background level slider
        self.slider_B = CustomSlider(self, "background")
        self.slider_B.move(200, 100)
        self.slider_B.connectSlider(self.valuechangeB)
        self.slider_B.maxBox.setValue(1000)

        # checkbox toggling compression on/off
        self.toggle_on = QtWidgets.QCheckBox(self)
        self.toggle_on.setText("Compress")
        self.toggle_on.move(605, 65)
        self.toggle_on.setChecked(True)
        self.toggle_on.stateChanged.connect(self.toggleCompression)

    def toggleCompression(self):
        # re-apply slider values when enabled; disable quantization otherwise
        if self.toggle_on.isChecked():
            self.valuechangeQ()
            self.valuechangeB()
        else:
            self.parts_ref.set_quantization_factor(0)
            # invalidate current_view so update_slice refreshes every level
            force_update = self.current_view
            self.current_view = -1
            self.update_slice(force_update)

    def exitPressed(self):
        self.app_ref.exit()

    def valuechangeQ(self):
        # apply the quantization slider value and force a redraw
        if self.toggle_on.isChecked():
            size = self.slider_q.slider.value()
            self.parts_ref.set_quantization_factor(size)
            force_update = self.current_view
            self.current_view = -1
            self.update_slice(force_update)

    def valuechangeB(self):
        # apply the background slider value and force a redraw
        if self.toggle_on.isChecked():
            size = self.slider_B.slider.value()
            self.parts_ref.set_background(size)
            force_update = self.current_view
            self.current_view = -1
            self.update_slice(force_update)

    def update_slice(self, new_view):
        """Refresh the displayed z-slice at index ``new_view``, re-compressing per level."""
        if (new_view >= 0) & (new_view < self.z_num):
            # now update the view
            for l in range(self.level_min, self.level_max + 1):
                # loop over levels of the APR
                sz = pow(2, self.level_max - l)

                # z index of this level's (coarser) slice
                curr_z = int(new_view/sz)
                prev_z = int(self.current_view/sz)

                # only recompute the level image if its slice index changed
                if prev_z != curr_z:
                    compress_and_fill_slice(self.aAPR_ref, self.parts_ref, self.array_list[l], curr_z, l)

                self.img_list[l].setImage(self.array_list[l], False)

                # scale each level's image to the full-resolution extent
                img_sz_x = self.scale_sc * self.array_list[l].shape[1] * sz
                img_sz_y = self.scale_sc * self.array_list[l].shape[0] * sz

                self.img_list[l].setRect(QtCore.QRectF(self.min_x, self.min_y, img_sz_x, img_sz_y))

            self.current_view = new_view
            # make the slider reflect the new value
            self.slider.setValue(new_view)
            self.updateSliceText(new_view)


def interactive_compression(apr: APR,
                            parts: ShortParticles):
    """
    Spawns a viewer to interactively find compression parameters for particle intensities.
    Slide the `quantization` and `background` sliders to change the compression behavior.

    Note: the parameters are saved in the input `parts` object, and used in subsequent I/O calls, e.g. `pyapr.io.write`.

    Parameters
    ----------
    apr: APR
        Input APR data structure.
    parts: ShortParticles
        Input particle intensity values.
    """
    _check_input(apr, parts, (ShortParticles,))
    pg.setConfigOption('background', 'w')
    pg.setConfigOption('foreground', 'k')
    pg.setConfigOption('imageAxisOrder', 'row-major')

    # reuse an existing Qt application instance if one is already running
    app = QtWidgets.QApplication.instance()
    if app is None:
        app = QtWidgets.QApplication([])

    ## Create window with GraphicsView widget
    win = CompressWindow()
    win.app_ref = app
    win.init_APR(apr, parts)

    win.show()
    app.exec_()  # blocks until the window exits

    # turn on compression so the chosen parameters take effect in later I/O
    parts.set_compression_type(1)
    return None