├── .github
├── ISSUE_TEMPLATE
│ └── bug_report.md
└── workflows
│ ├── ci.yml
│ └── deploy.yml
├── .gitignore
├── .readthedocs.yml
├── CITATION.cff
├── DATA
├── Curie.pgm
├── Einstein.pgm
└── lenna-256x256.tif
├── LICENSE
├── README.md
├── TESTS
├── .coveragerc
└── unitTests.py
├── TUTORIALS
├── 01_tools.ipynb
├── 02_pyramids.ipynb
└── 03_steerable_pyramids.ipynb
├── docs
├── Makefile
├── conf.py
├── developerguide.rst
├── index.rst
├── installation.rst
├── make.bat
├── quickstart.rst
└── tutorials
│ ├── 01_tools.nblink
│ ├── 02_pyramids.nblink
│ └── 03_steerable_pyramids.nblink
├── pyproject.toml
├── requirements.txt
├── setup.py
└── src
└── pyrtools
├── __init__.py
├── pyramids
├── GaussianPyramid.py
├── LaplacianPyramid.py
├── SteerablePyramidFreq.py
├── SteerablePyramidSpace.py
├── WaveletPyramid.py
├── __init__.py
├── c
│ ├── __init__.py
│ ├── convolve.c
│ ├── convolve.h
│ ├── edges.c
│ ├── internal_pointOp.c
│ ├── internal_pointOp.h
│ ├── meta.h
│ ├── py.c
│ ├── wrap.c
│ └── wrapper.py
├── filters.py
├── pyr_utils.py
├── pyramid.py
└── steer.py
└── tools
├── __init__.py
├── compare_matpyrtools.py
├── convolutions.py
├── display.py
├── image_stats.py
├── synthetic_images.py
└── utils.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Please provide a short, reproducible example of the error, for example:
15 | ```
16 | import pyrtools as pt
17 | import imageio
18 |
19 | img = imageio.imread('DATA/Einstein.pgm')
20 | pyr = pt.pyramids.LaplacianPyramid(img)
21 | # this raises an error
22 | recon_img = pyr.recon_pyr()
23 | ```
24 |
25 | **Expected behavior**
26 | A clear and concise description of what you expected to happen.
27 |
28 | **Screenshots**
29 | If applicable, add screenshots to help explain your problem.
30 |
31 | **System (please complete the following information):**
32 | - OS: [e.g. Mac (with version), Ubuntu 18.04]
33 | - Python version [e.g. 3.11]
34 | - Pyrtools version [e.g. 1.0.1]
35 |
36 | **Additional context**
37 | Add any other context about the problem here.
38 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: build
2 | on:
3 | workflow_dispatch:
4 | schedule:
5 |     - cron: "0 0 * * 0" # weekly
6 | pull_request:
7 | branches:
8 | - main
9 | push:
10 | branches:
11 | - main
12 |
13 | jobs:
14 | # based on https://slashgear.github.io/how-to-split-test-by-folder-with-github-action/
15 | get_notebooks:
16 | runs-on: ubuntu-latest
17 | outputs:
18 | notebook: ${{ steps.get-notebooks.outputs.nb }}
19 | steps:
20 | - uses: actions/checkout@v4
21 | - id: get-notebooks
22 | # it's weird to me, but the quotes around \n should *not* be escaped or it breaks
23 | run: "echo \"nb=$(ls TUTORIALS/*ipynb | jq -R -s -c 'split(\"\\n\")[:-1]')\"\
24 | \ >> $GITHUB_OUTPUT\n"
25 | notebooks:
26 | runs-on: ubuntu-latest
27 | needs: [get_notebooks]
28 | strategy:
29 | matrix:
30 |         python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
31 | notebook: ${{fromJson(needs.get_notebooks.outputs.notebook)}}
32 | fail-fast: false
33 | name: Execute notebooks
34 | steps:
35 | - uses: actions/checkout@v4
36 | - uses: actions/setup-python@v5
37 | with:
38 | python-version: ${{ matrix.python-version }}
39 | cache: pip
40 | cache-dependency-path: setup.py
41 | - name: Setup FFmpeg
42 | uses: FedericoCarboni/setup-ffmpeg@v3.1
43 | - name: Install dependencies
44 | # nbclient 0.5.5 is the first version that includes jupyter execute
45 | run: |
46 | pip install --upgrade --upgrade-strategy eager .
47 | pip install jupyter ipywidgets
48 | pip install "nbclient>=0.5.5"
49 | - name: Run notebooks
50 | run: jupyter execute ${{ matrix.notebook }} --kernel_name=python3
51 | tests:
52 | runs-on: ${{matrix.os}}
53 | strategy:
54 | matrix:
55 | os: [ubuntu-latest, macos-latest, windows-latest]
56 |         python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
57 | fail-fast: false
58 | name: Run tests
59 | steps:
60 | - uses: actions/checkout@v4
61 | - name: Install Python 3
62 | uses: actions/setup-python@v5
63 | with:
64 | python-version: ${{ matrix.python-version }}
65 | cache: pip
66 | cache-dependency-path: setup.py
67 | - name: Install dependencies
68 | run: |
69 | # using the --upgrade and --upgrade-strategy eager flags ensures that
70 | # pip will always install the latest allowed version of all
71 | # dependencies, to make sure the cache doesn't go stale
72 | pip install --upgrade --upgrade-strategy eager .
73 | pip install coverage
74 | - name: Run tests
75 | run: |
76 | # for some reason, need to run this in the TESTS dir in order to get
77 | # coverage to work (I couldn't get an analogous .coveragerc working in
78 | # the root directory)
79 | cd TESTS && coverage run unitTests.py
80 | # generate the xml file and move it to root dir for codecov
81 | coverage xml -o ../coverage.xml
82 | - name: Upload coverage to Codecov
83 | uses: codecov/codecov-action@a079530fc142d3d288ddf76321ca0b7fe5b18df5 # v4.4.1
84 | with:
85 | token: ${{ secrets.CODECOV_TOKEN }}
86 | all_tutorials_in_docs:
87 | runs-on: ubuntu-latest
88 | name: Check that all tutorial notebooks are included in docs
89 | needs: [get_notebooks]
90 | strategy:
91 | matrix:
92 | notebook: ${{fromJson(needs.get_notebooks.outputs.notebook)}}
93 | steps:
94 | - uses: actions/checkout@v4
95 | - name: Check for file
96 | shell: bash
97 | run: if [[ -z "$(grep ${{ matrix.notebook }} docs/tutorials/*nblink)" ]] ; then
98 | exit 1; fi
99 | no_extra_nblinks:
100 | runs-on: ubuntu-latest
101 | name: Check that we don't have any extra nblink files
102 | steps:
103 | - uses: actions/checkout@v4
104 | - name: Check same number of nblink and notebooks
105 | shell: bash
106 | run: |
107 | n_nblink=0; for file in docs/tutorials/*nblink; do let "n_nblink+=1"; done;
108 | n_ipynb=0; for file in TUTORIALS/*ipynb; do let "n_ipynb+=1"; done;
109 | if [[ $n_nblink != $n_ipynb ]]; then exit 1; fi;
110 |
111 | check:
112 | if: always()
113 | needs:
114 | - notebooks
115 | - tests
116 | runs-on: ubuntu-latest
117 | steps:
118 | - name: Decide whether all tests and notebooks succeeded
119 | uses: re-actors/alls-green@afee1c1eac2a506084c274e9c02c8e0687b48d9e # v1.2.2
120 | with:
121 | jobs: ${{ toJSON(needs) }}
122 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: deploy
2 | on:
3 | release:
4 | types: [published]
5 | workflow_dispatch: {}
6 |
7 | jobs:
8 | build-wheels:
9 | name: Make ${{ matrix.os }} wheels
10 | runs-on: ${{ matrix.os }}
11 | strategy:
12 | matrix:
13 | os: [macos-latest, ubuntu-latest, windows-latest]
14 | fail-fast: false
15 |
16 | steps:
17 | - uses: actions/checkout@v4
18 |
19 | - name: Build wheels
20 | uses: pypa/cibuildwheel@6a41245b42fcb325223b8793746f10456ed07436 # v2.23.2
21 | env:
22 | CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014
23 |         CIBW_BUILD: cp38-* cp39-* cp310-* cp311-* cp312-*
24 | CIBW_SKIP: '*musllinux*'
25 | CIBW_ARCHS: native
26 | CIBW_BUILD_FRONTEND: build
27 | CIBW_TEST_COMMAND: python {project}/TESTS/unitTests.py
28 | # cross-compilation for Apple Silicon:
29 | # https://cibuildwheel.readthedocs.io/en/stable/faq/#how-to-cross-compile
30 | CIBW_ARCHS_MACOS: x86_64 arm64
31 |
32 | - name: Upload wheel as artifact
33 | uses: actions/upload-artifact@v4
34 | with:
35 | name: artifact-${{ matrix.os }}-wheel
36 | path: ./**/*.whl
37 |
38 | build-sdist:
39 | name: Make source distribution
40 | runs-on: ubuntu-latest
41 | steps:
42 | - uses: actions/checkout@v4
43 |
44 | - run: pipx run build --sdist
45 |
46 | - uses: actions/upload-artifact@v4
47 | with:
48 | name: artifact-source-dist
49 | path: ./**/dist/*.tar.gz
50 |
51 | deploy:
52 | needs: [build-wheels, build-sdist]
53 | runs-on: ubuntu-latest
54 | if: github.event_name == 'release' && github.event.action == 'published'
55 | steps:
56 | - uses: actions/checkout@v4
57 | - name: Download all artifacts
58 | uses: actions/download-artifact@v4
59 | - name: Copy artifacts to dist/ folder
60 | run: |
61 | find . -name 'artifact-*' -exec unzip '{}' \;
62 | mkdir -p dist/
63 | find . -name '*.tar.gz' -exec mv '{}' dist/ \;
64 | find . -name '*.whl' -exec mv '{}' dist/ \;
65 |     - name: Publish package to PyPI
66 | uses: pypa/gh-action-pypi-publish@v1.12.4
67 | with:
68 | user: __token__
69 | password: ${{ secrets.PYPI_API_TOKEN }}
70 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | # Windows
9 | *.exp
10 | *.obj
11 | *.lib
12 |
13 | # Distribution / packaging
14 | .Python
15 | env/
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | .DS_Store
32 | *-checkpoint.ipynb
33 | test_jimmy.py
34 | 00_tests_pe.ipynb
35 |
36 | TESTS/matFiles*
37 |
38 | docs/_build
39 | docs/api
40 |
41 | .idea
42 |
43 | # created automatically by setuptools.scm, don't track
44 | version.py
45 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the version of Python and other tools you might need
9 | build:
10 | os: ubuntu-22.04
11 | tools:
12 | python: "3.10"
13 |
14 | # Build documentation in the docs/ directory with Sphinx
15 | sphinx:
16 | configuration: docs/conf.py
17 |
18 | # Build documentation with MkDocs
19 | #mkdocs:
20 | # configuration: mkdocs.yml
21 |
22 | # Optionally build your docs in additional formats such as PDF and ePub
23 | formats:
24 | - htmlzip
25 |
26 | # Optionally set the version of Python and requirements required to build your docs
27 | python:
28 | install:
29 | - method: pip
30 | path: .
31 | extra_requirements:
32 | - docs
33 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.1.0
2 | message: "If you use any component of pyrtools, please cite it as below."
3 | authors:
4 | - family-names: Simoncelli
5 | given-names: Eero
6 | - family-names: Young
7 | given-names: Rob
8 | - family-names: Broderick
9 | given-names: William
10 | - family-names: Fiquet
11 | given-names: Pierre-Étienne
12 | - family-names: Wang
13 | given-names: Zhuo
14 | - family-names: Kadkhodaie
15 | given-names: Zahra
16 | - family-names: Parthasarathy
17 | given-names: Nikhil
18 | - family-names: Ward
19 | given-names: Brian
20 | title: "Pyrtools: tools for multi-scale image processing"
21 | version: v1.0.7
22 | date-released: 2023-11-20
23 | doi: 10.5281/zenodo.10161031
24 | url: "https://github.com/LabForComputationalVision/pyrtools"
25 |
--------------------------------------------------------------------------------
/DATA/Curie.pgm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LabForComputationalVision/pyrtools/d2ef019a5d8c16a52de597529a60bbdb2030e79c/DATA/Curie.pgm
--------------------------------------------------------------------------------
/DATA/Einstein.pgm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LabForComputationalVision/pyrtools/d2ef019a5d8c16a52de597529a60bbdb2030e79c/DATA/Einstein.pgm
--------------------------------------------------------------------------------
/DATA/lenna-256x256.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LabForComputationalVision/pyrtools/d2ef019a5d8c16a52de597529a60bbdb2030e79c/DATA/lenna-256x256.tif
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 LabForComputationalVision
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pyrtools: tools for multi-scale image processing
2 |
3 | [](https://pypi.org/project/pyrtools/)
4 | [](https://anaconda.org/conda-forge/pyrtools)
5 | [](https://github.com/LabForComputationalVision/pyrtools/blob/main/LICENSE)
6 | 
7 | [](https://github.com/LabForComputationalVision/pyrtools/actions?query=workflow%3Abuild)
8 | [](https://pyrtools.readthedocs.io/en/latest/?badge=latest)
9 | [](https://zenodo.org/doi/10.5281/zenodo.10161031)
10 | [](https://mybinder.org/v2/gh/LabForComputationalVision/pyrtools/v1.0.7?filepath=TUTORIALS%2F)
11 | [](https://codecov.io/gh/LabForComputationalVision/pyrtools)
12 |
13 | Briefly, the tools include:
14 | - Recursive multi-scale image decompositions (pyramids), including
15 | Laplacian pyramids, QMFs, Wavelets, and steerable pyramids. These
16 | operate on 1D or 2D signals of arbitrary dimension.
17 | - Fast 2D convolution routines, with subsampling and boundary-handling.
18 | - Fast point-operations, histograms, histogram-matching.
19 | - Fast synthetic image generation: sine gratings, zone plates, fractals, etc.
20 | - Display routines for images and pyramids. These include several
21 | auto-scaling options, rounding to integer zoom factors to avoid
22 | resampling artifacts, and useful labeling (dimensions and gray-range).
23 |
24 | This is a python 3 port of Eero Simoncelli's
25 | [matlabPyrTools](https://github.com/LabForComputationalVision/matlabPyrTools),
26 | but it does not attempt to recreate all of the matlab code from matlabPyrTools.
27 | The goal is to create a Python interface for the C code at the heart of
28 | matlabPyrTools.
29 |
30 | **NOTE**: If you are only interested in the complex steerable pyramid, we have a
31 | pytorch implementation in the
32 | [plenoptic](https://github.com/LabForComputationalVision/plenoptic/) package;
33 | the implementation in plenoptic is differentiable.
34 |
35 | # Citing us
36 |
37 | If you use `pyrtools` in a published academic article or presentation, please
38 | cite us! You can find the link to the most recent release on Zenodo
39 | [here](https://zenodo.org/doi/10.5281/zenodo.10161031) (though please specify
40 | the version you used not the most recent one!). You can also get a formatted
41 | citation at the top right of our [GitHub
42 | repo](https://github.com/LabForComputationalVision/pyrtools)
43 |
44 | # Installation
45 |
46 | You can install `pyrtools` using either pip:
47 |
48 | ```sh
49 | pip install pyrtools
50 | ```
51 |
52 | or conda:
53 |
54 | ```sh
55 | conda install pyrtools -c conda-forge
56 | ```
57 |
58 | You may also install from source, directly from the git repository. This is
59 | largely useful if you are seeking to modify the code or make contributions. To
60 | do so, clone the repository and run `pip install`. On Mac or Linux, that looks
61 | like:
62 |
63 | ``` sh
64 | git clone https://github.com/LabForComputationalVision/pyrtools.git
65 | cd pyrtools/
66 | pip install .
67 | ```
68 |
69 | You may also want an editable install, `pip install -e .`, in which case changes
70 | you make in the source code will be reflected in your install.
71 |
72 | # Pyramid resources
73 |
74 | If you would like to learn more about pyramids and why they're helpful
75 | for image processing, here are some resources to get you started:
76 |
77 | - Brian Wandell's [Foundations of
78 | Vision](https://foundationsofvision.stanford.edu/chapter-8-multiresolution-image-representations/),
79 | chapter 8 (the rest of the book is helpful if you want to
80 | understand the basics of the visual system).
81 | - [Adelson et al, 1984, "Pyramid methods in image
82 | processing".](http://persci.mit.edu/pub_pdfs/RCA84.pdf)
83 | - Notes from David Heeger on [steerable
84 | filters](http://www.cns.nyu.edu/~david/handouts/steerable.pdf)
85 | - Notes from Eero Simoncelli on [the Steerable
86 | Pyramid](http://www.cns.nyu.edu/~eero/STEERPYR/)
87 |
88 | # Usage:
89 |
90 | - load modules:
91 | ```
92 | import pyrtools as pt
93 | ```
94 |
95 | - create pyramid:
96 | ```
97 | pyr = pt.pyramids.LaplacianPyramid(img)
98 | ```
99 |
100 | - reconstruct image from pyramid:
101 | ```
102 | recon_img = pyr.recon_pyr()
103 | ```
104 |
105 | Please see `TUTORIALS/02_pyramids.ipynb` for more examples.
106 |
107 | # For developers
108 |
109 | ## Testing
110 |
111 | You can find unit tests in `TESTS/unitTests.py` and run them with `python
112 | TESTS/unitTests.py`.
113 |
114 | ## Build the documentation
115 |
116 | NOTE: If you just want to read the documentation, you do not need to
117 | do this; documentation is built automatically on
118 | [readthedocs](https://pyrtools.readthedocs.io/en/latest/).
119 |
120 | However, it can be built locally as well. You would do this if you've
121 | made changes locally to the documentation (or the docstrings) that you
122 | would like to examine before pushing.
123 |
124 | ```
125 | # create a new virtual environment and then...
126 | # install pyrtools with sphinx and documentation-related dependencies
127 | pip install -e .[docs]
128 | # build documentation
129 | cd docs/
130 | make html
131 | ```
132 |
133 | The index page of the documentation will then be located at
134 | `docs/_build/html/index.html`, open it in your browser to navigate
135 | around.
136 |
--------------------------------------------------------------------------------
/TESTS/.coveragerc:
--------------------------------------------------------------------------------
1 | [paths]
2 | source =
3 | ../pyrtools
4 | */site-packages/pyrtools
5 |
6 | [run]
7 | branch = True
8 | source = pyrtools
9 |
10 | [report]
11 | exclude_lines =
12 | if self.debug:
13 | pragma: no cover
14 | raise NotImplementedError
15 | if __name__ == .__main__.:
16 | ignore_errors = True
17 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = _build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | import os
16 | import sys
17 | sys.path.insert(0, os.path.abspath('..'))
18 | sys.path.insert(0, os.path.abspath('./tutorials/'))
19 |
20 |
21 | # -- General configuration ---------------------------------------------------
22 |
23 | # If your documentation needs a minimal Sphinx version, state it here.
24 | #
25 | # needs_sphinx = '1.0'
26 |
27 | # Add any Sphinx extension module names here, as strings. They can be
28 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
29 | # ones.
30 | extensions = [
31 | 'sphinx.ext.autodoc',
32 | 'sphinx.ext.coverage',
33 | 'sphinx.ext.mathjax',
34 | 'sphinx.ext.viewcode',
35 | 'sphinx.ext.githubpages',
36 | 'sphinx.ext.napoleon',
37 | 'numpydoc',
38 | 'nbsphinx',
39 | 'nbsphinx_link',
40 | 'sphinxcontrib.apidoc',
41 | ]
42 |
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ['_templates']
45 |
46 | # The suffix(es) of source filenames.
47 | # You can specify multiple suffix as a list of string:
48 | #
49 | # source_suffix = ['.rst', '.md']
50 | source_suffix = '.rst'
51 |
52 | # The master toctree document.
53 | master_doc = 'index'
54 |
55 | # The language for content autogenerated by Sphinx. Refer to documentation
56 | # for a list of supported languages.
57 | #
58 | # This is also used if you do content translation via gettext catalogs.
59 | # Usually you set "language" from the command line for these cases.
60 | language = 'en'
61 |
62 | # List of patterns, relative to source directory, that match files and
63 | # directories to ignore when looking for source files.
64 | # This pattern also affects html_static_path and html_extra_path.
65 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
66 |
67 | # The name of the Pygments (syntax highlighting) style to use.
68 | pygments_style = 'sphinx'
69 |
70 |
71 | # -- Project information -----------------------------------------------------
72 |
73 | project = 'pyrtools'
74 | copyright = '2019, Eero Simoncelli, Rob Young, William Broderick, Pierre-Étienne Fiquet, Zhuo Wang, Zahra Kadkhodaie, Nikhil Parthasarathy'
75 | author = 'Eero Simoncelli, Rob Young, William Broderick, Pierre-Étienne Fiquet, Zhuo Wang, Zahra Kadkhodaie, Nikhil Parthasarathy'
76 |
77 | # The short X.Y version
78 | version = ''
79 | # The full version, including alpha/beta/rc tags
80 | import pyrtools
81 | release = pyrtools.__version__
82 |
83 |
84 | # -- Options for HTML output -------------------------------------------------
85 |
86 | # The theme to use for HTML and HTML Help pages. See the documentation for
87 | # a list of builtin themes.
88 | #
89 | html_theme = 'sphinx_rtd_theme'
90 |
91 | # Theme options are theme-specific and customize the look and feel of a theme
92 | # further. For a list of options available for each theme, see the
93 | # documentation.
94 | #
95 |
96 | # these are for the alabaster theme
97 | # html_theme_options = {
98 | # 'description': 'Python tools for multi-scale image processing',
99 | # 'github_button': True,
100 | # 'github_type': 'star',
101 | # 'travis_button': True,
102 | # 'github_user': 'LabForComputationalVision',
103 | # 'github_repo': 'pyrtools',
104 | # 'github_banner': True,
105 | # 'page_width': '1200px',
106 | # 'sidebar_width': '300px',
107 | # 'fixed_sidebar': True
108 | # }
109 |
110 | # these are for the sphinx_rtd_theme
111 | html_theme_options = {
112 | 'display_version': True,
113 |
114 | }
115 |
116 | # Add any paths that contain custom static files (such as style sheets) here,
117 | # relative to this directory. They are copied after the builtin static files,
118 | # so a file named "default.css" will overwrite the builtin "default.css".
119 | html_static_path = ['_static']
120 |
121 | # Custom sidebar templates, must be a dictionary that maps document names
122 | # to template names.
123 | #
124 | # The default sidebars (for documents that don't match any pattern) are
125 | # defined by theme itself. Builtin themes are using these templates by
126 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
127 | # 'searchbox.html']``.
128 | #
129 | # html_sidebars = {}
130 |
131 |
132 | # -- Options for HTMLHelp output ---------------------------------------------
133 |
134 | # Output file base name for HTML help builder.
135 | htmlhelp_basename = 'pyrtoolsdoc'
136 |
137 |
138 | # -- Options for LaTeX output ------------------------------------------------
139 |
140 | latex_elements = {
141 | # The paper size ('letterpaper' or 'a4paper').
142 | #
143 | # 'papersize': 'letterpaper',
144 |
145 | # The font size ('10pt', '11pt' or '12pt').
146 | #
147 | # 'pointsize': '10pt',
148 |
149 | # Additional stuff for the LaTeX preamble.
150 | #
151 | # 'preamble': '',
152 |
153 | # Latex figure (float) alignment
154 | #
155 | # 'figure_align': 'htbp',
156 | }
157 |
158 | # Grouping the document tree into LaTeX files. List of tuples
159 | # (source start file, target name, title,
160 | # author, documentclass [howto, manual, or own class]).
161 | latex_documents = [
162 | (master_doc, 'pyrtools.tex', 'pyrtools Documentation',
163 | 'Eero Simoncelli, Rob Young, William Broderick, Pierre-Étienne Fiquet, Zhuo Wang, Zahra Kadkhodaie, Nikhil Parthasarathy', 'manual'),
164 | ]
165 |
166 |
167 | # -- Options for manual page output ------------------------------------------
168 |
169 | # One entry per manual page. List of tuples
170 | # (source start file, name, description, authors, manual section).
171 | man_pages = [
172 | (master_doc, 'pyrtools', 'pyrtools Documentation',
173 | [author], 1)
174 | ]
175 |
176 |
177 | # -- Options for Texinfo output ----------------------------------------------
178 |
179 | # Grouping the document tree into Texinfo files. List of tuples
180 | # (source start file, target name, title, author,
181 | # dir menu entry, description, category)
182 | texinfo_documents = [
183 | (master_doc, 'pyrtools', 'pyrtools Documentation',
184 | author, 'pyrtools', 'One line description of project.',
185 | 'Miscellaneous'),
186 | ]
187 |
188 |
189 | # -- Options for Epub output -------------------------------------------------
190 |
191 | # Bibliographic Dublin Core info.
192 | epub_title = project
193 |
194 | # The unique identifier of the text. This can be a ISBN number
195 | # or the project homepage.
196 | #
197 | # epub_identifier = ''
198 |
199 | # A unique identification for the text.
200 | #
201 | # epub_uid = ''
202 |
203 | # A list of files that should not be packed into the epub file.
204 | epub_exclude_files = ['search.html']
205 |
206 |
207 | # -- Extension configuration -------------------------------------------------
208 | apidoc_module_dir = "../src/pyrtools"
209 |
--------------------------------------------------------------------------------
/docs/developerguide.rst:
--------------------------------------------------------------------------------
1 | .. _dev-guide:
2 |
3 | Information for developers
4 | **************************
5 |
6 | Unit tests
7 | ==========
8 |
9 | For running all the unit tests and avoiding bugs, please simply run from the
10 | main folder::
11 |
12 | python TESTS/unitTests.py
13 |
14 | If all the tests pass, then you might be able to submit your Pull Request as explained
15 | in the next section!
16 |
17 | Proposing a Pull Request (PR)
18 | =============================
19 |
20 | Each PR must be documented using docstrings and must pass the unit
21 | tests. If you are adding new functionality, add a new test to
22 | `unitTests.py` and provide an example.
23 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. |pypi-shield| image:: https://img.shields.io/pypi/v/pyrtools.svg
2 | :target: https://pypi.org/project/pyrtools/
3 |
4 | .. |conda-shield| image:: https://anaconda.org/conda-forge/pyrtools/badges/version.svg
5 | :target: https://anaconda.org/conda-forge/pyrtools
6 |
7 | .. |license-shield| image:: https://img.shields.io/badge/license-MIT-yellow.svg
8 | :target: https://github.com/LabForComputationalVision/pyrtools/blob/main/LICENSE
9 |
10 | .. |python-version-shield| image:: https://img.shields.io/badge/python-3.8%7C3.9%7C3.10%7C3.11%7C3.12-blue.svg
11 |
12 | .. |build| image:: https://github.com/LabForComputationalVision/pyrtools/workflows/build/badge.svg
13 | :target: https://github.com/LabForComputationalVision/pyrtools/actions?query=workflow%3Abuild
14 |
15 | .. |binder| image:: https://mybinder.org/badge_logo.svg
16 | :target: https://mybinder.org/v2/gh/LabForComputationalVision/pyrtools/v1.0.7?filepath=TUTORIALS%2F
17 |
18 | .. |doi| image:: https://zenodo.org/badge/137527035.svg
19 | :target: https://zenodo.org/doi/10.5281/zenodo.10161031
20 |
21 | .. pyrtools documentation master file, created by
22 | sphinx-quickstart on Mon Mar 25 17:57:12 2019.
23 | You can adapt this file completely to your liking, but it should at least
24 | contain the root `toctree` directive.
25 |
26 | pyrtools
27 | ====================================
28 |
29 | |pypi-shield| |conda-shield| |license-shield| |python-version-shield| |build| |binder| |doi|
30 |
31 | Pyrtools is a python package for multi-scale image processing, adapted
32 | from Eero Simoncelli's `matlabPyrTools
33 | <https://github.com/LabForComputationalVision/matlabPyrTools>`_.
34 |
35 | The tools include:
36 | - Recursive multi-scale image decompositions (pyramids), including
37 | Laplacian pyramids, QMFs, Wavelets, and steerable pyramids. These
38 | operate on 1D or 2D signals of arbitrary dimension.
39 | - Fast 2D convolution routines, with subsampling and boundary-handling.
40 | - Fast point-operations, histograms, histogram-matching.
41 | - Fast synthetic image generation: sine gratings, zone plates, fractals, etc.
42 | - Display routines for images and pyramids. These include several
43 | auto-scaling options, rounding to integer zoom factors to avoid
44 | resampling artifacts, and useful labeling (dimensions and gray-range).
45 |
46 | **NOTE**: If you are only interested in the complex steerable pyramid, we have a pytorch implementation in the `plenoptic <https://github.com/LabForComputationalVision/plenoptic/>`_ package; the implementation in plenoptic is differentiable.
47 |
48 | Citing us
49 | ---------
50 |
51 | If you use ``pyrtools`` in a published academic article or presentation, please
52 | cite us! You can find the link to the most recent release on Zenodo `here
53 | <https://zenodo.org/doi/10.5281/zenodo.10161031>`_ (though please specify the
54 | version you used not the most recent one!). You can also get a formatted
55 | citation at the top right of our `GitHub repo
56 | `_
57 |
58 | .. include:: quickstart.rst
59 |
60 | Pyramid resources
61 | ------------------
62 |
63 | If you would like to learn more about pyramids and why they're helpful
64 | for image processing, here are some resources to get you started:
65 |
66 | - Brian Wandell's `Foundations of Vision
67 | `_,
68 | chapter 8 (the rest of the book is helpful if you want to
69 | understand the basics of the visual system).
70 | - `Adelson et al, 1984, "Pyramid methods in image
71 | processing". `_
72 | - Notes from David Heeger on `steerable filters
73 | `_
74 | - Notes from Eero Simoncelli on `the Steerable Pyramid
75 | `_
76 |
77 |
78 | .. toctree::
79 | :maxdepth: 2
80 |
81 | installation
82 | developerguide
83 |
84 | .. toctree::
85 | :maxdepth: 2
86 | :caption: Tutorials
87 | :glob:
88 | :numbered:
89 |
90 | tutorials/*
91 |
92 | .. toctree::
93 | :caption: API Documentation
94 |
95 | api/modules
96 |
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | .. _install:
2 |
3 | Installation
4 | ************
5 |
6 | You can install pyrtools via the ``pip`` package management system, via
7 | ``conda``, or directly from source.
8 |
9 | .. attention:: Windows support was added in version 1.0.3. If you are on Windows and get an installation error, make sure you are installing the newest version.
10 |
11 | Recommended
12 | ===========
13 |
14 | We recommend using either ``pip`` or ``conda``.
15 |
16 | To install using ``pip``, run the following in a shell::
17 |
18 | pip install pyrtools
19 |
20 | To install using ``conda``, run the following in a shell::
21 |
22 | conda install pyrtools -c conda-forge
23 |
24 | From source
25 | ===========
26 |
27 | Obtain the latest version of pyrtools::
28 |
29 | git clone https://github.com/LabForComputationalVision/pyrtools
30 |
31 | (If you have already cloned the repo, you can update it with ``git pull``.)
32 |
33 | Finally, the package is installed by running::
34 |
35 | cd pyrtools
36 | pip install -e .
37 |
38 | This will install an editable version of the package, so changes made
39 | to the files within the pyrtools directory will be reflected in the
40 | version of pyrtools you use.
41 |
42 | When installing from source on Linux or Mac, we require ``gcc`` version >= 6 in
43 | order for the C code to compile, because of `this issue
44 | `_
45 |
46 | When installing from source on Windows, Microsoft Visual C++ 14.0 or greater is required, which can be obtained with `Microsoft C++ Build Tools `_.
47 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

REM Default to the sphinx-build executable on PATH unless SPHINXBUILD is set.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

REM No build target given: fall through to Sphinx's help listing.
if "%1" == "" goto help

REM Probe that sphinx-build exists; errorlevel 9009 means "command not found".
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Delegate the requested target (html, latex, ...) to Sphinx's make mode.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd
36 |
--------------------------------------------------------------------------------
/docs/quickstart.rst:
--------------------------------------------------------------------------------
1 | Quick Start
2 | *************
3 |
4 | Open a shell and run::
5 |
6 | pip install pyrtools
7 |
8 | More instructions available at :ref:`install`.
9 |
10 | In the python interpreter, then call::
11 |
12 | import pyrtools as pt
13 |
14 | Create pyramid::
15 |
16 | pyr = pt.pyramids.LaplacianPyramid(img)
17 |
18 | Reconstruct image from pyramid::
19 |
20 | recon_img = pyr.recon_pyr()
21 |
22 | For more details, see the jupyter notebooks included in the
23 | ``TUTORIALS/`` directory, static versions of which are linked in the
24 | navigation sidebar. You can play around with a live version of them in
order to test out the code before downloading on `binder
<https://mybinder.org/v2/gh/LabForComputationalVision/pyrtools/v1.0.7?filepath=TUTORIALS%2F>`_
27 |
--------------------------------------------------------------------------------
/docs/tutorials/01_tools.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../TUTORIALS/01_tools.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/tutorials/02_pyramids.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../TUTORIALS/02_pyramids.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/docs/tutorials/03_steerable_pyramids.nblink:
--------------------------------------------------------------------------------
1 | {
2 | "path": "../../TUTORIALS/03_steerable_pyramids.ipynb"
3 | }
4 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyrtools"
3 | dynamic = ["version"]
4 | authors = [{name="Pyrtools authors"}]
5 | description = "Python tools for multi-scale image processing, including Laplacian pyramids, Wavelets, and Steerable Pyramids."
6 | readme = "README.md"
7 | requires-python = ">=3.8"
8 | classifiers = [
9 | "Development Status :: 4 - Beta",
10 | "Programming Language :: Python :: 3",
11 | "License :: OSI Approved :: MIT License",
12 | "Intended Audience :: Science/Research",
13 | ]
14 | keywords = ['image processing', 'visual information processing', 'computational models']
15 |
16 | dependencies = ['numpy>=1.1',
17 | 'scipy>=0.18',
18 | 'matplotlib>=1.5',
19 | 'tqdm>=4.29',
20 | 'requests>=2.21']
21 |
22 | [project.optional-dependencies]
23 | docs = [
24 | 'sphinx',
25 | 'numpydoc',
26 | # because of this issue:
27 | # https://nbsphinx.readthedocs.io/en/0.6.0/installation.html#Pygments-Lexer-for-Syntax-Highlighting
28 | 'ipython',
29 | 'nbsphinx',
30 | 'nbsphinx_link',
31 | 'sphinxcontrib-apidoc',
32 | # fix sphinx 7 incompatibility issue
33 | 'sphinx_rtd_theme>=1.3.0rc1'
34 | ]
35 |
36 | [build-system]
37 | requires = ["setuptools", "wheel", "setuptools-scm[toml]"]
38 | build-backend = "setuptools.build_meta"
39 |
40 | [project.urls]
41 | "Homepage" = "https://github.com/LabForComputationalVision/pyrtools"
42 | "Documentation" = "https://pyrtools.readthedocs.io/en/latest/"
43 | "Download" = "https://zenodo.org/records/10403034"
44 |
45 | [tool.setuptools.packages.find]
46 | where = ["src"]
47 |
48 | [tool.setuptools_scm]
49 | write_to = "src/pyrtools/version.py"
50 | version_scheme = 'python-simplified-semver'
51 | local_scheme = 'no-local-version'
52 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | .
2 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 |
3 | from wheel.bdist_wheel import bdist_wheel
4 | from setuptools import setup, Extension
5 |
6 | # Adapted from the cibuildwheel example https://github.com/joerick/python-ctypes-package-sample
7 | # it marks the wheel as not specific to the Python API version.
class WheelABINone(bdist_wheel):
    """``bdist_wheel`` variant producing ``py3-none-<platform>`` wheels.

    The package ships a compiled C extension accessed through ``ctypes``, so
    the wheel is platform-specific but independent of the CPython ABI and of
    the exact Python 3 minor version. Adapted from the cibuildwheel example
    https://github.com/joerick/python-ctypes-package-sample
    """

    def finalize_options(self):
        # Mark the build as impure so the platform tag is not dropped.
        super().finalize_options()
        self.root_is_pure = False

    def get_tag(self):
        # Keep only the platform component; Python and ABI tags are generic.
        _, _, platform_tag = super().get_tag()
        return "py3", "none", platform_tag
16 |
17 |
# Build the single C extension (separable convolution, edge handling, and
# point operations) that backs pyrtools.pyramids.c.wrapper; all remaining
# package metadata lives in pyproject.toml.
setup(
    ext_modules=[Extension('pyrtools.pyramids.c.wrapConv',
                           sources=['src/pyrtools/pyramids/c/py.c',
                                    'src/pyrtools/pyramids/c/convolve.c',
                                    'src/pyrtools/pyramids/c/edges.c',
                                    'src/pyrtools/pyramids/c/wrap.c',
                                    'src/pyrtools/pyramids/c/internal_pointOp.c'],
                           # headers listed so a header change triggers a rebuild
                           depends=['src/pyrtools/pyramids/c/meta.h',
                                    'src/pyrtools/pyramids/c/convolve.h',
                                    'src/pyrtools/pyramids/c/internal_pointOp.h'],
                           extra_compile_args=['-fPIC', '-shared'])],
    # tag wheels as py3-none-<platform> (see WheelABINone above)
    cmdclass={"bdist_wheel": WheelABINone},
)
31 |
--------------------------------------------------------------------------------
/src/pyrtools/__init__.py:
--------------------------------------------------------------------------------
1 | from . import pyramids
2 |
3 | from .pyramids.c.wrapper import corrDn, upConv, pointOp
4 | from .pyramids.filters import named_filter, binomial_filter, steerable_filters
5 |
6 | from .tools import synthetic_images
7 | from .tools.convolutions import blurDn, blur, upBlur, image_gradient, rconv2
8 | from .tools.display import imshow, animshow, pyrshow, make_figure
9 | from .tools.image_stats import image_compare, image_stats, range, skew, var, entropy
10 | from .tools.utils import rcosFn, matlab_histo, matlab_round, project_polar_to_cartesian
11 | from .tools.compare_matpyrtools import comparePyr, compareRecon
12 |
13 | from .version import version as __version__
14 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/GaussianPyramid.py:
--------------------------------------------------------------------------------
1 | from .pyramid import Pyramid
2 | from .filters import parse_filter
3 | from .c.wrapper import corrDn
4 |
5 |
class GaussianPyramid(Pyramid):
    """Gaussian pyramid

    Parameters
    ----------
    image : `array_like`
        1d or 2d image upon which to construct to the pyramid.
    height : 'auto' or `int`.
        The height of the pyramid. If 'auto', will automatically determine based on the size of
        `image`.
    filter_name : {'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4', 'qmf5',
                   'qmf9', 'qmf13'}
        name of filter to use when constructing pyramid. All scaled so L-2 norm is 1.0

        * `'binomN'` - binomial coefficient filter of order N-1
        * `'haar'` - Haar wavelet
        * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
        * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
        * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.

    Attributes
    ----------
    image : `array_like`
        The input image used to construct the pyramid.
    image_size : `tuple`
        The size of the input image.
    pyr_type : `str` or `None`
        Human-readable string specifying the type of pyramid. For base class, is None.
    edge_type : `str`
        Specifies how edges were handled.
    pyr_coeffs : `dict`
        Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
        values are 1d or 2d numpy arrays (same number of dimensions as the input image)
    pyr_size : `dict`
        Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
        tuples and values are tuples.
    is_complex : `bool`
        Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
        a value of True, all others must be False.

    References
    ----------
    .. [1] J D Johnston, "A filter family designed for use in quadrature mirror filter banks",
       Proc. ICASSP, pp 291-294, 1980.
    .. [2] I Daubechies, "Orthonormal bases of compactly supported wavelets", Commun. Pure Appl.
       Math, vol. 42, pp 909-996, 1988.
    .. [3] E P Simoncelli, "Orthogonal sub-band image transforms", PhD Thesis, MIT Dept. of Elec.
       Eng. and Comp. Sci. May 1988. Also available as: MIT Media Laboratory Vision and Modeling
       Technical Report #100.
    .. [4] E P Simoncelli and E H Adelson, "Subband image coding", Subband Transforms, chapter 4,
       ed. John W Woods, Kluwer Academic Publishers, Norwell, MA, 1990, pp 143--192.

    """

    def __init__(self, image, height='auto', filter_name='binom5', edge_type='reflect1', **kwargs):
        super().__init__(image=image, edge_type=edge_type)
        # subclasses (e.g. LaplacianPyramid) set pyr_type before reaching this
        # constructor; only fill in the default when it is still unset
        if self.pyr_type is None:
            self.pyr_type = 'Gaussian'
        self.num_orientations = 1

        self.filters = {'downsample_filter': parse_filter(filter_name, normalize=False)}
        upsamp_filt = kwargs.pop('upsample_filter_name', None)
        if upsamp_filt is not None:
            if self.pyr_type != 'Laplacian':
                # ValueError rather than bare Exception: more precise, and still
                # caught by any pre-existing `except Exception` handler
                raise ValueError("upsample_filter should only be set for Laplacian pyramid!")
            self.filters['upsample_filter'] = parse_filter(upsamp_filt, normalize=False)
        self._set_num_scales('downsample_filter', height, 1)

        self._build_pyr()

    def _build_next(self, image):
        """build the next level of the pyramid

        Blur `image` with the downsample filter and subsample by a factor of 2
        (separably along each dimension that is larger than 1).

        This should not be called directly by users, it's a helper function for constructing the
        pyramid

        """
        if image.shape[0] == 1:
            # 1d row signal: filter/subsample along columns only
            res = corrDn(image=image, filt=self.filters['downsample_filter'].T, edge_type=self.edge_type, step=(1, 2))
        elif image.shape[1] == 1:
            # 1d column signal: filter/subsample along rows only
            res = corrDn(image=image, filt=self.filters['downsample_filter'], edge_type=self.edge_type, step=(2, 1))
        else:
            # 2d image: separable convolution, columns first then rows
            tmp = corrDn(image=image, filt=self.filters['downsample_filter'].T, edge_type=self.edge_type, step=(1, 2))
            res = corrDn(image=tmp, filt=self.filters['downsample_filter'], edge_type=self.edge_type, step=(2, 1))
        return res

    def _build_pyr(self):
        """build the pyramid

        This should not be called directly by users, it's a helper function for constructing the
        pyramid

        we do this in a separate method for a bit of class wizardry: by over-writing this method in
        the LaplacianPyramid class, which inherits the GaussianPyramid class, we can still
        correctly construct the LaplacianPyramid with a single call to the GaussianPyramid
        constructor
        """
        im = self.image
        # level 0 is the input image itself; each subsequent level is a
        # blurred-and-downsampled copy of the previous one
        self.pyr_coeffs[(0, 0)] = self.image.copy()
        self.pyr_size[(0, 0)] = self.image_size
        for lev in range(1, self.num_scales):
            im = self._build_next(im)
            self.pyr_coeffs[(lev, 0)] = im.copy()
            self.pyr_size[(lev, 0)] = im.shape

    def recon_pyr(self, *args):
        """Reconstruct the pyramid -- NOT NECESSARY FOR GAUSSIANS

        Each Gaussian level already is a (downsampled) image, so there is
        nothing to reconstruct; level `(0, 0)` of `pyr_coeffs` holds the input.

        Raises
        ------
        NotImplementedError
            Always. (Subclass of Exception, so existing handlers still work.)
        """
        raise NotImplementedError('Not necessary for Gaussian Pyramids')
125 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/LaplacianPyramid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .GaussianPyramid import GaussianPyramid
3 | from .filters import parse_filter
4 | from .c.wrapper import upConv
5 |
6 |
class LaplacianPyramid(GaussianPyramid):
    """Laplacian pyramid

    Parameters
    ----------
    image : `array_like`
        1d or 2d image upon which to construct to the pyramid.
    height : 'auto' or `int`.
        The height of the pyramid. If 'auto', will automatically determine based on the size of
        `image`.
    downsample_filter_name : {'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3',
                              'daub4', 'qmf5', 'qmf9', 'qmf13'}
        name of filter to use for (separable) convolution to downsample the image. All scaled so
        L-2 norm is 1.0

        * `'binomN'` (default: 'binom5') - binomial coefficient filter of order N-1
        * `'haar'` - Haar wavelet
        * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
        * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
        * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
    upsample_filter_name : {None, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3',
                            'daub4', 'qmf5', 'qmf9', 'qmf13'}
        name of filter to use as the "expansion" filter. All scaled so L-2 norm is 1.0

        * None (default) - same as `downsample_filter_name`
        * `'binomN'` - binomial coefficient filter of order N-1
        * `'haar'` - Haar wavelet
        * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
        * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
        * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.

    Attributes
    ----------
    image : `array_like`
        The input image used to construct the pyramid.
    image_size : `tuple`
        The size of the input image.
    pyr_type : `str` or `None`
        Human-readable string specifying the type of pyramid. For base class, is None.
    edge_type : `str`
        Specifies how edges were handled.
    pyr_coeffs : `dict`
        Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
        values are 1d or 2d numpy arrays (same number of dimensions as the input image)
    pyr_size : `dict`
        Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
        tuples and values are tuples.
    is_complex : `bool`
        Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
        a value of True, all others must be False.

    References
    ----------
    .. [1] J D Johnston, "A filter family designed for use in quadrature mirror filter banks",
       Proc. ICASSP, pp 291-294, 1980.
    .. [2] I Daubechies, "Orthonormal bases of compactly supported wavelets", Commun. Pure Appl.
       Math, vol. 42, pp 909-996, 1988.
    .. [3] E P Simoncelli, "Orthogonal sub-band image transforms", PhD Thesis, MIT Dept. of Elec.
       Eng. and Comp. Sci. May 1988. Also available as: MIT Media Laboratory Vision and Modeling
       Technical Report #100.
    .. [4] E P Simoncelli and E H Adelson, "Subband image coding", Subband Transforms, chapter 4,
       ed. John W Woods, Kluwer Academic Publishers, Norwell, MA, 1990, pp 143--192.

    """
    def __init__(self, image, height='auto', downsample_filter_name='binom5',
                 upsample_filter_name=None, edge_type='reflect1'):
        # set pyr_type before calling the GaussianPyramid constructor, which
        # only fills in its default when pyr_type is still None
        self.pyr_type = 'Laplacian'
        if upsample_filter_name is None:
            upsample_filter_name = downsample_filter_name
        super().__init__(image, height, downsample_filter_name, edge_type, upsample_filter_name=upsample_filter_name)


    def _build_pyr(self):
        """build the pyramid

        This should not be called directly by users, it's a helper function for constructing the
        pyramid

        """
        im = self.image
        for lev in range(self.num_scales - 1):
            # blur-and-downsample, then re-expand to the current size: the
            # coefficient stored at this level is the residual between the
            # image and its reconstruction from the next-coarser level
            im_next = self._build_next(im)
            im_recon = self._recon_prev(im_next, output_size=im.shape)
            im_residual = im - im_recon
            self.pyr_coeffs[(lev, 0)] = im_residual.copy()
            self.pyr_size[(lev, 0)] = im_residual.shape
            im = im_next
        # the coarsest level stores the lowpass image itself, not a residual
        self.pyr_coeffs[(lev+1, 0)] = im.copy()
        self.pyr_size[(lev+1, 0)] = im.shape


    def _recon_prev(self, image, output_size, upsample_filter=None, edge_type=None):
        """Reconstruct the previous level of the pyramid.

        Upsample `image` by a factor of 2 (separably along each dimension that
        is larger than 1) and convolve with the expansion filter, producing an
        array of shape `output_size`.

        Should not be called by users directly, this is a helper function for reconstructing the
        input image using pyramid coefficients.

        """
        if upsample_filter is None:
            # default to the expansion filter chosen at construction time
            upsample_filter = self.filters['upsample_filter']
        else:
            upsample_filter = parse_filter(upsample_filter, normalize=False)

        if edge_type is None:
            edge_type = self.edge_type

        if image.shape[0] == 1:
            # 1d row signal: upsample along columns only
            res = upConv(image=image, filt=upsample_filter.T, edge_type=edge_type, step=(1, 2), stop=(output_size[0], output_size[1]))
        elif image.shape[1] == 1:
            # 1d column signal: upsample along rows only
            res = upConv(image=image, filt=upsample_filter, edge_type=edge_type, step=(2, 1), stop=(output_size[0], output_size[1]))
        else:
            # 2d image: separable upsampling, rows first then columns
            tmp = upConv(image=image, filt=upsample_filter, edge_type=edge_type, step=(2, 1), stop=(output_size[0], image.shape[1]))
            res = upConv(image=tmp, filt=upsample_filter.T, edge_type=edge_type, step=(1, 2), stop=(output_size[0], output_size[1]))
        return res


    def recon_pyr(self, upsample_filter_name=None, edge_type=None, levels='all'):
        """Reconstruct the input image using pyramid coefficients

        Parameters
        ----------
        upsample_filter_name : {None, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3',
                                'daub4', 'qmf5', 'qmf9', 'qmf13'}
            name of filter to use as "expansion" filter. All scaled so L-2 norm is 1.0

            * None (default) - use `self.upsample_filter_name`, the expansion filter set during
              initialization.
            * `'binomN'` - binomial coefficient filter of order N-1
            * `'haar'` - Haar wavelet
            * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
            * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
            * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
        edge_type : {None, 'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend',
                     'dont-compute'}
            Specifies how to handle edges. Options are:

            * None (default) - use `self.edge_type`, the edge_type used to construct the pyramid
            * `'circular'` - circular convolution
            * `'reflect1'` - reflect about the edge pixels
            * `'reflect2'` - reflect, doubling the edge pixels
            * `'repeat'` - repeat the edge pixels
            * `'zero'` - assume values of zero outside image boundary
            * `'extend'` - reflect and invert
            * `'dont-compute'` - zero output when filter overhangs input boundaries.
        levels : `list`, `int`, or `'all'`
            If `list` should contain some subset of integers from `0` to `self.num_scales-1`
            (inclusive). If `'all'`, returned value will contain all valid levels. Otherwise,
            must be one of the valid levels.

        Returns
        -------
        recon : `np.array`
            The reconstructed image.
        """
        recon_keys = self._recon_keys(levels, 'all')
        # start from zeros shaped like the coarsest level and work upward,
        # expanding the running reconstruction and adding in each requested
        # level's stored coefficients
        recon = np.zeros_like(self.pyr_coeffs[(self.num_scales-1, 0)])
        for lev in reversed(range(self.num_scales)):
            # upsample to generate the higher-resolution image
            recon = self._recon_prev(recon, self.pyr_size[(lev, 0)], upsample_filter_name, edge_type)
            if (lev, 0) in recon_keys:
                recon += self.pyr_coeffs[(lev, 0)]
        return recon
179 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/SteerablePyramidFreq.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | import numpy as np
3 | from scipy.special import factorial
4 | from .pyramid import SteerablePyramidBase
5 | from .c.wrapper import pointOp
6 | from ..tools.utils import rcosFn
7 |
8 |
9 | class SteerablePyramidFreq(SteerablePyramidBase):
10 | """Steerable frequency pyramid.
11 |
12 | Construct a steerable pyramid on matrix IM, in the Fourier domain.
13 | This is similar to Spyr, except that:
14 |
15 | + Reconstruction is exact (within floating point errors)
16 | + It can produce any number of orientation bands.
17 | - Typically slower, especially for non-power-of-two sizes.
18 | - Boundary-handling is circular.
19 |
20 | The squared radial functions tile the Fourier plane with a raised-cosine
21 | falloff. Angular functions are cos(theta- k*pi/order+1)^(order).
22 |
23 | Note that reconstruction will not be exact if the image has an odd shape (due to
24 | boundary-handling issues) or if the pyramid is complex with order=0.
25 |
26 | Notes
27 | -----
28 | Transform described in [1]_, filter kernel design described in [2]_.
29 |
30 | Parameters
31 | ----------
32 | image : `array_like`
33 | 2d image upon which to construct to the pyramid.
34 | height : 'auto' or `int`.
35 | The height of the pyramid. If 'auto', will automatically determine based on the size of
36 | `image`. If an int, must be non-negative. When height=0, only returns the residuals.
37 | order : `int`.
38 | The Gaussian derivative order used for the steerable filters. Default value is 3.
39 | Note that to achieve steerability the minimum number of orientation is `order` + 1,
40 | and is used here. To get more orientations at the same order, use the method `steer_coeffs`
41 | twidth : `int`
42 | The width of the transition region of the radial lowpass function, in octaves
43 | is_complex : `bool`
44 | Whether the pyramid coefficients should be complex or not. If True, the real and imaginary
45 | parts correspond to a pair of odd and even symmetric filters. If False, the coefficients
46 | only include the real part / odd symmetric filter.
47 |
48 | Attributes
49 | ----------
50 | image : `array_like`
51 | The input image used to construct the pyramid.
52 | image_size : `tuple`
53 | The size of the input image.
54 | pyr_type : `str` or `None`
55 | Human-readable string specifying the type of pyramid. For base class, is None.
56 | pyr_coeffs : `dict`
57 | Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
58 | values are 1d or 2d numpy arrays (same number of dimensions as the input image),
59 | running from fine to coarse.
60 | pyr_size : `dict`
61 | Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
62 | tuples and values are tuples.
63 | is_complex : `bool`
64 | Whether the coefficients are complex- or real-valued.
65 |
66 | References
67 | ----------
68 | .. [1] E P Simoncelli and W T Freeman, "The Steerable Pyramid: A Flexible Architecture for
69 | Multi-Scale Derivative Computation," Second Int'l Conf on Image Processing, Washington, DC,
70 | Oct 1995.
71 | .. [2] A Karasaridis and E P Simoncelli, "A Filter Design Technique for Steerable Pyramid
72 | Image Transforms", ICASSP, Atlanta, GA, May 1996.
73 |
74 | """
    def __init__(self, image, height='auto', order=3, twidth=1, is_complex=False):
        # in the Fourier domain, there's only one choice for how to do
        # edge-handling: circular. To emphasize that this isn't a choice, we
        # use None here.
        super().__init__(image=image, edge_type=None)

        self.pyr_type = 'SteerableFrequency'
        self.is_complex = is_complex
        # SteerablePyramidFreq doesn't have filters, they're constructed in the frequency space
        self.filters = {}
        self.order = int(order)

        if (image.shape[0] % 2 != 0) or (image.shape[1] % 2 != 0):
            warnings.warn("Reconstruction will not be perfect with odd-sized images")

        if self.order == 0 and self.is_complex:
            raise ValueError(
                "Complex pyramid cannot have order=0! See "
                "https://github.com/plenoptic-org/plenoptic/issues/326 "
                "for an explanation."
            )

        # we can't use the base class's _set_num_scales method because the max height is calculated
        # slightly differently
        max_ht = np.floor(np.log2(min(self.image.shape))) - 2
        if height == 'auto' or height is None:
            self.num_scales = int(max_ht)
        elif height > max_ht:
            raise ValueError("Cannot build pyramid higher than %d levels." % (max_ht))
        elif height < 0:
            raise ValueError("Height must be a non-negative int.")
        else:
            self.num_scales = int(height)

        if self.order > 15 or self.order < 0:
            raise ValueError("order must be an integer in the range [0,15].")

        # steerability requires order+1 orientation bands
        self.num_orientations = int(order + 1)

        if twidth <= 0:
            raise ValueError("twidth must be positive.")
        twidth = int(twidth)

        dims = np.asarray(self.image.shape)
        ctr = np.ceil((np.asarray(dims)+0.5)/2).astype(int)

        # normalized frequency coordinates in [-1, 1) along each axis
        (xramp, yramp) = np.meshgrid(np.linspace(-1, 1, dims[1]+1)[:-1],
                                     np.linspace(-1, 1, dims[0]+1)[:-1])

        angle = np.arctan2(yramp, xramp)
        log_rad = np.sqrt(xramp**2 + yramp**2)
        # the radius at the DC sample is 0; copy the neighboring value so the
        # log2 below doesn't produce -inf there
        log_rad[ctr[0]-1, ctr[1]-1] = log_rad[ctr[0]-1, ctr[1]-2]
        log_rad = np.log2(log_rad)

        # Radial transition function (a raised cosine in log-frequency):
        (Xrcos, Yrcos) = rcosFn(twidth, (-twidth/2.0), np.asarray([0, 1]))
        Yrcos = np.sqrt(Yrcos)

        # complementary lowpass mask: lo0mask**2 + hi0mask**2 == 1
        YIrcos = np.sqrt(1.0 - Yrcos**2)
        lo0mask = pointOp(log_rad, YIrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
        self._lo0mask = lo0mask

        imdft = np.fft.fftshift(np.fft.fft2(self.image))

        hi0mask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
        self._hi0mask = hi0mask

        hi0dft = imdft * hi0mask.reshape(imdft.shape[0], imdft.shape[1])
        hi0 = np.fft.ifft2(np.fft.ifftshift(hi0dft))

        self.pyr_coeffs['residual_highpass'] = np.real(hi0)
        self.pyr_size['residual_highpass'] = hi0.shape

        lo0mask = lo0mask.reshape(imdft.shape[0], imdft.shape[1])
        lodft = imdft * lo0mask

        # masks are cached so reconstruction can reuse them
        self._anglemasks = []
        self._himasks = []
        self._lomasks = []

        for i in range(self.num_scales):
            # shift the radial transition down one octave per scale
            Xrcos -= np.log2(2)

            lutsize = 1024
            Xcosn = np.pi * np.arange(-(2*lutsize+1), (lutsize+2)) / lutsize

            # normalization constant for the angular cos^order windows
            const = (2**(2*self.order))*(factorial(self.order, exact=True)**2)/ float(self.num_orientations*factorial(2*self.order, exact=True))

            if self.is_complex:
                # TODO clean that up and give comments
                alfa = ((np.pi+Xcosn) % (2.0*np.pi)) - np.pi
                Ycosn = (2.0 * np.sqrt(const) * (np.cos(Xcosn) ** self.order) *
                         (np.abs(alfa) < np.pi/2.0).astype(int))
            else:
                Ycosn = np.sqrt(const) * (np.cos(Xcosn))**self.order

            log_rad_test = np.reshape(log_rad, (1, log_rad.shape[0] * log_rad.shape[1]))
            himask = pointOp(log_rad_test, Yrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
            himask = himask.reshape((lodft.shape[0], lodft.shape[1]))
            self._himasks.append(himask)

            anglemasks = []
            for b in range(self.num_orientations):
                angle_tmp = np.reshape(angle, (1, angle.shape[0] * angle.shape[1]))
                anglemask = pointOp(angle_tmp, Ycosn, Xcosn[0]+np.pi*b/self.num_orientations,
                                    Xcosn[1]-Xcosn[0])

                anglemask = anglemask.reshape(lodft.shape[0], lodft.shape[1])
                anglemasks.append(anglemask)
                # that (-1j)**order term in the beginning will be 1, -j, -1, j for order 0, 1, 2,
                # 3, and will then loop again
                banddft = (-1j) ** self.order * lodft * anglemask * himask
                band = np.fft.ifft2(np.fft.ifftshift(banddft))
                if not self.is_complex:
                    self.pyr_coeffs[(i, b)] = np.real(band.copy())
                else:
                    self.pyr_coeffs[(i, b)] = band.copy()
                self.pyr_size[(i, b)] = band.shape

            self._anglemasks.append(anglemasks)
            # crop the central (low-frequency) region of the DFT, roughly
            # halving each dimension for the next scale
            dims = np.asarray(lodft.shape)
            ctr = np.ceil((dims+0.5)/2).astype(int)
            lodims = np.ceil((dims-0.5)/2).astype(int)
            loctr = np.ceil((lodims+0.5)/2).astype(int)
            lostart = ctr - loctr
            loend = lostart + lodims

            log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
            angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
            lodft = lodft[lostart[0]:loend[0], lostart[1]:loend[1]]
            YIrcos = np.abs(np.sqrt(1.0 - Yrcos**2))
            log_rad_tmp = np.reshape(log_rad, (1, log_rad.shape[0] * log_rad.shape[1]))
            lomask = pointOp(log_rad_tmp, YIrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
            lomask = lomask.reshape(lodft.shape[0], lodft.shape[1])
            self._lomasks.append(lomask)

            lodft = lodft * lomask

        # whatever remains in the lowpass band after the final scale becomes
        # the lowpass residual
        lodft = np.fft.ifft2(np.fft.ifftshift(lodft))
        self.pyr_coeffs['residual_lowpass'] = np.real(np.asarray(lodft).copy())
        self.pyr_size['residual_lowpass'] = lodft.shape
215 |
    def recon_pyr(self, levels='all', bands='all', twidth=1):
        """Reconstruct the image, optionally using subset of pyramid coefficients.

        Reconstruction is performed in the frequency domain: each requested
        subband is FFT'd, multiplied by the same radial/angular masks used
        during construction, and accumulated into a single DFT that is then
        inverted. Excluded levels/bands contribute zeros.

        Parameters
        ----------
        levels : `list`, `int`, or {`'all'`, `'residual_highpass'`}
            If `list` should contain some subset of integers from `0` to `self.num_scales-1`
            (inclusive) and `'residual_lowpass'`. If `'all'`, returned value will contain all
            valid levels. Otherwise, must be one of the valid levels.
        bands : `list`, `int`, or `'all'`.
            If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
            If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
            of the valid orientations.
        twidth : `int`
            The width of the transition region of the radial lowpass function, in octaves

        Returns
        -------
        recon : `np.array`
            The reconstructed image.

        Raises
        ------
        ValueError
            If `twidth` is not positive.

        """
        if twidth <= 0:
            raise ValueError("twidth must be positive.")

        # set of keys ('residual_highpass', 'residual_lowpass', (level, band))
        # that will actually contribute to the reconstruction
        recon_keys = self._recon_keys(levels, bands)

        # make list of dims and bounds
        bound_list = []
        dim_list = []
        # we go through pyr_sizes from smallest to largest; each unique size
        # gets one entry. The bounds describe where the next-smaller (cropped)
        # spectrum sits inside a spectrum of this size -- this mirrors the
        # center-cropping done during construction.
        for dims in sorted(self.pyr_size.values()):
            if dims in dim_list:
                continue
            dim_list.append(dims)
            dims = np.asarray(dims)
            ctr = np.ceil((dims+0.5)/2).astype(int)
            lodims = np.ceil((dims-0.5)/2).astype(int)
            loctr = np.ceil((lodims+0.5)/2).astype(int)
            lostart = ctr - loctr
            loend = lostart + lodims
            bounds = (lostart[0], lostart[1], loend[0], loend[1])
            bound_list.append(bounds)
        # sentinel entries for the full-size (largest) spectrum
        bound_list.append((0, 0, dim_list[-1][0], dim_list[-1][1]))
        dim_list.append((dim_list[-1][0], dim_list[-1][1]))

        # matlab code starts here
        # polar frequency coordinates over the full-size spectrum
        dims = np.asarray(self.pyr_size['residual_highpass'])
        ctr = np.ceil((dims+0.5)/2.0).astype(int)

        (xramp, yramp) = np.meshgrid((np.arange(1, dims[1]+1)-ctr[1]) / (dims[1]/2.),
                                     (np.arange(1, dims[0]+1)-ctr[0]) / (dims[0]/2.))
        angle = np.arctan2(yramp, xramp)
        log_rad = np.sqrt(xramp**2 + yramp**2)
        # avoid log2(0) at the DC bin by borrowing the neighboring value
        log_rad[ctr[0]-1, ctr[1]-1] = log_rad[ctr[0]-1, ctr[1]-2]
        log_rad = np.log2(log_rad)

        # Radial transition function (a raised cosine in log-frequency):
        (Xrcos, Yrcos) = rcosFn(twidth, (-twidth/2.0), np.asarray([0, 1]))
        # Yrcos/YIrcos form a complementary high/low pair: Yrcos^2 + YIrcos^2 = 1
        Yrcos = np.sqrt(Yrcos)
        YIrcos = np.sqrt(1.0 - Yrcos**2)

        # from reconSFpyrLevs
        lutsize = 1024

        # lookup-table abscissa for the angular masks
        Xcosn = np.pi * np.arange(-(2*lutsize+1), (lutsize+2)) / lutsize

        # normalization constant making the squared angular masks tile to unity
        const = (2**(2*self.order))*(factorial(self.order, exact=True)**2) / float(self.num_orientations*factorial(2*self.order, exact=True))
        Ycosn = np.sqrt(const) * (np.cos(Xcosn))**self.order

        # lowest band
        # initialize reconstruction
        if 'residual_lowpass' in recon_keys:
            nresdft = np.fft.fftshift(np.fft.fft2(self.pyr_coeffs['residual_lowpass']))
        else:
            nresdft = np.zeros_like(self.pyr_coeffs['residual_lowpass'])
        resdft = np.zeros(dim_list[1]) + 0j

        # accumulate the crop offsets from the full-size spectrum down to the
        # smallest one, so `bounds` ends up locating the lowpass residual's
        # spectrum within the full grid. NOTE: also shifts Xrcos down one
        # octave per level as a side effect; relied on below.
        bounds = (0, 0, 0, 0)
        for idx in range(len(bound_list)-2, 0, -1):
            diff = (bound_list[idx][2]-bound_list[idx][0],
                    bound_list[idx][3]-bound_list[idx][1])
            bounds = (bounds[0]+bound_list[idx][0], bounds[1]+bound_list[idx][1],
                      bounds[0]+bound_list[idx][0] + diff[0],
                      bounds[1]+bound_list[idx][1] + diff[1])
            Xrcos -= np.log2(2.0)
        nlog_rad = log_rad[bounds[0]:bounds[2], bounds[1]:bounds[3]]

        # lowpass-mask the residual and place it in the second-smallest grid
        nlog_rad_tmp = np.reshape(nlog_rad, (1, nlog_rad.shape[0]*nlog_rad.shape[1]))
        lomask = pointOp(nlog_rad_tmp, YIrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
        lomask = lomask.reshape(nresdft.shape[0], nresdft.shape[1])
        lomask = lomask + 0j
        resdft[bound_list[1][0]:bound_list[1][2],
               bound_list[1][1]:bound_list[1][3]] = nresdft * lomask

        # middle bands
        # idx walks from the second-smallest grid up to the full-size one;
        # at each step the accumulated spectrum is upsampled (zero-padded,
        # lowpass-masked) and the oriented bands of that scale are added in.
        for idx in range(1, len(bound_list)-1):
            bounds1 = (0, 0, 0, 0)
            bounds2 = (0, 0, 0, 0)
            # bounds2 locates grid `idx` within the full-size spectrum;
            # bounds1 locates grid `idx+1` (one step larger), used for the
            # oriented-band masks of this scale
            for boundIdx in range(len(bound_list) - 1, idx - 1, -1):
                diff = (bound_list[boundIdx][2]-bound_list[boundIdx][0],
                        bound_list[boundIdx][3]-bound_list[boundIdx][1])
                bound2tmp = bounds2
                bounds2 = (bounds2[0]+bound_list[boundIdx][0],
                           bounds2[1]+bound_list[boundIdx][1],
                           bounds2[0]+bound_list[boundIdx][0] + diff[0],
                           bounds2[1]+bound_list[boundIdx][1] + diff[1])
                bounds1 = bound2tmp
            nlog_rad1 = log_rad[bounds1[0]:bounds1[2], bounds1[1]:bounds1[3]]
            nlog_rad2 = log_rad[bounds2[0]:bounds2[2], bounds2[1]:bounds2[3]]
            dims = dim_list[idx]
            nangle = angle[bounds1[0]:bounds1[2], bounds1[1]:bounds1[3]]
            YIrcos = np.abs(np.sqrt(1.0 - Yrcos**2))
            if idx > 1:
                # move the radial transition up one octave for this scale
                Xrcos += np.log2(2.0)
            # embed the current accumulated spectrum, lowpass-masked, into
            # the center of the next-larger grid (frequency-domain upsample)
            nlog_rad2_tmp = np.reshape(nlog_rad2, (1, nlog_rad2.shape[0]*nlog_rad2.shape[1]))
            lomask = pointOp(nlog_rad2_tmp, YIrcos, Xrcos[0],
                             Xrcos[1]-Xrcos[0])
            lomask = lomask.reshape(bounds2[2]-bounds2[0],
                                    bounds2[3]-bounds2[1])
            lomask = lomask + 0j
            nresdft = np.zeros(dim_list[idx]) + 0j
            nresdft[bound_list[idx][0]:bound_list[idx][2],
                    bound_list[idx][1]:bound_list[idx][3]] = resdft * lomask
            resdft = nresdft.copy()

            # reconSFpyrLevs
            if idx != 0 and idx != len(bound_list)-1:
                for b in range(self.num_orientations):
                    # radial highpass mask for this scale
                    nlog_rad1_tmp = np.reshape(nlog_rad1,
                                               (1, nlog_rad1.shape[0]*nlog_rad1.shape[1]))
                    himask = pointOp(nlog_rad1_tmp, Yrcos, Xrcos[0], Xrcos[1]-Xrcos[0])

                    himask = himask.reshape(nlog_rad1.shape)
                    # angular mask selecting orientation b
                    nangle_tmp = np.reshape(nangle, (1, nangle.shape[0]*nangle.shape[1]))
                    anglemask = pointOp(nangle_tmp, Ycosn,
                                        Xcosn[0]+np.pi*b/self.num_orientations,
                                        Xcosn[1]-Xcosn[0])

                    anglemask = anglemask.reshape(nangle.shape)
                    # either the coefficients will already be real-valued (if
                    # self.is_complex=False) or complex (if self.is_complex=True). in the
                    # former case, this np.real() does nothing. in the latter, we want to only
                    # reconstruct with the real portion
                    curLev = self.num_scales-1 - (idx-1)
                    band = np.real(self.pyr_coeffs[(curLev, b)])
                    if (curLev, b) in recon_keys:
                        banddft = np.fft.fftshift(np.fft.fft2(band))
                    else:
                        banddft = np.zeros(band.shape)
                    # (sqrt(-1))**(num_orientations-1) undoes the (-1j)**order
                    # phase factor applied to the bands during construction
                    resdft += ((np.power(-1+0j, 0.5))**(self.num_orientations-1) *
                               banddft * anglemask * himask)

        # apply lo0mask
        # at this point Xrcos has been shifted back up to the outermost octave
        Xrcos += np.log2(2.0)
        lo0mask = pointOp(log_rad, YIrcos, Xrcos[0], Xrcos[1]-Xrcos[0])

        lo0mask = lo0mask.reshape(dims[0], dims[1])
        resdft = resdft * lo0mask

        # residual highpass subband
        hi0mask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1]-Xrcos[0])

        hi0mask = hi0mask.reshape(resdft.shape[0], resdft.shape[1])
        if 'residual_highpass' in recon_keys:
            hidft = np.fft.fftshift(np.fft.fft2(self.pyr_coeffs['residual_highpass']))
        else:
            hidft = np.zeros_like(self.pyr_coeffs['residual_highpass'])
        resdft += hidft * hi0mask

        # back to the spatial domain; imaginary part is numerical noise
        outresdft = np.real(np.fft.ifft2(np.fft.ifftshift(resdft)))

        return outresdft
389 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/SteerablePyramidSpace.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .pyramid import SteerablePyramidBase
3 | from .filters import parse_filter
4 | from .c.wrapper import corrDn, upConv
5 |
6 |
class SteerablePyramidSpace(SteerablePyramidBase):
    """Steerable pyramid built with spatial-domain convolutions.

    Notes
    -----
    Transform described in [1]_, filter kernel design described in [2]_.

    Parameters
    ----------
    image : `array_like`
        2d image upon which to construct to the pyramid.
    height : 'auto' or `int`.
        The height of the pyramid. If 'auto', will automatically determine based on the size of
        `image`.
    order : {0, 1, 3, 5}.
        The Gaussian derivative order used for the steerable filters. If you want a different
        value, see SteerablePyramidFreq. Note that to achieve steerability the minimum number
        of orientation is `order` + 1, and is used here. To get more orientations at the same
        order, use the method `steer_coeffs`
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.

    Attributes
    ----------
    image : `array_like`
        The input image used to construct the pyramid.
    image_size : `tuple`
        The size of the input image.
    pyr_type : `str` or `None`
        Human-readable string specifying the type of pyramid. For base class, is None.
    edge_type : `str`
        Specifies how edges were handled.
    pyr_coeffs : `dict`
        Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
        values are 1d or 2d numpy arrays (same number of dimensions as the input image)
    pyr_size : `dict`
        Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
        tuples and values are tuples.
    is_complex : `bool`
        Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
        a value of True, all others must be False.

    References
    ----------
    .. [1] E P Simoncelli and W T Freeman, "The Steerable Pyramid: A Flexible Architecture for
       Multi-Scale Derivative Computation," Second Int'l Conf on Image Processing, Washington, DC,
       Oct 1995.
    .. [2] A Karasaridis and E P Simoncelli, "A Filter Design Technique for Steerable Pyramid
       Image Transforms", ICASSP, Atlanta, GA, May 1996.
    """

    def __init__(self, image, height='auto', order=1, edge_type='reflect1'):
        super().__init__(image=image, edge_type=edge_type)

        self.order = order
        # steerability requires at least order + 1 oriented bands
        self.num_orientations = self.order + 1
        self.filters = parse_filter("sp{:d}_filters".format(self.num_orientations - 1),
                                    normalize=False)
        self.pyr_type = 'SteerableSpace'
        self._set_num_scales('lofilt', height)

        # residual highpass: highpass-filter the input once, no subsampling
        highpass = corrDn(image=self.image, filt=self.filters['hi0filt'],
                          edge_type=self.edge_type)
        self.pyr_coeffs['residual_highpass'] = highpass
        self.pyr_size['residual_highpass'] = highpass.shape

        # the recursive levels operate on the lowpass-filtered input
        lowpass = corrDn(image=self.image, filt=self.filters['lo0filt'],
                         edge_type=self.edge_type)

        # band filters are stored flattened, one per column; assume they are
        # square (start of buildSpyrLevs)
        bfiltsz = int(np.floor(np.sqrt(self.filters['bfilts'].shape[0])))

        for scale in range(self.num_scales):
            for orientation in range(self.num_orientations):
                band_filt = self.filters['bfilts'][:, orientation].reshape(bfiltsz, bfiltsz).T
                band = corrDn(image=lowpass, filt=band_filt, edge_type=self.edge_type)
                self.pyr_coeffs[(scale, orientation)] = np.asarray(band)
                self.pyr_size[(scale, orientation)] = band.shape

            # blur and downsample by 2 in each dimension for the next scale
            lowpass = corrDn(image=lowpass, filt=self.filters['lofilt'],
                             edge_type=self.edge_type, step=(2, 2))

        self.pyr_coeffs['residual_lowpass'] = lowpass
        self.pyr_size['residual_lowpass'] = lowpass.shape

    def recon_pyr(self, order=None, edge_type=None, levels='all', bands='all'):
        """Reconstruct the image, optionally using subset of pyramid coefficients.

        Parameters
        ----------
        order : {None, 0, 1, 3, 5}.
            the Gaussian derivative order you want to use for the steerable pyramid filters used to
            reconstruct the pyramid. If None, uses the same order as that used to construct the
            pyramid.
        edge_type : {None, 'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend',
                     'dont-compute'}
            Specifies how to handle edges. Options are:

            * None (default) - use `self.edge_type`, the edge_type used to construct the pyramid
            * `'circular'` - circular convolution
            * `'reflect1'` - reflect about the edge pixels
            * `'reflect2'` - reflect, doubling the edge pixels
            * `'repeat'` - repeat the edge pixels
            * `'zero'` - assume values of zero outside image boundary
            * `'extend'` - reflect and invert
            * `'dont-compute'` - zero output when filter overhangs input boundaries.
        levels : `list`, `int`, or {`'all'`, `'residual_highpass'`}
            If `list` should contain some subset of integers from `0` to `self.num_scales-1`
            (inclusive) and `'residual_lowpass'`. If `'all'`, returned value will contain all
            valid levels. Otherwise, must be one of the valid levels.
        bands : `list`, `int`, or `'all'`.
            If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
            If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
            of the valid orientations.

        Returns
        -------
        recon : `np.array`
            The reconstructed image.
        """
        # resolve which filter set (and hence how many orientations) to use
        if order is not None:
            filts = parse_filter("sp{:d}_filters".format(order), normalize=False)
            recon_keys = self._recon_keys(levels, bands, order + 1)
        else:
            filts = self.filters
            recon_keys = self._recon_keys(levels, bands)

        # band filters are stored flattened, one per column; assume square
        bfiltsz = int(np.floor(np.sqrt(filts['bfilts'].shape[0])))

        edges = self.edge_type if edge_type is None else edge_type

        # start from the lowpass residual (or zeros if it was excluded)
        if 'residual_lowpass' in recon_keys:
            recon = self.pyr_coeffs['residual_lowpass']
        else:
            recon = np.zeros_like(self.pyr_coeffs['residual_lowpass'])

        for lev in range(self.num_scales - 1, -1, -1):
            # up-sample the running reconstruction back to this level's shape;
            # this must happen once per level regardless of which bands are kept
            recon = upConv(image=recon, filt=filts['lofilt'], edge_type=edges,
                           step=(2, 2), start=(0, 0), stop=self.pyr_size[(lev, 0)])
            # visit every possible sub-band at this level and add in only the
            # requested ones (the level loop above is needed either way for
            # the up-sampling)
            for band in range(self.num_orientations - 1, -1, -1):
                if (lev, band) in recon_keys:
                    band_filt = filts['bfilts'][:, band].reshape(bfiltsz, bfiltsz, order='F')
                    recon += upConv(image=self.pyr_coeffs[(lev, band)], filt=band_filt,
                                    edge_type=edges, stop=self.pyr_size[(lev, band)])

        # undo the initial lowpass filtering
        recon = upConv(image=recon, filt=filts['lo0filt'], edge_type=edges, stop=recon.shape)

        # add back the highpass residual if requested
        if 'residual_highpass' in recon_keys:
            recon += upConv(image=self.pyr_coeffs['residual_highpass'], filt=filts['hi0filt'],
                            edge_type=edges, start=(0, 0), step=(1, 1), stop=recon.shape)

        return recon
177 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/WaveletPyramid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .pyramid import Pyramid
3 | from .filters import parse_filter
4 | from .c.wrapper import corrDn, upConv
5 |
6 |
class WaveletPyramid(Pyramid):
    """Multiscale wavelet pyramid

    Parameters
    ----------
    image : `array_like`
        1d or 2d image upon which to construct to the pyramid.
    height : 'auto' or `int`.
        The height of the pyramid. If 'auto', will automatically determine based on the size of
        `image`.
    filter_name : {'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4', 'qmf5',
                   'qmf9', 'qmf13'}
        name of filter to use when constructing pyramid. All scaled so L-2 norm is 1.0

        * `'binomN'` - binomial coefficient filter of order N-1
        * `'haar'` - Haar wavelet
        * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
        * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
        * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.

    Attributes
    ----------
    image : `array_like`
        The input image used to construct the pyramid.
    image_size : `tuple`
        The size of the input image.
    pyr_type : `str` or `None`
        Human-readable string specifying the type of pyramid. For base class, is None.
    edge_type : `str`
        Specifies how edges were handled.
    pyr_coeffs : `dict`
        Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
        values are 1d or 2d numpy arrays (same number of dimensions as the input image)
    pyr_size : `dict`
        Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
        tuples and values are tuples.
    is_complex : `bool`
        Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
        a value of True, all others must be False.

    References
    ----------
    .. [1] J D Johnston, "A filter family designed for use in quadrature mirror filter banks",
       Proc. ICASSP, pp 291-294, 1980.
    .. [2] I Daubechies, "Orthonormal bases of compactly supported wavelets", Commun. Pure Appl.
       Math, vol. 42, pp 909-996, 1988.
    .. [3] E P Simoncelli, "Orthogonal sub-band image transforms", PhD Thesis, MIT Dept. of Elec.
       Eng. and Comp. Sci. May 1988. Also available as: MIT Media Laboratory Vision and Modeling
       Technical Report #100.
    .. [4] E P Simoncelli and E H Adelson, "Subband image coding", Subband Transforms, chapter 4,
       ed. John W Woods, Kluwer Academic Publishers, Norwell, MA, 1990, pp 143--192.
    """

    def __init__(self, image, height='auto', filter_name='qmf9', edge_type='reflect1'):
        super().__init__(image=image, edge_type=edge_type)
        self.pyr_type = 'Wavelet'

        self.filters = {}
        self.filters['lo_filter'] = parse_filter(filter_name, normalize=False)
        self.filters["hi_filter"] = WaveletPyramid._modulate_flip(self.filters['lo_filter'])
        assert self.filters['lo_filter'].shape == self.filters['hi_filter'].shape

        # Stagger sampling if filter is odd-length
        self.stagger = (self.filters['lo_filter'].size + 1) % 2

        self._set_num_scales('lo_filter', height)

        # compute the number of channels per level: 1 for an effectively-1d
        # input, 3 (lohi, hilo, hihi) for a 2d image
        if min(self.image.shape) == 1:
            self.num_orientations = 1
        else:
            self.num_orientations = 3

        self._build_pyr()

    # NOTE: fixed to be a proper @staticmethod. It was previously a plain
    # function in the class body, which worked only because every call site
    # goes through the class (`WaveletPyramid._modulate_flip(...)`); calling
    # it on an instance would have bound `self` as `lo_filter`.
    @staticmethod
    def _modulate_flip(lo_filter):
        '''construct QMF/Wavelet highpass filter from lowpass filter

        modulate by (-1)^n, reverse order (and shift by one, which is handled by the convolution
        routines). This is an extension of the original definition of QMF's (e.g., see
        Simoncelli90).

        Parameters
        ----------
        lo_filter : `array_like`
            one-dimensional array (or effectively 1d array) containing the lowpass filter to
            convert into the highpass filter.

        Returns
        -------
        hi_filter : `np.array`
            The highpass filter constructed from the lowpass filter, same shape as the lowpass
            filter.
        '''
        # check lo_filter is effectively 1D
        lo_filter_shape = lo_filter.shape
        assert lo_filter.size == max(lo_filter_shape)
        lo_filter = lo_filter.flatten()
        # indices centered so the modulation (-1)^n aligns with the filter center
        ind = np.arange(lo_filter.size, 0, -1) - (lo_filter.size + 1) // 2
        hi_filter = lo_filter[::-1] * (-1.0) ** ind

        return hi_filter.reshape(lo_filter_shape)

    def _build_next(self, image):
        """Build the next level of the Wavelet pyramid

        Should not be called by users directly, this is a helper function to construct the pyramid.

        Parameters
        ----------
        image : `array_like`
            image to use to construct next level.

        Returns
        -------
        lolo : `array_like`
            This is the result of applying the lowpass filter once if `image` is 1d, twice if it's
            2d. It's downsampled by a factor of two from the original `image`.
        hi_tuple : `tuple`
            If `image` is 1d, this just contains `hihi`, the result of applying the highpass filter
            . If `image` is 2d, it is `(lohi, hilo, hihi)`, the result of applying the lowpass then
            the highpass, the highpass then the lowpass, and the highpass twice. All will be
            downsampled by a factor of two from the original `image`.
        """
        if image.shape[1] == 1:
            # column vector: filter along the first axis only
            lolo = corrDn(image=image, filt=self.filters['lo_filter'], edge_type=self.edge_type, step=(2, 1), start=(self.stagger, 0))
            hihi = corrDn(image=image, filt=self.filters['hi_filter'], edge_type=self.edge_type, step=(2, 1), start=(1, 0))
            return lolo, (hihi, )
        elif image.shape[0] == 1:
            # row vector: filter along the second axis only
            lolo = corrDn(image=image, filt=self.filters['lo_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, self.stagger))
            hihi = corrDn(image=image, filt=self.filters['hi_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, 1))
            return lolo, (hihi, )
        else:
            # 2d image: separable filtering, first along rows then columns
            lo = corrDn(image=image, filt=self.filters['lo_filter'], edge_type=self.edge_type, step=(2, 1), start=(self.stagger, 0))
            hi = corrDn(image=image, filt=self.filters['hi_filter'], edge_type=self.edge_type, step=(2, 1), start=(1, 0))
            lolo = corrDn(image=lo, filt=self.filters['lo_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, self.stagger))
            lohi = corrDn(image=hi, filt=self.filters['lo_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, self.stagger))
            hilo = corrDn(image=lo, filt=self.filters['hi_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, 1))
            hihi = corrDn(image=hi, filt=self.filters['hi_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, 1))
            return lolo, (lohi, hilo, hihi)

    def _build_pyr(self):
        """Build the full pyramid, populating `pyr_coeffs` and `pyr_size`."""
        im = self.image
        for lev in range(self.num_scales):
            im, higher_bands = self._build_next(im)
            for j, band in enumerate(higher_bands):
                self.pyr_coeffs[(lev, j)] = band
                self.pyr_size[(lev, j)] = band.shape
        self.pyr_coeffs['residual_lowpass'] = im
        self.pyr_size['residual_lowpass'] = im.shape

    def _recon_prev(self, image, lev, recon_keys, output_size, lo_filter, hi_filter, edge_type,
                    stagger):
        """Reconstruct the previous level of the pyramid.

        Should not be called by users directly, this is a helper function for reconstructing the
        input image using pyramid coefficients.

        """
        if self.num_orientations == 1:
            if output_size[0] == 1:
                # row vector: upsample along the second axis only
                recon = upConv(image=image, filt=lo_filter.T, edge_type=edge_type, step=(1, 2), start=(0, stagger), stop=output_size)
                if (lev, 0) in recon_keys:
                    recon += upConv(image=self.pyr_coeffs[(lev, 0)], filt=hi_filter.T, edge_type=edge_type, step=(1, 2), start=(0, 1), stop=output_size)
            elif output_size[1] == 1:
                # column vector: upsample along the first axis only
                recon = upConv(image=image, filt=lo_filter, edge_type=edge_type, step=(2, 1), start=(stagger, 0), stop=output_size)
                if (lev, 0) in recon_keys:
                    recon += upConv(image=self.pyr_coeffs[(lev, 0)], filt=hi_filter, edge_type=edge_type, step=(2, 1), start=(1, 0), stop=output_size)
        else:
            # intermediate sizes after upsampling along only one axis
            lo_size = [self.pyr_size[(lev, 1)][0], output_size[1]]
            hi_size = [self.pyr_size[(lev, 0)][0], output_size[1]]

            # lowpass branch: separable upsampling, columns then rows
            tmp_recon = upConv(image=image, filt=lo_filter.T, edge_type=edge_type, step=(1, 2), start=(0, stagger), stop=lo_size)
            recon = upConv(image=tmp_recon, filt=lo_filter, edge_type=edge_type, step=(2, 1), start=(stagger, 0), stop=output_size)

            # per-band filter/start/stop arguments for the two separable
            # upConv passes (first along columns, then along rows)
            bands_recon_dict = {
                0: [{'filt': lo_filter.T, 'start': (0, stagger), 'stop': hi_size},
                    {'filt': hi_filter, 'start': (1, 0)}],
                1: [{'filt': hi_filter.T, 'start': (0, 1), 'stop': lo_size},
                    {'filt': lo_filter, 'start': (stagger, 0)}],
                2: [{'filt': hi_filter.T, 'start': (0, 1), 'stop': hi_size},
                    {'filt': hi_filter, 'start': (1, 0)}],
            }

            for band in range(self.num_orientations):
                if (lev, band) in recon_keys:
                    tmp_recon = upConv(image=self.pyr_coeffs[(lev, band)], edge_type=edge_type, step=(1, 2), **bands_recon_dict[band][0])
                    recon += upConv(image=tmp_recon, edge_type=edge_type, step=(2, 1), stop=output_size, **bands_recon_dict[band][1])

        return recon

    def recon_pyr(self, filter_name=None, edge_type=None, levels='all', bands='all'):
        """Reconstruct the input image using pyramid coefficients.

        This function reconstructs the input image using pyramid coefficients.

        Parameters
        ----------
        filter_name : {None, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4',
                       'qmf5', 'qmf9', 'qmf13'}
            name of filter to use for reconstruction. All scaled so L-2 norm is 1.0

            * None (default) - use the filters used to construct the pyramid (`self.filters`).
            * `'binomN'` - binomial coefficient filter of order N-1
            * `'haar'` - Haar wavelet
            * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
            * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
            * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
        edge_type : {None, 'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend',
                     'dont-compute'}
            Specifies how to handle edges. Options are:

            * None (default) - use `self.edge_type`, the edge_type used to construct the pyramid
            * `'circular'` - circular convolution
            * `'reflect1'` - reflect about the edge pixels
            * `'reflect2'` - reflect, doubling the edge pixels
            * `'repeat'` - repeat the edge pixels
            * `'zero'` - assume values of zero outside image boundary
            * `'extend'` - reflect and invert
            * `'dont-compute'` - zero output when filter overhangs input boundaries.
        levels : `list`, `int`, or {`'all'`, `'residual_highpass'`}
            If `list` should contain some subset of integers from `0` to `self.num_scales-1`
            (inclusive) and `'residual_lowpass'`. If `'all'`, returned value will contain all
            valid levels. Otherwise, must be one of the valid levels.
        bands : `list`, `int`, or `'all'`.
            If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
            If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
            of the valid orientations.

        Returns
        -------
        recon : `np.array`
            The reconstructed image.
        """
        # Optional args

        if filter_name is None:
            lo_filter = self.filters['lo_filter']
            hi_filter = self.filters['hi_filter']
            stagger = self.stagger
        else:
            lo_filter = parse_filter(filter_name, normalize=False)
            hi_filter = WaveletPyramid._modulate_flip(lo_filter)
            stagger = (lo_filter.size + 1) % 2

        if edge_type is None:
            edges = self.edge_type
        else:
            edges = edge_type

        recon_keys = self._recon_keys(levels, bands)

        # initialize reconstruction with the lowpass residual (or zeros if it
        # was excluded)
        if 'residual_lowpass' in recon_keys:
            recon = self.pyr_coeffs['residual_lowpass']
        else:
            recon = np.zeros_like(self.pyr_coeffs['residual_lowpass'])

        for lev in reversed(range(self.num_scales)):
            if self.num_orientations == 1:
                # 1d signal: the target size is the next-coarser band's size
                # (or the original image size at the top level)
                if lev == 0:
                    output_size = self.image.shape
                else:
                    output_size = self.pyr_size[(lev-1, 0)]
            else:
                # 2d image: recover this level's pre-downsampling size from
                # the band sizes (handles odd input dimensions)
                output_size = (self.pyr_size[(lev, 0)][0] + self.pyr_size[(lev, 1)][0],
                               self.pyr_size[(lev, 0)][1] + self.pyr_size[(lev, 1)][1])
            recon = self._recon_prev(recon, lev, recon_keys, output_size, lo_filter,
                                     hi_filter, edges, stagger)

        return recon
289 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/__init__.py:
--------------------------------------------------------------------------------
1 | from .GaussianPyramid import GaussianPyramid
2 | from .LaplacianPyramid import LaplacianPyramid
3 | from .WaveletPyramid import WaveletPyramid
4 | from .SteerablePyramidSpace import SteerablePyramidSpace
5 | from .SteerablePyramidFreq import SteerablePyramidFreq
6 | from .steer import steer, steer_to_harmonics_mtx
7 | from .pyr_utils import convert_pyr_coeffs_to_pyr, max_pyr_height
8 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/c/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LabForComputationalVision/pyrtools/d2ef019a5d8c16a52de597529a60bbdb2030e79c/src/pyrtools/pyramids/c/__init__.py
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/c/convolve.c:
--------------------------------------------------------------------------------
1 | /*
2 | ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
3 | ;;; File: convolve.c
4 | ;;; Author: Eero Simoncelli
5 | ;;; Description: General convolution code for 2D images
6 | ;;; Creation Date: Spring, 1987.
7 | ;;; MODIFICATIONS:
8 | ;;; 10/89: approximately optimized the choice of register vars on SPARCS.
9 | ;;; 6/96: Switched array types to double float.
10 | ;;; 2/97: made more robust and readable. Added STOP arguments.
11 | ;;; 8/97: Bug: when calling internal_reduce with edges in {reflect1,repeat,
12 | ;;; extend} and an even filter dimension. Solution: embed the filter
13 | ;;; in the upper-left corner of a filter with odd Y and X dimensions.
14 | ;;; ----------------------------------------------------------------
15 | ;;; Object-Based Vision and Image Understanding System (OBVIUS),
16 | ;;; Copyright 1988, Vision Science Group, Media Laboratory,
17 | ;;; Massachusetts Institute of Technology.
18 | ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
19 | */
20 |
21 | #include
22 | #include
23 | #include "convolve.h"
24 |
25 | /*
26 | --------------------------------------------------------------------
27 | Correlate FILT with IMAGE, subsampling according to START, STEP, and
28 | STOP parameters, with values placed into RESULT array. RESULT
29 | dimensions should be ceil((stop-start)/step). TEMP should be a
30 | pointer to a temporary double array the size of the filter.
31 | EDGES is a string specifying how to handle boundaries -- see edges.c.
32 | The convolution is done in 9 sections, where the border sections use
33 | specially computed edge-handling filters (see edges.c). The origin
34 | of the filter is assumed to be (floor(x_fdim/2), floor(y_fdim/2)).
35 | ------------------------------------------------------------------------ */
36 |
37 | /* abstract out the inner product computation */
38 | #define INPROD(XCNR,YCNR) \
39 | { \
40 | sum=0.0; \
41 | for (im_pos=YCNR*x_dim+XCNR, filt_pos=0, x_filt_stop=x_fdim; \
42 | x_filt_stop<=filt_size; \
43 | im_pos+=(x_dim-x_fdim), x_filt_stop+=x_fdim) \
44 | for (; \
45 | filt_pos
15 | #include
16 | #include "meta.h"
17 |
18 | #define ABS(x) (((x)>=0) ? (x) : (-(x)))
19 | #define ROOT2 1.4142135623730951
20 | #define REDUCE 0
21 | #define EXPAND 1
22 | #define IS ==
23 | #define ISNT !=
24 | #define AND &&
25 | #define OR ||
26 |
27 | typedef int (*fptr)();
28 |
29 | typedef struct
30 | {
31 | char *name;
32 | fptr func;
33 | } EDGE_HANDLER;
34 |
35 | typedef double image_type;
36 |
37 | fptr edge_function(char *edges);
38 | PYRTOOLS_EXPORT int internal_reduce(image_type *image, int x_idim, int y_idim,
39 | image_type *filt, image_type *temp, int x_fdim, int y_fdim,
40 | int x_start, int x_step, int x_stop,
41 | int y_start, int y_step, int y_stop,
42 | image_type *result, char *edges);
43 | PYRTOOLS_EXPORT int internal_expand(image_type *image,
44 | image_type *filt, image_type *temp, int x_fdim, int y_fdim,
45 | int x_start, int x_step, int x_stop,
46 | int y_start, int y_step, int y_stop,
47 | image_type *result, int x_rdim, int y_rdim, char *edges);
48 | PYRTOOLS_EXPORT int internal_wrap_reduce(image_type *image, int x_idim, int y_idim,
49 | image_type *filt, int x_fdim, int y_fdim,
50 | int x_start, int x_step, int x_stop,
51 | int y_start, int y_step, int y_stop,
52 | image_type *result);
53 | PYRTOOLS_EXPORT int internal_wrap_expand(image_type *image, image_type *filt, int x_fdim, int y_fdim,
54 | int x_start, int x_step, int x_stop,
55 | int y_start, int y_step, int y_stop,
56 | image_type *result, int x_rdim, int y_rdim);
57 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/c/edges.c:
--------------------------------------------------------------------------------
1 | /*
2 | ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
3 | ;;; File: edges.c
4 | ;;; Author: Eero Simoncelli
5 | ;;; Description: Boundary handling routines for use with convolve.c
6 | ;;; Creation Date: Spring 1987.
7 | ;;; MODIFIED, 6/96, to operate on double float arrays.
8 | ;;; MODIFIED by dgp, 4/1/97, to support THINK C.
9 | ;;; MODIFIED, 8/97: reflect1, reflect2, repeat, extend upgraded to
10 | ;;; work properly for non-symmetric filters. Added qreflect2 to handle
11 | ;;; even-length QMF's which broke under the reflect2 modification.
12 | ;;; ----------------------------------------------------------------
13 | ;;; Object-Based Vision and Image Understanding System (OBVIUS),
14 | ;;; Copyright 1988, Vision Science Group, Media Laboratory,
15 | ;;; Massachusetts Institute of Technology.
16 | ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
17 | */
18 |
19 | /* This file contains functions which determine how edges are to be
20 | handled when performing convolutions of images with linear filters.
21 | Any edge handling function which is local and linear may be defined,
22 | except (unfortunately) constants cannot be added. So to treat the
23 | edges as if the image is surrounded by a gray field, you must paste it
24 | into a gray image, convolve, and crop it out... The main convolution
25 | functions are called internal_reduce and internal_expand and are
26 | defined in the file convolve.c. The idea is that the convolution
27 | function calls the edge handling function which computes a new filter
28 | based on the old filter and the distance to the edge of the image.
29 | For example, reflection is done by reflecting the filter through the
30 | appropriate axis and summing. Currently defined functions are listed
31 | below.
32 | */
33 |
34 |
35 | #include
36 | #include
37 | #include
38 | #include "convolve.h"
39 |
40 | #define sgn(a) ( ((a)>0)?1:(((a)<0)?-1:0) )
41 | #define clip(a,mn,mx) ( ((a)<(mn))?(mn):(((a)>=(mx))?(mx-1):(a)) )
42 |
43 | int reflect1(), reflect2(), qreflect2(), repeat(), zero(), Extend(), nocompute();
44 | int ereflect(), predict();
45 |
46 | /* Lookup table matching a descriptive string to the edge-handling function */
47 | #if !THINK_C
48 | static EDGE_HANDLER edge_foos[] =
49 | {
50 | { "dont-compute", nocompute }, /* zero output for filter touching edge */
51 | { "zero", zero }, /* zero outside of image */
52 | { "repeat", repeat }, /* repeat edge pixel */
53 | { "reflect1", reflect1 }, /* reflect about edge pixels */
54 | { "reflect2", reflect2 }, /* reflect image, including edge pixels */
55 | { "qreflect2", qreflect2 }, /* reflect image, including edge pixels
56 | for even-length QMF decompositions */
57 | { "extend", Extend }, /* extend (reflect & invert) */
58 | { "ereflect", ereflect }, /* orthogonal QMF reflection */
59 | };
60 | #else
61 | /*
62 | This is really stupid, but THINK C won't allow initialization of static variables in
63 | a code resource with string addresses. So we do it this way.
64 | The 68K code for a MATLAB 4 MEX file can only be created by THINK C.
65 | However, for MATLAB 5, we'll be able to use Metrowerks CodeWarrior for both 68K and PPC, so this
66 | cludge can be dropped when we drop support for MATLAB 4.
67 | Denis Pelli, 4/1/97.
68 | */
69 | static EDGE_HANDLER edge_foos[8];
70 |
71 | void InitializeTable(EDGE_HANDLER edge_foos[])
72 | {
73 | static int i=0;
74 |
75 | if(i>0) return;
76 | edge_foos[i].name="dont-compute";
77 | edge_foos[i++].func=nocompute;
78 | edge_foos[i].name="zero";
79 | edge_foos[i++].func=zero;
80 | edge_foos[i].name="repeat";
81 | edge_foos[i++].func=repeat;
82 | edge_foos[i].name="reflect1";
83 | edge_foos[i++].func=reflect1;
84 | edge_foos[i].name="reflect2";
85 | edge_foos[i++].func=reflect2;
86 | edge_foos[i].name="qreflect2";
87 | edge_foos[i++].func=qreflect2;
88 | edge_foos[i].name="extend";
89 | edge_foos[i++].func=Extend;
90 | edge_foos[i].name="ereflect";
91 | edge_foos[i++].func=ereflect;
92 | }
93 | #endif
94 |
95 | /*
96 | Function looks up an edge handler id string in the structure above, and
97 | returns the associated function
98 | */
99 | fptr edge_function(char *edges)
100 | {
101 | int i;
102 |
103 | #if THINK_C
104 | InitializeTable(edge_foos);
105 | #endif
106 | for (i = 0; i1) OR (x_pos<-1) OR (y_pos>1) OR (y_pos<-1) )
150 | for (i=0; i0)?(x_pos-1):((x_pos<0)?(x_pos+1):0));
168 | int y_start = x_dim * ((y_pos>0)?(y_pos-1):((y_pos<0)?(y_pos+1):0));
169 | int i;
170 |
171 | for (i=0; i= 0) AND (y_res < filt_sz))
177 | for (x_filt=y_filt, x_res=x_start;
178 | x_filt= 0) AND (x_res < x_dim))
181 | result[y_res+x_res] = filt[x_filt];
182 | return(0);
183 | }
184 |
185 |
186 | /* --------------------------------------------------------------------
187 | reflect1() - Reflection through the edge pixels. Continuous, but
188 | discontinuous first derivative. This is the right thing to do if you
189 | are subsampling by 2, since it maintains parity (even pixels positions
190 | remain even, odd ones remain odd).
191 | */
192 |
193 | int reflect1(filt,x_dim,y_dim,x_pos,y_pos,result,r_or_e)
194 | register double *filt, *result;
195 | register int x_dim;
196 | int y_dim, x_pos, y_pos, r_or_e;
197 | {
198 | int filt_sz = x_dim*y_dim;
199 | register int y_filt,x_filt, y_res, x_res;
200 | register int x_base = (x_pos>0)?(x_dim-1):0;
201 | register int y_base = x_dim * ((y_pos>0)?(y_dim-1):0);
202 | int x_overhang = (x_pos>0)?(x_pos-1):((x_pos<0)?(x_pos+1):0);
203 | int y_overhang = x_dim * ((y_pos>0)?(y_pos-1):((y_pos<0)?(y_pos+1):0));
204 | int i;
205 | int mx_pos = (x_pos<0)?(x_dim/2):((x_dim-1)/2);
206 | int my_pos = x_dim * ((y_pos<0)?(y_dim/2):((y_dim-1)/2));
207 |
208 | for (i=0; i y_base-filt_sz;
224 | y_filt-=x_dim, y_res-=x_dim)
225 | {
226 | for (x_res=x_base, x_filt=x_base-x_overhang;
227 | x_filt > x_base-x_dim;
228 | x_res--, x_filt--)
229 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
230 | if ((x_overhang ISNT mx_pos) AND (x_pos ISNT 0))
231 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang;
232 | x_filt > x_base-x_dim;
233 | x_res--, x_filt--)
234 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
235 | }
236 | if ((y_overhang ISNT my_pos) AND (y_pos ISNT 0))
237 | for (y_res=y_base, y_filt = y_base-2*my_pos+y_overhang;
238 | y_filt > y_base-filt_sz;
239 | y_filt-=x_dim, y_res-=x_dim)
240 | {
241 | for (x_res=x_base, x_filt=x_base-x_overhang;
242 | x_filt > x_base-x_dim;
243 | x_res--, x_filt--)
244 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
245 | if ((x_overhang ISNT mx_pos) AND (x_pos ISNT 0))
246 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang;
247 | x_filt > x_base-x_dim;
248 | x_res--, x_filt--)
249 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
250 | }
251 | }
252 |
253 | return(0);
254 | }
255 |
256 | /* --------------------------------------------------------------------
257 | reflect2() - Reflect image at boundary. The edge pixel is repeated,
258 | then the next pixel, etc. Continuous, but discontinuous first
259 | derivative.
260 | */
261 |
262 | int reflect2(filt,x_dim,y_dim,x_pos,y_pos,result,r_or_e)
263 | register double *filt, *result;
264 | register int x_dim;
265 | int y_dim, x_pos, y_pos, r_or_e;
266 | {
267 | int filt_sz = x_dim*y_dim;
268 | register int y_filt,x_filt, y_res, x_res;
269 | register int x_base = (x_pos>0)?(x_dim-1):0;
270 | register int y_base = x_dim * ((y_pos>0)?(y_dim-1):0);
271 | int x_overhang = (x_pos>0)?(x_pos-1):((x_pos<0)?(x_pos+1):0);
272 | int y_overhang = x_dim * ((y_pos>0)?(y_pos-1):((y_pos<0)?(y_pos+1):0));
273 | int i;
274 | int mx_pos = (x_pos<0)?(x_dim/2):((x_dim-1)/2);
275 | int my_pos = x_dim * ((y_pos<0)?(y_dim/2):((y_dim-1)/2));
276 |
277 | for (i=0; i0)?x_dim:0);
281 | y_filt0)?1:0);
286 | x_filt y_base-filt_sz;
299 | y_filt-=x_dim, y_res-=x_dim)
300 | {
301 | for (x_res=x_base, x_filt=x_base-x_overhang;
302 | x_filt > x_base-x_dim;
303 | x_res--, x_filt--)
304 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
305 | if (x_pos ISNT 0)
306 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang-1;
307 | x_filt > x_base-x_dim;
308 | x_res--, x_filt--)
309 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
310 | }
311 | if (y_pos ISNT 0)
312 | for (y_res=y_base, y_filt = y_base-2*my_pos+y_overhang-x_dim;
313 | y_filt > y_base-filt_sz;
314 | y_filt-=x_dim, y_res-=x_dim)
315 | {
316 | for (x_res=x_base, x_filt=x_base-x_overhang;
317 | x_filt > x_base-x_dim;
318 | x_res--, x_filt--)
319 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
320 | if (x_pos ISNT 0)
321 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang-1;
322 | x_filt > x_base-x_dim;
323 | x_res--, x_filt--)
324 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
325 | }
326 | }
327 |
328 | return(0);
329 | }
330 |
331 |
332 | /* --------------------------------------------------------------------
333 | qreflect2() - Modified version of reflect2 that works properly for
334 | even-length QMF filters.
335 | */
336 |
/* NOTE(review): delegates to reflect2() with the r_or_e flag forced to 0
   (REDUCE); the caller-supplied r_or_e argument is intentionally ignored.
   This is what makes even-length QMF decompositions reconstruct correctly
   (see the 8/97 modification note at the top of this file). */
int qreflect2(filt,x_dim,y_dim,x_pos,y_pos,result,r_or_e)
double *filt, *result;
int x_dim, y_dim, x_pos, y_pos, r_or_e;
{
reflect2(filt,x_dim,y_dim,x_pos,y_pos,result,0);
return(0);
}
344 |
345 | /* --------------------------------------------------------------------
346 | repeat() - repeat edge pixel. Continuous, with discontinuous first
347 | derivative.
348 | */
349 |
350 | int repeat(filt,x_dim,y_dim,x_pos,y_pos,result,r_or_e)
351 | register double *filt, *result;
352 | register int x_dim;
353 | int y_dim, x_pos, y_pos, r_or_e;
354 | {
355 | register int y_filt,x_filt, y_res,x_res, y_tmp, x_tmp;
356 | register int x_base = (x_pos>0)?(x_dim-1):0;
357 | register int y_base = x_dim * ((y_pos>0)?(y_dim-1):0);
358 | int x_overhang = ((x_pos>0)?(x_pos-1):((x_pos<0)?(x_pos+1):0));
359 | int y_overhang = x_dim * ((y_pos>0)?(y_pos-1):((y_pos<0)?(y_pos+1):0));
360 | int filt_sz = x_dim*y_dim;
361 | int mx_pos = (x_dim/2);
362 | int my_pos = x_dim * (y_dim/2);
363 | int i;
364 |
365 | for (i=0; i=0)?((y_res=0)?((x_res y_base-filt_sz;
382 | y_filt-=x_dim, y_res-=x_dim)
383 | if ((x_base-x_overhang) ISNT mx_pos)
384 | for (x_res=x_base, x_filt=x_base-ABS(x_overhang);
385 | x_filt > x_base-x_dim;
386 | x_res--, x_filt--)
387 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
388 | else /* ((x_base-x_overhang) IS mx_pos) */
389 | for (x_res=x_base, x_filt=x_base-ABS(x_overhang);
390 | x_filt > x_base-x_dim;
391 | x_filt--, x_res--)
392 | for(x_tmp=x_filt; x_tmp > x_base-x_dim; x_tmp--)
393 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_tmp)];
394 | else /* ((y_base-y_overhang) IS my_pos) */
395 | for (y_res=y_base, y_filt=y_base-ABS(y_overhang);
396 | y_filt > y_base-filt_sz;
397 | y_filt-=x_dim, y_res-=x_dim)
398 | for (y_tmp=y_filt; y_tmp > y_base-filt_sz; y_tmp-=x_dim)
399 | if ((x_base-x_overhang) ISNT mx_pos)
400 | for (x_res=x_base, x_filt=x_base-ABS(x_overhang);
401 | x_filt > x_base-x_dim;
402 | x_filt--, x_res--)
403 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_tmp)+ABS(x_filt)];
404 | else /* ((x_base-x_overhang) IS mx_pos) */
405 | for (x_res=x_base, x_filt=x_base-ABS(x_overhang);
406 | x_filt > x_base-x_dim;
407 | x_filt--, x_res--)
408 | for (x_tmp=x_filt; x_tmp > x_base-x_dim; x_tmp--)
409 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_tmp)+ABS(x_tmp)];
410 | } /* End, if r_or_e IS EXPAND */
411 |
412 | return(0);
413 | }
414 |
415 | /* --------------------------------------------------------------------
416 | extend() - Extend image by reflecting and inverting about edge pixel
417 | value. Maintains continuity in intensity AND first derivative (but
418 | not higher derivs).
419 | */
420 |
421 | int Extend(filt,x_dim,y_dim,x_pos,y_pos,result,r_or_e)
422 | register double *filt, *result;
423 | register int x_dim;
424 | int y_dim, x_pos, y_pos, r_or_e;
425 | {
426 | int filt_sz = x_dim*y_dim;
427 | register int y_filt,x_filt, y_res,x_res, y_tmp, x_tmp;
428 | register int x_base = (x_pos>0)?(x_dim-1):0;
429 | register int y_base = x_dim * ((y_pos>0)?(y_dim-1):0);
430 | int x_overhang = (x_pos>0)?(x_pos-1):((x_pos<0)?(x_pos+1):0);
431 | int y_overhang = x_dim * ((y_pos>0)?(y_pos-1):((y_pos<0)?(y_pos+1):0));
432 | int mx_pos = (x_pos<0)?(x_dim/2):((x_dim-1)/2);
433 | int my_pos = x_dim * ((y_pos<0)?(y_dim/2):((y_dim-1)/2));
434 | int i;
435 |
436 | for (i=0; i=0) AND (y_res=0) AND (x_res=0) AND (x_res y_base-filt_sz;
474 | y_filt-=x_dim, y_res-=x_dim)
475 | {
476 | for (x_res=x_base, x_filt=x_base-x_overhang;
477 | x_filt > x_base-x_dim;
478 | x_res--, x_filt--)
479 | result[ABS(y_res)+ABS(x_res)] += filt[ABS(y_filt)+ABS(x_filt)];
480 | if (x_pos ISNT 0){
481 | if (x_overhang ISNT mx_pos){
482 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang;
483 | x_filt > x_base-x_dim;
484 | x_res--, x_filt--)
485 | result[ABS(y_res)+ABS(x_res)] -= filt[ABS(y_filt)+ABS(x_filt)];
486 | }else{ /* x_overhang IS mx_pos */
487 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang-1;
488 | x_filt > x_base-x_dim;
489 | x_res--, x_filt--)
490 | for (x_tmp=x_filt; x_tmp > x_base-x_dim; x_tmp--)
491 | result[ABS(y_res)+ABS(x_res)] += 2*filt[ABS(y_filt)+ABS(x_tmp)];
492 | }
493 | }
494 | }
495 | if (y_pos ISNT 0){
496 | if (y_overhang ISNT my_pos){
497 | for (y_res=y_base, y_filt = y_base-2*my_pos+y_overhang;
498 | y_filt > y_base-filt_sz;
499 | y_filt-=x_dim, y_res-=x_dim)
500 | {
501 | for (x_res=x_base, x_filt=x_base-x_overhang;
502 | x_filt > x_base-x_dim;
503 | x_res--, x_filt--)
504 | result[ABS(y_res)+ABS(x_res)] -= filt[ABS(y_filt)+ABS(x_filt)];
505 | if ((x_pos ISNT 0) AND (x_overhang ISNT mx_pos))
506 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang;
507 | x_filt > x_base-x_dim;
508 | x_res--, x_filt--)
509 | result[ABS(y_res)+ABS(x_res)] -= filt[ABS(y_filt)+ABS(x_filt)];
510 | }
511 | }else{ /* y_overhang IS my_pos */
512 | for (y_res=y_base, y_filt = y_base-2*my_pos+y_overhang-x_dim;
513 | y_filt > y_base-filt_sz;
514 | y_res-=x_dim, y_filt-=x_dim)
515 | for (y_tmp=y_filt; y_tmp > y_base-filt_sz; y_tmp-=x_dim)
516 | {
517 | for (x_res=x_base, x_filt=x_base-x_overhang;
518 | x_filt > x_base-x_dim;
519 | x_res--, x_filt--)
520 | result[ABS(y_res)+ABS(x_res)] += 2*filt[ABS(y_tmp)+ABS(x_filt)];
521 | if ((x_pos ISNT 0) AND (x_overhang IS mx_pos))
522 | for (x_res=x_base, x_filt=x_base-2*mx_pos+x_overhang-1;
523 | x_filt > x_base-x_dim;
524 | x_res--, x_filt--)
525 | for (x_tmp=x_filt; x_tmp > x_base-x_dim; x_tmp--)
526 | result[ABS(y_res)+ABS(x_res)] += 2*filt[ABS(y_tmp)+ABS(x_tmp)];
527 | }
528 | }
529 | }
530 | } /* r_or_e ISNT REDUCE */
531 |
532 | return(0);
533 | }
534 |
535 | /* --------------------------------------------------------------------
536 | predict() - Simple prediction. Like zero, but multiplies the result
537 | by the reciprocal of the percentage of filter being used. (i.e. if
538 | 50% of the filter is hanging over the edge of the image, multiply the
539 | taps being used by 2). */
540 |
541 | int predict(filt,x_dim,y_dim,x_pos,y_pos,result,r_or_e)
542 | register double *filt, *result;
543 | register int x_dim;
544 | int y_dim, x_pos, y_pos, r_or_e;
545 | {
546 | register int y_filt,x_filt, y_res,x_res;
547 | register double taps_used = 0.0; /* int *** */
548 | register double fraction = 0.0;
549 | int filt_sz = x_dim*y_dim;
550 | int x_start = ((x_pos>0)?(x_pos-1):((x_pos<0)?(x_pos+1):0));
551 | int y_start = x_dim * ((y_pos>0)?(y_pos-1):((y_pos<0)?(y_pos+1):0));
552 | int i;
553 |
554 | for (i=0; i= 0) AND (y_res < filt_sz))
560 | for (x_filt=y_filt, x_res=x_start;
561 | x_filt= 0) AND (x_res < x_dim))
564 | {
565 | result[y_res+x_res] = filt[x_filt];
566 | taps_used += ABS(filt[x_filt]);
567 | }
568 |
569 | if (r_or_e IS REDUCE)
570 | {
571 | /* fraction = ( (double) filt_sz ) / ( (double) taps_used ); */
572 | for (i=0; i0)?(x_dim-1):0;
593 | register int y_base = x_dim * ((y_pos>0)?(y_dim-1):0);
594 | int filt_sz = x_dim*y_dim;
595 | int x_overhang = (x_pos>1)?(x_pos-x_dim):((x_pos<-1)?(x_pos+1):0);
596 | int y_overhang = x_dim * ( (y_pos>1)?(y_pos-y_dim):((y_pos<-1)?(y_pos+1):0) );
597 | int i;
598 | double norm,onorm;
599 |
600 | for (i=0; i
2 | #include
3 | #include "internal_pointOp.h"
4 |
5 | /* Use linear interpolation on a lookup table.
6 | Taken from OBVIUS. EPS, Spring, 1987.
7 | */
8 | void internal_pointop (im, res, size, lut, lutsize, origin, increment, warnings)
9 | register double *im, *res, *lut;
10 | register double origin, increment;
11 | register int size, lutsize, warnings;
12 | {
13 | register int i, index;
14 | register double pos;
15 | register int l_unwarned = warnings;
16 | register int r_unwarned = warnings;
17 |
18 | lutsize = lutsize - 2; /* Maximum index value */
19 |
20 | /* printf("size=%d origin=%f lutsize=%d increment=%f\n",size, origin, lutsize,
21 | increment); */
22 |
23 | if (increment > 0)
24 | for (i=0; i lutsize)
38 | {
39 | index = lutsize;
40 | if (r_unwarned)
41 | {
42 | printf("Warning: Extrapolating to right of lookup table...\n");
43 | r_unwarned = 0;
44 | }
45 | }
46 | res[i] = lut[index] + (lut[index+1] - lut[index]) * (pos - index);
47 | if(isnan(res[i]))
48 | printf("**NAN: lut[%d]=%f lut[%d]=%f pos=%f index=%d\n", index,
49 | lut[index], index+1, lut[index+1], pos, index);
50 | }
51 | else
52 | for (i=0; i
18 |
19 | #include "convolve.h"
20 |
21 | /*
22 | --------------------------------------------------------------------
23 | Performs correlation (i.e., convolution with filt(-x,-y)) of FILT
24 | with IMAGE followed by subsampling (a.k.a. REDUCE in Burt&Adelson81).
25 | The operations are combined to avoid unnecessary computation of the
26 | convolution samples that are to be discarded in the subsampling
27 | operation. The convolution is done in 9 sections so that mod
28 | operations are not performed unnecessarily. The subsampling lattice
29 | is specified by the START, STEP and STOP parameters.
30 | -------------------------------------------------------------------- */
31 |
32 | /* abstract out the inner product computation */
33 | #define INPROD(YSTART,YIND,XSTART,XIND) \
34 | { \
35 | sum=0.0; \
36 | for (y_im=YSTART, filt_pos=0, x_filt_stop=x_fdim; \
37 | x_filt_stop<=filt_size; \
38 | y_im++, x_filt_stop+=x_fdim) \
39 | for (x_im=XSTART ; \
40 | filt_pos 0:
19 | lib = ctypes.cdll.LoadLibrary(libpath[0])
20 | else:
21 | warnings.warn("Can't load in C code, something went wrong in your install!")
22 |
23 |
def corrDn(image, filt, edge_type='reflect1', step=(1, 1), start=(0, 0), stop=None):
    """Compute correlation of image with filt, followed by downsampling.

    These arguments should be 1D or 2D arrays, and image must be larger (in both dimensions) than
    filt. The origin of filt is assumed to be floor(size(filt)/2)+1.

    Downsampling factors are determined by step (optional, default=(1, 1)), which should be a
    2-tuple (y, x).

    The window over which the convolution occurs is specified by start (optional, default=(0, 0)),
    and stop (optional, default=size(image)).

    NOTE: this operation corresponds to multiplication of a signal vector by a matrix whose rows
    contain copies of the filt shifted by multiples of step. See `upConv` for the operation
    corresponding to the transpose of this matrix.

    WARNING: if both the image and filter are 1d, they must be 1d in the same dimension. E.g., if
    image.shape is (1, 36), then filt.shape must be (1, 5) and NOT (5, 1). If they're both 1d and
    1d in different dimensions, then this may encounter a segfault. I've not been able to find a
    way to avoid that within this function (simply reshaping it does not work).

    Arguments
    ---------
    image : `array_like`
        1d or 2d array containing the image to correlate and downsample.
    filt : `array_like`
        1d or 2d array containing the filter to use for correlation and downsampling.
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.
    step : `tuple`
        2-tuple (y, x) which determines the downsampling factor
    start : `tuple`
        2-tuple which specifies the start of the window over which we perform the convolution
    stop : `tuple` or None
        2-tuple which specifies the end of the window over which we perform the convolution. If
        None, perform convolution over the whole image

    Returns
    -------
    result : `np.array`
        the correlated and downsampled array

    """
    # Work on float64 copies: the C routines read and write contiguous double buffers.
    image = image.copy().astype(float)
    filt = filt.copy().astype(float)

    if image.shape[0] < filt.shape[0] or image.shape[1] < filt.shape[1]:
        raise Exception("Signal smaller than filter in corresponding dimension: ", image.shape, filt.shape, " see parse filter")

    if edge_type not in ['circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute']:
        raise Exception("Don't know how to do convolution with edge_type %s!" % edge_type)

    # The C code expects a 2d filter; treat a 1d filter as a single row.
    if filt.ndim == 1:
        filt = filt.reshape(1, -1)

    if stop is None:
        stop = (image.shape[0], image.shape[1])

    # Output size in each dimension is ceil((stop - start) / step).
    rxsz = len(range(start[0], stop[0], step[0]))
    rysz = len(range(start[1], stop[1], step[1]))
    result = np.zeros((rxsz, rysz))

    if edge_type == 'circular':
        # Circular boundaries use the dedicated wrap-around C routine (no scratch needed).
        lib.internal_wrap_reduce(image.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                 image.shape[1], image.shape[0],
                                 filt.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                 filt.shape[1], filt.shape[0],
                                 start[1], step[1], stop[1], start[0], step[0],
                                 stop[0],
                                 result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
    else:
        # All other edge handlers need scratch space the size of the filter.
        tmp = np.zeros((filt.shape[0], filt.shape[1]))
        lib.internal_reduce(image.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            image.shape[1], image.shape[0],
                            filt.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            tmp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            filt.shape[1], filt.shape[0],
                            start[1], step[1], stop[1], start[0], step[0],
                            stop[0],
                            result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            edge_type.encode('ascii'))

    return result
115 |
116 |
def upConv(image, filt, edge_type='reflect1', step=(1, 1), start=(0, 0), stop=None):
    """Upsample matrix image, followed by convolution with matrix filt.

    These arguments should be 1D or 2D matrices, and image must be larger (in both dimensions) than
    filt. The origin of filt is assumed to be floor(size(filt)/2)+1.

    Upsampling factors are determined by step (optional, default=(1, 1)),
    a 2-tuple (y, x).

    The window over which the convolution occurs is specified by start (optional, default=(0, 0)),
    and stop (optional, default = step .* (size(IM) + floor((start-1)./step))).

    NOTE: this operation corresponds to multiplication of a signal vector by a matrix whose columns
    contain copies of the time-reversed (or space-reversed) FILT shifted by multiples of STEP. See
    corrDn.m for the operation corresponding to the transpose of this matrix.

    WARNING: if both the image and filter are 1d, they must be 1d in the same dimension. E.g., if
    image.shape is (1, 36), then filt.shape must be (1, 5) and NOT (5, 1). If they're both 1d and
    1d in different dimensions, then this may encounter a segfault. I've not been able to find a
    way to avoid that within this function (simply reshaping it does not work).

    Arguments
    ---------
    image : `array_like`
        1d or 2d array containing the image to upsample and convolve.
    filt : `array_like`
        1d or 2d array containing the filter to use for upsampling and convolution.
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.
    step : `tuple`
        2-tuple (y, x) which determines the upsampling factor
    start : `tuple`
        2-tuple which specifies the start of the window over which we perform the convolution.
    stop : `tuple` or None
        2-tuple which specifies the end of the window over which we perform the convolution. If
        None, perform convolution over the whole image

    Returns
    -------
    result : `np.array`
        the upsampled and convolved array

    """
    # Work on float64 copies: the C routines read and write contiguous double buffers.
    image = image.copy().astype(float)
    filt = filt.copy().astype(float)

    # Treat a 1d image as a single column so the shape checks below work.
    if image.ndim == 1:
        image = image.reshape(-1, 1)

    # Size of the image after upsampling; the filter must fit inside it.
    image_shape = (image.shape[0] * step[0], image.shape[1] * step[1])

    if image_shape[0] < filt.shape[0] or image_shape[1] < filt.shape[1]:
        raise Exception("Signal smaller than filter in corresponding dimension: ", image_shape, filt.shape, " see parse filter")

    if edge_type not in ['circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend',
                         'dont-compute']:
        raise Exception("Don't know how to do convolution with edge_type %s!" % edge_type)

    # from upConv.c, the c code that gets compiled in the matlab version: upConv has a bug for
    # even-length kernels when using the reflect1, extend, or repeat edge-handlers
    if ((edge_type in ["reflect1", "extend", "repeat"]) and
        (filt.shape[0] % 2 == 0 or filt.shape[1] % 2 == 0)):
        # Work around it by zero-padding the even-length 1d filter to odd length.
        if filt.shape[1] == 1:
            filt = np.append(filt, 0.0)
            filt = np.reshape(filt, (len(filt), 1))
        elif filt.shape[0] == 1:
            filt = np.append(filt, 0.0)
            filt = np.reshape(filt, (1, len(filt)))
        else:
            raise Exception('Even sized 2D filters not yet supported by upConv.')

    if stop is None:
        stop = [imshape_d * step_d for imshape_d, step_d in zip(image.shape, step)]

    result = np.zeros((stop[1], stop[0]))

    # Scratch buffer the size of the filter, required by the non-circular C routine.
    temp = np.zeros((filt.shape[1], filt.shape[0]))

    if edge_type == 'circular':
        lib.internal_wrap_expand(image.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                 filt.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                 filt.shape[1], filt.shape[0], start[1],
                                 step[1], stop[1], start[0], step[0], stop[0],
                                 result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                                 stop[1], stop[0])
    else:
        lib.internal_expand(image.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            filt.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            temp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            filt.shape[1], filt.shape[0], start[1], step[1],
                            stop[1], start[0], step[0], stop[0],
                            result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                            stop[1], stop[0], edge_type.encode('ascii'))
    result = np.reshape(result, stop)

    return result
221 |
222 |
def pointOp(image, lut, origin, increment, warnings=False):
    """Apply a point operation, specified by lookup table `lut`, to `image`

    This function is very fast and allows extrapolation beyond the lookup table domain. The
    drawbacks are that the lookup table must be equi-spaced, and the interpolation is linear.

    Arguments
    ---------
    image : `array_like`
        1d or 2d array
    lut : `array_like`
        a row or column vector, assumed to contain (equi-spaced) samples of the function.
    origin : `float`
        specifies the abscissa associated with the first sample
    increment : `float`
        specifies the spacing between samples.
    warnings : `bool`
        whether to print a warning whenever the lookup table is extrapolated

    Returns
    -------
    result : `np.array`
        array with the same shape as `image`, containing the interpolated lookup values

    """
    # The C routine reads and writes contiguous double buffers. Unlike corrDn/upConv,
    # the original code passed the raw input buffer straight through, silently
    # misinterpreting integer or non-contiguous inputs -- cast defensively here.
    image = np.ascontiguousarray(image, dtype=np.float64)
    # Flatten the lut so its sample count is well-defined for both row (1, N)
    # and column (N, 1) vectors.
    lut = np.ascontiguousarray(lut, dtype=np.float64).ravel()
    result = np.empty_like(image)
    # this way we can use python booleans when calling
    warnings = 1 if warnings else 0
    # image.size (rather than shape[0] * shape[1]) also handles 1d input,
    # which the docstring promises to support.
    lib.internal_pointop(image.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                         result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                         image.size,
                         lut.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                         lut.shape[0],
                         ctypes.c_double(origin),
                         ctypes.c_double(increment), warnings)

    return np.asarray(result)
258 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/pyr_utils.py:
--------------------------------------------------------------------------------
1 | import functools
2 | from operator import mul
3 |
4 |
def convert_pyr_coeffs_to_pyr(pyr_coeffs):
    """this function takes a 'new pyramid' and returns the coefficients as a list

    this is to enable backwards compatibility

    Parameters
    ----------
    pyr_coeffs : `dict`
        The `pyr_coeffs` attribute of a `pyramid`. The input dict is not modified.

    Returns
    -------
    coeffs : `list`
        list of `np.array`, which contains the pyramid coefficients in each band, in order from
        bottom of the pyramid to top (going through the orientations in order)
    highpass : `np.array` or None
        either the residual highpass from the pyramid or, if that doesn't exist, None
    lowpass : `np.array` or None
        either the residual lowpass from the pyramid or, if that doesn't exist, None

    """
    # Work on a shallow copy: the original implementation popped the residual
    # bands out of the caller's dict, mutating the pyramid itself as a side effect.
    pyr_coeffs = dict(pyr_coeffs)
    highpass = pyr_coeffs.pop('residual_highpass', None)
    lowpass = pyr_coeffs.pop('residual_lowpass', None)
    # Remaining keys are (scale, orientation) tuples; sorting them yields
    # bottom-of-pyramid-first order, orientations in order within each scale.
    coeffs = [i[1] for i in sorted(pyr_coeffs.items(), key=lambda x: x[0])]
    return coeffs, highpass, lowpass
30 |
31 |
def max_pyr_height(imsz, filtsz):
    '''Compute maximum pyramid height for given image and filter sizes.

    Specifically, this computes the number of corrDn operations that can be sequentially performed
    when subsampling by a factor of 2.

    Parameters
    ----------
    imsz : `tuple` or `int`
        the size of the image (should be 2-tuple if image is 2d, `int` if it's 1d)
    filtsz : `tuple` or `int`
        the size of the filter (should be 2-tuple if image is 2d, `int` if it's 1d)

    Returns
    -------
    max_pyr_height : `int`
        The maximum height of the pyramid
    '''
    # both arguments must be ints, or both must be tuples
    assert (isinstance(imsz, int) and isinstance(filtsz, int)) or (
        isinstance(imsz, tuple) and isinstance(filtsz, tuple))
    # a tuple describing a 1D signal collapses to the scalar case
    if isinstance(imsz, tuple) and (len(imsz) == 1 or 1 in imsz):
        imsz = functools.reduce(mul, imsz)
        filtsz = functools.reduce(mul, filtsz)
    height = 0
    if isinstance(imsz, int):
        # 1D: halve until the signal is smaller than the filter
        while imsz >= filtsz:
            imsz //= 2
            height += 1
    else:
        # 2D: halve both dimensions until the smaller image dimension drops
        # below the larger filter dimension
        while min(imsz) >= max(filtsz):
            imsz = (imsz[0] // 2, imsz[1] // 2)
            height += 1
    return height
69 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/pyramid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import warnings
3 | from .pyr_utils import max_pyr_height
4 | from .filters import named_filter
5 | from .steer import steer
6 |
7 |
8 | class Pyramid:
9 | """Base class for multiscale pyramids
10 |
11 | You should not instantiate this base class, it is instead inherited by the other classes found
12 | in this module.
13 |
14 | Parameters
15 | ----------
16 | image : `array_like`
17 | 1d or 2d image upon which to construct to the pyramid.
18 | edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
19 | Specifies how to handle edges. Options are:
20 |
21 | * `'circular'` - circular convolution
22 | * `'reflect1'` - reflect about the edge pixels
23 | * `'reflect2'` - reflect, doubling the edge pixels
24 | * `'repeat'` - repeat the edge pixels
25 | * `'zero'` - assume values of zero outside image boundary
26 | * `'extend'` - reflect and invert
27 | * `'dont-compute'` - zero output when filter overhangs imput boundaries.
28 |
29 | Attributes
30 | ----------
31 | image : `array_like`
32 | The input image used to construct the pyramid.
33 | image_size : `tuple`
34 | The size of the input image.
35 | pyr_type : `str` or `None`
36 | Human-readable string specifying the type of pyramid. For base class, is None.
37 | edge_type : `str`
38 | Specifies how edges were handled.
39 | pyr_coeffs : `dict`
40 | Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
41 | values are 1d or 2d numpy arrays (same number of dimensions as the input image)
42 | pyr_size : `dict`
43 | Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
44 | tuples and values are tuples.
45 | is_complex : `bool`
46 | Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
47 | a value of True, all others must be False.
48 | """
49 |
50 | def __init__(self, image, edge_type):
51 |
52 | self.image = np.asarray(image).astype(float)
53 | if self.image.ndim == 1:
54 | self.image = self.image.reshape(-1, 1)
55 | assert self.image.ndim == 2, "Error: Input signal must be 1D or 2D."
56 |
57 | self.image_size = self.image.shape
58 | if not hasattr(self, 'pyr_type'):
59 | self.pyr_type = None
60 | self.edge_type = edge_type
61 | self.pyr_coeffs = {}
62 | self.pyr_size = {}
63 | self.is_complex = False
64 |
65 |
66 | def _set_num_scales(self, filter_name, height, extra_height=0):
67 | """Figure out the number of scales (height) of the pyramid
68 |
69 | The user should not call this directly. This is called during construction of a pyramid,
70 | and is based on the size of the filters (thus, should be called after instantiating the
71 | filters) and the input image, as well as the `extra_height` parameter (which corresponds to
72 | the residuals, which the Gaussian pyramid contains and others do not).
73 |
74 | This sets `self.num_scales` directly instead of returning something, so be careful.
75 |
76 | Parameters
77 | ----------
78 | filter_name : `str`
79 | Name of the filter in the `filters` dict that determines the height of the pyramid
80 | height : `'auto'` or `int`
81 | During construction, user can specify the number of scales (height) of the pyramid.
82 | The pyramid will have this number of scales unless that's greater than the maximum
83 | possible height.
84 | extra_height : `int`, optional
85 | The automatically calculated maximum number of scales is based on the size of the input
86 | image and filter size. The Gaussian pyramid also contains the final residuals and so we
87 | need to add one more to this number.
88 |
89 | Returns
90 | -------
91 | None
92 | """
93 | # the Gaussian and Laplacian pyramids can go one higher than the value returned here, so we
94 | # use the extra_height argument to allow for that
95 | max_ht = max_pyr_height(self.image.shape, self.filters[filter_name].shape) + extra_height
96 | if height == 'auto':
97 | self.num_scales = max_ht
98 | elif height > max_ht:
99 | raise ValueError("Cannot build pyramid higher than %d levels." % (max_ht))
100 | else:
101 | self.num_scales = int(height)
102 |
103 | def _recon_levels_check(self, levels):
104 | """Check whether levels arg is valid for reconstruction and return valid version
105 |
106 | When reconstructing the input image (i.e., when calling `recon_pyr()`), the user specifies
107 | which levels to include. This makes sure those levels are valid and gets them in the form
108 | we expect for the rest of the reconstruction. If the user passes `'all'`, this constructs
109 | the appropriate list (based on the values of `self.pyr_coeffs`).
110 |
111 | Parameters
112 | ----------
113 | levels : `list`, `int`, or {`'all'`, `'residual_highpass'`, or `'residual_lowpass'`}
114 | If `list` should contain some subset of integers from `0` to `self.num_scales-1`
115 | (inclusive) and `'residual_highpass'` and `'residual_lowpass'` (if appropriate for the
116 | pyramid). If `'all'`, returned value will contain all valid levels. Otherwise, must be
117 | one of the valid levels.
118 |
119 | Returns
120 | -------
121 | levels : `list`
122 | List containing the valid levels for reconstruction.
123 |
124 | """
125 | if isinstance(levels, str) and levels == 'all':
126 | levels = ['residual_highpass'] + list(range(self.num_scales)) + ['residual_lowpass']
127 | else:
128 | if not hasattr(levels, '__iter__') or isinstance(levels, str):
129 | # then it's a single int or string
130 | levels = [levels]
131 | levs_nums = np.asarray([int(i) for i in levels if isinstance(i, int) or i.isdigit()])
132 | assert (levs_nums >= 0).all(), "Level numbers must be non-negative."
133 | assert (levs_nums < self.num_scales).all(), "Level numbers must be in the range [0, %d]" % (self.num_scales-1)
134 | levs_tmp = list(np.sort(levs_nums)) # we want smallest first
135 | if 'residual_highpass' in levels:
136 | levs_tmp = ['residual_highpass'] + levs_tmp
137 | if 'residual_lowpass' in levels:
138 | levs_tmp = levs_tmp + ['residual_lowpass']
139 | levels = levs_tmp
140 | # not all pyramids have residual highpass / lowpass, but it's easier to construct the list
141 | # including them, then remove them if necessary.
142 | if 'residual_lowpass' not in self.pyr_coeffs.keys() and 'residual_lowpass' in levels:
143 | levels.pop(-1)
144 | if 'residual_highpass' not in self.pyr_coeffs.keys() and 'residual_highpass' in levels:
145 | levels.pop(0)
146 | return levels
147 |
148 | def _recon_bands_check(self, bands):
149 | """Check whether bands arg is valid for reconstruction and return valid version
150 |
151 | When reconstructing the input image (i.e., when calling `recon_pyr()`), the user specifies
152 | which orientations to include. This makes sure those orientations are valid and gets them
153 | in the form we expect for the rest of the reconstruction. If the user passes `'all'`, this
154 | constructs the appropriate list (based on the values of `self.pyr_coeffs`).
155 |
156 | Parameters
157 | ----------
158 | bands : `list`, `int`, or `'all'`.
159 | If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
160 | If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
161 | of the valid orientations.
162 |
163 | Returns
164 | -------
165 | bands: `list`
166 | List containing the valid orientations for reconstruction.
167 | """
168 | if isinstance(bands, str) and bands == "all":
169 | bands = np.arange(self.num_orientations)
170 | else:
171 | bands = np.array(bands, ndmin=1)
172 | assert (bands >= 0).all(), "Error: band numbers must be larger than 0."
173 | assert (bands < self.num_orientations).all(), "Error: band numbers must be in the range [0, %d]" % (self.num_orientations - 1)
174 | return bands
175 |
176 | def _recon_keys(self, levels, bands, max_orientations=None):
177 | """Make a list of all the relevant keys from `pyr_coeffs` to use in pyramid reconstruction
178 |
179 | When reconstructing the input image (i.e., when calling `recon_pyr()`), the user specifies
180 | some subset of the pyramid coefficients to include in the reconstruction. This function
181 | takes in those specifications, checks that they're valid, and returns a list of tuples
182 | that are keys into the `pyr_coeffs` dictionary.
183 |
184 | Parameters
185 | ----------
186 | levels : `list`, `int`, or {`'all'`, `'residual_highpass'`, `'residual_lowpass'`}
187 | If `list` should contain some subset of integers from `0` to `self.num_scales-1`
188 | (inclusive) and `'residual_highpass'` and `'residual_lowpass'` (if appropriate for the
189 | pyramid). If `'all'`, returned value will contain all valid levels. Otherwise, must be
190 | one of the valid levels.
191 | bands : `list`, `int`, or `'all'`.
192 | If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
193 | If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
194 | of the valid orientations.
195 | max_orientations: `None` or `int`.
196 | The maximum number of orientations we allow in the reconstruction. when we determine
197 | which ints are allowed for bands, we ignore all those greater than max_orientations.
198 |
199 | Returns
200 | -------
201 | recon_keys : `list`
202 | List of `tuples`, all of which are keys in `pyr_coeffs`. These are the coefficients to
203 | include in the reconstruction of the image.
204 |
205 | """
206 | levels = self._recon_levels_check(levels)
207 | bands = self._recon_bands_check(bands)
208 | if max_orientations is not None:
209 | for i in bands:
210 | if i >= max_orientations:
211 | warnings.warn(("You wanted band %d in the reconstruction but max_orientation"
212 | " is %d, so we're ignoring that band" % (i, max_orientations)))
213 | bands = [i for i in bands if i < max_orientations]
214 | recon_keys = []
215 | for level in levels:
216 | # residual highpass and lowpass
217 | if isinstance(level, str):
218 | recon_keys.append(level)
219 | # else we have to get each of the (specified) bands at
220 | # that level
221 | else:
222 | recon_keys.extend([(level, band) for band in bands])
223 | return recon_keys
224 |
225 |
class SteerablePyramidBase(Pyramid):
    """Shared functionality for steerable pyramids.

    Not intended to be instantiated directly: this exists so that both
    SteerablePyramidFreq and SteerablePyramidSpace inherit the steer_coeffs
    method.

    """
    def __init__(self, image, edge_type):
        super().__init__(image=image, edge_type=edge_type)

    def steer_coeffs(self, angles, even_phase=True):
        """Steer pyramid coefficients to the specified angles

        This allows you to have filters that have the Gaussian derivative order specified in
        construction, but arbitrary angles or number of orientations.

        Parameters
        ----------
        angles : `list`
            list of angles (in radians) to steer the pyramid coefficients to
        even_phase : `bool`
            specifies whether the harmonics are cosine or sine phase aligned about those positions.

        Returns
        -------
        resteered_coeffs : `dict`
            dictionary of re-steered pyramid coefficients. will have the same number of scales as
            the original pyramid (though it will not contain the residual highpass or lowpass).
            like `self.pyr_coeffs`, keys are 2-tuples of ints indexing the scale and orientation,
            but now we're indexing `angles` instead of `self.num_orientations`.
        resteering_weights : `dict`
            dictionary of weights used to re-steer the pyramid coefficients. will have the same
            keys as `resteered_coeffs`.

        """
        resteered = {}
        weights = {}
        for scale in range(self.num_scales):
            # stack the flattened orientation bands at this scale as the
            # columns of a basis matrix for steering
            columns = [self.pyr_coeffs[(scale, ori)].flatten()
                       for ori in range(self.num_orientations)]
            basis = np.vstack(columns).T
            # all bands at a scale share one shape; use band 0's for reshaping
            target_shape = self.pyr_coeffs[(scale, 0)].shape
            for idx, ang in enumerate(angles):
                res, steervect = steer(basis, ang, return_weights=True,
                                       even_phase=even_phase)
                resteered[(scale, idx)] = res.reshape(target_shape)
                weights[(scale, idx)] = steervect
        return resteered, weights
272 |
--------------------------------------------------------------------------------
/src/pyrtools/pyramids/steer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import warnings
3 |
4 |
def steer_to_harmonics_mtx(harmonics, angles=None, even_phase=True):
    '''Compute a steering matrix

    This maps a directional basis set onto the angular Fourier harmonics.

    Parameters
    ----------
    harmonics: `array_like`
        array specifying the angular harmonics contained in the steerable basis/filters.
    angles: `array_like` or None
        vector specifying the angular position of each filter (in radians). If None, defaults to
        `pi * np.arange(numh) / numh`, where `numh = harmonics.size + np.count_nonzero(harmonics)`
    even_phase : `bool`
        specifies whether the harmonics are cosine or sine phase aligned about those positions.

    Returns
    -------
    imtx : `np.array`
        This matrix is suitable for passing to the function `steer`.

    '''
    # accept plain sequences for both array_like parameters (the docstring
    # promises array_like, but .size / .shape require ndarrays)
    harmonics = np.atleast_1d(np.asarray(harmonics))
    # each nonzero harmonic contributes a cos and a sin column; zero only one
    numh = harmonics.size + np.count_nonzero(harmonics)
    if angles is None:
        angles = np.pi * np.arange(numh) / numh
    else:
        angles = np.atleast_1d(np.asarray(angles))

    # Compute inverse matrix, which maps to Fourier components onto
    # steerable basis
    imtx = np.zeros((angles.size, numh))
    col = 0
    for h in harmonics:
        args = h * angles
        if h == 0:
            imtx[:, col] = np.ones(angles.shape)
            col += 1
        elif even_phase:
            imtx[:, col] = np.cos(args)
            imtx[:, col+1] = np.sin(args)
            col += 2
        else:  # odd phase
            imtx[:, col] = np.sin(args)
            imtx[:, col+1] = -1.0 * np.cos(args)
            col += 2

    r = np.linalg.matrix_rank(imtx)
    if r < np.min(imtx.shape):
        warnings.warn("Matrix is not full rank")

    # pseudo-inverse maps filter responses back onto the harmonics
    return np.linalg.pinv(imtx)
54 |
55 |
def steer(basis, angle, harmonics=None, steermtx=None, return_weights=False, even_phase=True):
    '''Steer BASIS to the specified ANGLE.

    Parameters
    ----------
    basis : `array_like`
        array whose columns are vectorized rotated copies of a steerable function, or the responses
        of a set of steerable filters.
    angle : `array_like` or `int`
        scalar or column vector the size of the basis. specifies the angle(s) (in radians) to
        steer to
    harmonics : `list` or None
        a list of harmonic numbers indicating the angular harmonic content of the basis. if None
        (default), N even or odd low frequencies, as for derivative filters
    steermtx : `array_like` or None.
        matrix which maps the filters onto Fourier series components (ordered [cos0 cos1 sin1 cos2
        sin2 ... sinN]). See steer_to_harmonics_mtx function for more details. If None (default),
        assumes cosine phase harmonic components, and filter positions at 2pi*n/N.
    return_weights : `bool`
        whether to return the weights or not.
    even_phase : `bool`
        specifies whether the harmonics are cosine or sine phase aligned about those positions.

    Returns
    -------
    res : `np.array`
        the resteered basis
    steervect : `np.array`
        the weights used to resteer the basis. only returned if `return_weights` is True
    '''

    # number of basis functions (columns of `basis`)
    num = basis.shape[1]

    if isinstance(angle, (int, float)):
        # scalar angle becomes a length-1 array so the code below is uniform
        angle = np.asarray([angle])
    else:
        # NOTE(review): this validates angle against the number of basis ROWS
        # (one angle per sample/pixel) while the docstring says "the size of
        # the basis" — confirm which is intended before relying on vector angles
        if angle.shape[0] != basis.shape[0] or angle.shape[1] != 1:
            raise Exception("""ANGLE must be a scalar, or a column vector
                            the size of the basis elements""")

    # If HARMONICS is not specified, assume derivatives.
    if harmonics is None:
        # N low frequencies of matching parity: even num -> 1,3,5,...;
        # odd num -> 0,2,4,...
        harmonics = np.arange(1 - (num % 2), num, 2)

    if len(harmonics.shape) == 1 or harmonics.shape[0] == 1:
        # reshape to column matrix
        harmonics = harmonics.reshape(harmonics.shape[0], 1)
    elif harmonics.shape[0] != 1 and harmonics.shape[1] != 1:
        raise Exception('input parameter HARMONICS must be 1D!')

    # each nonzero harmonic needs 2 basis functions (cos and sin), zero needs 1
    if 2 * harmonics.shape[0] - (harmonics == 0).sum() != num:
        raise Exception('harmonics list is incompatible with basis size!')

    # If STEERMTX not passed, assume evenly distributed cosine-phase filters:
    if steermtx is None:
        steermtx = steer_to_harmonics_mtx(harmonics, np.pi * np.arange(num) / num,
                                          even_phase=even_phase)

    steervect = np.zeros((angle.shape[0], num))
    # outer product: one row of harmonic-scaled angles per steering angle
    # (only the nonzero harmonics contribute cos/sin pairs)
    arg = angle * harmonics[np.nonzero(harmonics)[0]].T
    if all(harmonics):
        # no zero harmonic: interleave cos/sin pairs across all columns
        steervect[:, range(0, num, 2)] = np.cos(arg)
        steervect[:, range(1, num, 2)] = np.sin(arg)
    else:
        # zero harmonic occupies column 0 (constant term), pairs follow
        # NOTE(review): assigning np.ones((n, 1)) into the 1d slice
        # steervect[:, 0] only broadcasts when n == 1 (scalar angle); a vector
        # angle combined with a zero harmonic looks like it would fail here —
        # confirm before using that combination
        steervect[:, 0] = np.ones((arg.shape[0], 1))
        steervect[:, range(1, num, 2)] = np.cos(arg)
        steervect[:, range(2, num, 2)] = np.sin(arg)

    # map harmonic weights onto the filter basis
    steervect = np.dot(steervect, steermtx)

    if steervect.shape[0] > 1:
        # per-sample weights: weight each basis column then sum over columns
        # (builtin sum over the first axis of tmp)
        tmp = np.dot(basis, steervect)
        res = sum(tmp).T
    else:
        # single steering angle: one weighted combination of the basis columns
        res = np.dot(basis, steervect.T)

    if return_weights:
        return res, np.asarray(steervect).reshape(num)
    else:
        return res
136 |
--------------------------------------------------------------------------------
/src/pyrtools/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LabForComputationalVision/pyrtools/d2ef019a5d8c16a52de597529a60bbdb2030e79c/src/pyrtools/tools/__init__.py
--------------------------------------------------------------------------------
/src/pyrtools/tools/compare_matpyrtools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | from ..pyramids import convert_pyr_coeffs_to_pyr
4 |
5 |
def comparePyr(matPyr, pyPyr, rtol=1e-5, atol=1e-8):
    '''compare two pyramids

    returns True if they are the same within the desired precision and False if not.

    written for unit testing code.

    Parameters
    ----------
    matPyr : `np.array`
        pyramid coefficients loaded from a matlab .mat file, flattened into a
        single vector (presumably column-major/Fortran order — see the
        order='F' reshape below)
    pyPyr : pyramid object
        either a new-style pyramid (with `pyr_coeffs` / `pyr_size` attributes)
        or an old-style one (with `pyr` / `pyrSize`); both are handled via
        the AttributeError fallbacks below
    rtol, atol : `float`
        relative and absolute tolerances passed to `np.isclose`

    Returns
    -------
    `bool` : whether the two pyramids match within tolerance
    '''
    # compare two pyramids - return 0 for !=, 1 for ==
    # correct number of elements?
    # NOTE(review): sum(matPyr.shape) assumes matPyr is an (N, 1) column
    # vector, giving N + 1; the matching "+ 1" below presumably accounts for
    # the same extra element — TODO confirm against the matlab format
    matSz = sum(matPyr.shape)
    try:
        pySz = 1 + sum([np.asarray(size).prod() for size in pyPyr.pyr_size.values()])
    except AttributeError:
        # old-style pyramid: sizes live in pyrSize instead of pyr_size
        pySz = 1 + sum([np.asarray(size).prod() for size in pyPyr.pyrSize])

    if(matSz != pySz):
        print("size difference: %d != %d, returning False" % (matSz, pySz))
        return False

    # values are close to each other?
    matStart = 0
    try:
        # new-style pyramid: flatten the coeff dict to a highpass-first list
        pyCoeffs, pyHigh, pyLow = convert_pyr_coeffs_to_pyr(pyPyr.pyr_coeffs)
        if pyHigh is not None:
            pyCoeffs.insert(0, pyHigh)
        if pyLow is not None:
            pyCoeffs.append(pyLow)
    except AttributeError:
        # old-style pyramid already stores its coefficients as a list
        pyCoeffs = pyPyr.pyr
    for idx, pyTmp in enumerate(pyCoeffs):
        # slice the next band's worth of values out of the flat matlab vector
        matTmp = matPyr[matStart:matStart + pyTmp.size]
        matStart = matStart + pyTmp.size
        # matlab stores arrays column-major, hence order='F'
        matTmp = np.reshape(matTmp, pyTmp.shape, order='F')

        # relative tolerance rtol
        # absolute tolerance atol
        isclose = np.isclose(matTmp, pyTmp, rtol, atol)
        if not isclose.all():
            # report the first mismatching element, then give up
            print("some pyramid elements not identical: checking...")
            for i in range(isclose.shape[0]):
                for j in range(isclose.shape[1]):
                    if not isclose[i, j]:
                        print("failed level:%d element:%d %d value:%.15f %.15f" %
                              (idx, i, j, matTmp[i, j], pyTmp[i, j]))
                        return False

    return True
54 |
55 |
def compareRecon(recon1, recon2, rtol=1e-5, atol=1e-10):
    '''compare two arrays

    returns True is they are the same within specified precision and False if not. function was
    made to accompany unit test code.

    This function is deprecated. Instead use the builtin numpy:

    np.allclose(recon1, recon2, rtol=1e-05, atol=1e-08, equal_nan=False)

    This will not tell you where the error is, but you can find that yourself

    Parameters
    ----------
    recon1, recon2 : `np.array`
        2d (possibly complex) arrays to compare elementwise.
    rtol, atol : `float`
        kept for backwards compatibility; the legacy elementwise checks below
        use their own fixed thresholds and do NOT consult these values.

    Returns
    -------
    `bool` : whether the arrays match within the legacy tolerances.

    '''

    # NOTE builtin numpy:

    # BUT won't print where and what the first error is

    if recon1.shape != recon2.shape:
        print('shape is different!')
        print(recon1.shape)
        print(recon2.shape)
        return False

    prec = -1
    for i in range(recon1.shape[0]):
        for j in range(recon2.shape[1]):
            # real parts compared against a fixed 1e-11 threshold
            if np.absolute(recon1[i, j].real - recon2[i, j].real) > math.pow(10, -11):
                print("real: i=%d j=%d %.15f %.15f diff=%.15f" %
                      (i, j, recon1[i, j].real, recon2[i, j].real,
                       np.absolute(recon1[i, j].real-recon2[i, j].real)))
                return False
            # FIX: need a better way to test
            # if we have many significant digits to the left of decimal we
            # need to be less stringent about digits to the right.
            # The code below works, but there must be a better way.
            # bug fix: this used `isinstance(recon1, complex)`, which is always
            # False for a numpy array, so imaginary parts were never compared;
            # np.iscomplexobj correctly detects complex-valued arrays.
            if np.iscomplexobj(recon1):
                imag_mag = np.abs(recon1[i, j].imag)
                # guard against math.log(0) when the imaginary part is zero
                if imag_mag > 0 and int(math.log(imag_mag, 10)) > 1:
                    prec = prec + int(math.log(imag_mag, 10))
                    if prec > 0:
                        prec = -1
                    print(prec)
                if np.absolute(recon1[i, j].imag - recon2[i, j].imag) > math.pow(10, prec):
                    print("imag: i=%d j=%d %.15f %.15f diff=%.15f" %
                          (i, j, recon1[i, j].imag, recon2[i, j].imag,
                           np.absolute(recon1[i, j].imag-recon2[i, j].imag)))
                    return False

    return True
105 |
--------------------------------------------------------------------------------
/src/pyrtools/tools/convolutions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from ..pyramids.filters import parse_filter
3 | from ..pyramids.c.wrapper import corrDn, upConv
4 | import scipy.signal
5 |
6 |
def blur(image, n_levels=1, filt='binom5'):
    '''blur an image by filtering-downsampling and then upsampling-filtering

    Blur an image, by filtering and downsampling `n_levels` times (default=1), followed by upsampling
    and filtering `n_levels` times. The blurring is done with filter kernel specified by `filt` (default
    = 'binom5'), which can be a string (to be passed to named_filter), a vector (applied separably
    as a 1D convolution kernel in X and Y), or a matrix (applied as a 2D convolution kernel). The
    downsampling is always by 2 in each direction.

    This differs from blurDn in that here we upsample afterwards.

    Arguments
    ---------
    image : `array_like`
        1d or 2d image to blur
    n_levels : `int`
        the number of times to filter and downsample. the higher this is, the more blurred the
        resulting image will be
    filt : {`array_like`, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4',
            'qmf5', 'qmf9', 'qmf13'}
        filter to use for filtering image. If array_like, can be 1d or 2d. All scaled so L-1 norm
        is 1.0

        * `'binomN'` - binomial coefficient filter of order N-1
        * `'haar'` - Haar wavelet
        * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
        * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
        * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_

    Returns
    -------
    image : `array_like`
        the blurred image

    References
    ----------
    .. [1] J D Johnston, "A filter family designed for use in quadrature mirror filter banks",
       Proc. ICASSP, pp 291-294, 1980.
    .. [2] I Daubechies, "Orthonormal bases of compactly supported wavelets", Commun. Pure Appl.
       Math, vol. 42, pp 909-996, 1988.
    .. [3] E P Simoncelli, "Orthogonal sub-band image transforms", PhD Thesis, MIT Dept. of Elec.
       Eng. and Comp. Sci. May 1988. Also available as: MIT Media Laboratory Vision and Modeling
       Technical Report #100.
    .. [4] E P Simoncelli and E H Adelson, "Subband image coding", Subband Transforms, chapter 4,
       ed. John W Woods, Kluwer Academic Publishers, Norwell, MA, 1990, pp 143--192.
    '''

    if image.ndim == 1:
        image = image.reshape(-1, 1)

    filt = parse_filter(filt)

    if n_levels > 0:
        if image.shape[1] == 1:
            # 1D image [M, 1] 1D filter [N, 1]
            imIn = corrDn(image=image, filt=filt, step=(2, 1))
            out = blur(imIn, n_levels-1, filt)
            res = upConv(image=out, filt=filt, step=(2, 1), stop=image.shape)
            return res

        # bug fix: this branch previously tested image.shape[1] == 1 again
        # (identical to the branch above, so it was unreachable); a row image
        # is shape[0] == 1, matching the handling in blurDn and upBlur.
        elif image.shape[0] == 1:
            # 1D image [1, M] 1D filter [N, 1]
            imIn = corrDn(image=image, filt=filt.T, step=(1, 2))
            out = blur(imIn, n_levels-1, filt)
            res = upConv(image=out, filt=filt.T, step=(1, 2), stop=image.shape)
            return res

        elif filt.shape[1] == 1:
            # 2D image 1D filter [N, 1]: apply separably in each direction
            imIn = corrDn(image=image, filt=filt, step=(2, 1))
            imIn = corrDn(image=imIn, filt=filt.T, step=(1, 2))
            out = blur(imIn, n_levels-1, filt)
            res = upConv(image=out, filt=filt.T, step=(1, 2), start=(0, 0), stop=[out.shape[0], image.shape[1]])
            res = upConv(image=res, filt=filt, step=(2, 1), start=(0, 0), stop=image.shape)
            return res

        else:
            # 2D image 2D filter
            imIn = corrDn(image=image, filt=filt, step=(2, 2))
            out = blur(imIn, n_levels-1, filt)
            res = upConv(image=out, filt=filt, step=(2, 2), stop=image.shape)
            return res

    else:
        # recursion base case: no more levels to blur
        return image
92 |
93 |
def blurDn(image, n_levels=1, filt='binom5'):
    '''blur and downsample an image

    Blur and downsample an image. The blurring is done with filter kernel specified by FILT
    (default = 'binom5'), which can be a string (to be passed to named_filter), a vector (applied
    separably as a 1D convolution kernel in X and Y), or a matrix (applied as a 2D convolution
    kernel). The downsampling is always by 2 in each direction.

    The procedure is applied recursively `n_levels` times (default=1).

    This differs from blur in that we do NOT upsample afterwards.

    Arguments
    ---------
    image : `array_like`
        1d or 2d image to blur and downsample
    n_levels : `int`
        the number of times to filter and downsample. the higher this is, the blurrier and smaller
        the resulting image will be
    filt : {`array_like`, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4',
            'qmf5', 'qmf9', 'qmf13'}
        filter to use for filtering image (see `named_filter` for the named options). If
        array_like, can be 1d or 2d. All scaled so L-1 norm is 1.0

    Returns
    -------
    image : `array_like`
        the blurred and downsampled image
    '''

    # promote 1d input to a column image
    if image.ndim == 1:
        image = image.reshape(-1, 1)

    filt = parse_filter(filt)

    # recurse first, so the final filtering below acts on the smallest image
    if n_levels > 1:
        image = blurDn(image, n_levels - 1, filt)

    if n_levels < 1:
        # nothing left to do
        return image

    if image.shape[1] == 1:
        # column image [M, 1], 1D filter [N, 1]
        return corrDn(image=image, filt=filt, step=(2, 1))
    if image.shape[0] == 1:
        # row image [1, M], 1D filter applied transposed
        return corrDn(image=image, filt=filt.T, step=(1, 2))
    if filt.shape[1] == 1:
        # 2D image, 1D filter: apply separably, rows then columns
        half = corrDn(image=image, filt=filt, step=(2, 1))
        return corrDn(image=half, filt=filt.T, step=(1, 2))
    # 2D image, 2D filter
    return corrDn(image=image, filt=filt, step=(2, 2))
172 |
173 |
def upBlur(image, n_levels=1, filt='binom5'):
    '''upsample and blur an image.

    Upsample and blur an image. The blurring is done with filter kernel specified by FILT (default
    = 'binom5'), which can be a string (to be passed to named_filter), a vector (applied separably
    as a 1D convolution kernel in X and Y), or a matrix (applied as a 2D convolution kernel). The
    downsampling is always by 2 in each direction.

    The procedure is applied recursively n_levels times (default=1).

    Arguments
    ---------
    image : `array_like`
        1d or 2d image to upsample and blur
    n_levels : `int`
        the number of times to filter and downsample. the higher this is, the blurrier and larger
        the resulting image will be
    filt : {`array_like`, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4',
            'qmf5', 'qmf9', 'qmf13'}
        filter to use for filtering image (see `named_filter` for the named options). If
        array_like, can be 1d or 2d. All scaled so L-1 norm is 1.0

    Returns
    -------
    image : `array_like`
        the upsampled and blurred image

    '''

    # promote 1d input to a column image
    if image.ndim == 1:
        image = image.reshape(-1, 1)

    # note: unlike blur/blurDn, the filter is NOT normalized here
    filt = parse_filter(filt, normalize=False)

    # recurse first so each level upsamples the previous level's output
    if n_levels > 1:
        image = upBlur(image, n_levels - 1, filt)

    if n_levels < 1:
        # nothing left to do
        return image

    if image.shape[1] == 1:
        # column image [M, 1], 1D filter [N, 1]
        return upConv(image=image, filt=filt, step=(2, 1))
    if image.shape[0] == 1:
        # row image [1, M], 1D filter applied transposed
        return upConv(image=image, filt=filt.T, step=(1, 2))
    if filt.shape[1] == 1:
        # 2D image, 1D filter: apply separably, rows then columns
        grown = upConv(image=image, filt=filt, step=(2, 1))
        return upConv(image=grown, filt=filt.T, step=(1, 2))
    # 2D image, 2D filter
    return upConv(image=image, filt=filt, step=(2, 2))
251 |
252 |
def image_gradient(image, edge_type="dont-compute"):
    '''Compute the gradient of the image using smooth derivative filters

    Compute the gradient of the image using smooth derivative filters optimized for accurate
    direction estimation. Coordinate system corresponds to standard pixel indexing: X axis points
    rightward. Y axis points downward. `edge_type` specifies boundary handling.

    Notes
    -----
    original filters from Int'l Conf Image Processing, 1994.
    updated filters 10/2003: see Farid & Simoncelli, IEEE Trans Image
    Processing, 13(4):496-508, April 2004.

    Arguments
    ---------
    image : `array_like`
        2d array to compute the gradients of
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.

    Returns
    -------
    dx, dy : `np.array`
        the X derivative and the Y derivative

    '''

    # 5-tap prefilter and derivative kernels from Farid & Simoncelli, IEEE
    # Trans Image Processing, 13(4):496-508, April 2004, stored as column vectors.
    prefilt = np.asarray([0.037659, 0.249153, 0.426375, 0.249153, 0.037659]).reshape(5, 1)
    deriv = np.asarray([-0.109604, -0.276691, 0.000000, 0.276691, 0.109604]).reshape(5, 1)

    # separable filtering: smooth along one axis, differentiate along the other
    dx = corrDn(corrDn(image, prefilt, edge_type), deriv.T, edge_type)
    dy = corrDn(corrDn(image, deriv, edge_type), prefilt.T, edge_type)

    return (dx, dy)
297 |
298 |
299 | # ----------------------------------------------------------------
300 | # Below are (slow) scipy convolution functions
301 | # they are intended for comparison purpose only
302 | # the c code is prefered and used throughout this package
303 | # ----------------------------------------------------------------
304 |
305 |
def rconv2(mtx1, mtx2, ctr=0):
    '''Convolution of two matrices, with boundaries handled via reflection about the edge pixels.

    Result will be of size of LARGER matrix.

    The origin of the smaller matrix is assumed to be its center.
    For even dimensions, the origin is determined by the CTR (optional)
    argument:
         CTR   origin
          0     DIM/2      (default)
          1   (DIM/2)+1

    In general, you should not use this function, since it will be slow. Instead, use `upConv` or
    `corrDn`, which use the C code and so are much faster.

    Arguments
    ---------
    mtx1, mtx2 : `np.array`
        the two 2d arrays to convolve. One of them must be at least as large as the other in
        both dimensions.
    ctr : `int`
        either 0 or 1, selecting the origin convention for even-sized dimensions (see above)

    Returns
    -------
    result : `np.array`
        the convolution result, with the same shape as the larger input

    Raises
    ------
    ValueError
        if neither input is at least as large as the other in both dimensions

    '''

    if (mtx1.shape[0] >= mtx2.shape[0] and mtx1.shape[1] >= mtx2.shape[1]):
        large = mtx1
        small = mtx2
    elif (mtx1.shape[0] <= mtx2.shape[0] and mtx1.shape[1] <= mtx2.shape[1]):
        large = mtx2
        small = mtx1
    else:
        # previously this printed a message and returned None, which callers could
        # silently ignore; raise instead so the error is explicit
        raise ValueError('one matrix must be larger than the other in both dimensions!')

    ly = large.shape[0]
    lx = large.shape[1]
    sy = small.shape[0]
    sx = small.shape[1]

    # These values are one less than the index of the small mtx that falls on
    # the border pixel of the large matrix when computing the first
    # convolution response sample:
    sy2 = int(np.floor((sy+ctr-1)/2))
    sx2 = int(np.floor((sx+ctr-1)/2))

    # pad with reflected copies (nw/n/ne/w/e/sw/s/se are the eight border regions)
    nw = large[sy-sy2-1:0:-1, sx-sx2-1:0:-1]
    n = large[sy-sy2-1:0:-1, :]
    ne = large[sy-sy2-1:0:-1, lx-2:lx-sx2-2:-1]
    w = large[:, sx-sx2-1:0:-1]
    e = large[:, lx-2:lx-sx2-2:-1]
    sw = large[ly-2:ly-sy2-2:-1, sx-sx2-1:0:-1]
    s = large[ly-2:ly-sy2-2:-1, :]
    se = large[ly-2:ly-sy2-2:-1, lx-2:lx-sx2-2:-1]

    n = np.column_stack((nw, n, ne))
    c = np.column_stack((w, large, e))
    s = np.column_stack((sw, s, se))

    clarge = np.concatenate((n, c), axis=0)
    clarge = np.concatenate((clarge, s), axis=0)

    # 'valid' on the padded array yields exactly the shape of `large`
    return scipy.signal.convolve(clarge, small, 'valid')
362 |
363 |
364 | # TODO: low priority
365 |
366 | # def cconv2(mtx1, mtx2, ctr=0):
367 | # '''Circular convolution of two matrices. Result will be of size of
368 | # LARGER vector.
369 | #
370 | # The origin of the smaller matrix is assumed to be its center.
371 | # For even dimensions, the origin is determined by the CTR (optional)
372 | # argument:
373 | # CTR origin
374 | # 0 DIM/2 (default)
375 | # 1 (DIM/2)+1
376 | #
377 | # Eero Simoncelli, 6/96. Modified 2/97.
378 | # Python port by Rob Young, 8/15
379 | # '''
380 | #
381 | # if len(args) < 2:
382 | # print 'Error: cconv2 requires two input matrices!'
383 | # print 'Usage: cconv2(matrix1, matrix2, center)'
384 | # print 'where center parameter is optional'
385 | # return
386 | # else:
387 | # a = np.asarray(args[0])
388 | # b = np.asarray(args[1])
389 | #
390 | # if len(args) == 3:
391 | # ctr = args[2]
392 | # else:
393 | # ctr = 0
394 | #
395 | # if a.shape[0] >= b.shape[0] and a.shape[1] >= b.shape[1]:
396 | # large = a
397 | # small = b
398 | # elif a.shape[0] <= b.shape[0] and a.shape[1] <= b.shape[1]:
399 | # large = b
400 | # small = a
401 | # else:
402 | # print 'Error: one matrix must be larger than the other in both dimensions!'
403 | # return
404 | #
405 | # ly = large.shape[0]
406 | # lx = large.shape[1]
407 | # sy = small.shape[0]
408 | # sx = small.shape[1]
409 | #
410 | # ## These values are the index of the small mtx that falls on the
411 | # ## border pixel of the large matrix when computing the first
412 | # ## convolution response sample:
413 | # sy2 = np.floor((sy+ctr+1)/2.0).astype(int)
414 | # sx2 = np.floor((sx+ctr+1)/2.0).astype(int)
415 | #
416 | # # pad
417 | # nw = large[ly-sy+sy2:ly, lx-sx+sx2:lx]
418 | # n = large[ly-sy+sy2:ly, :]
419 | # ne = large[ly-sy+sy2:ly, :sx2-1]
420 | # w = large[:, lx-sx+sx2:lx]
421 | # c = large
422 | # e = large[:, :sx2-1]
423 | # sw = large[:sy2-1, lx-sx+sx2:lx]
424 | # s = large[:sy2-1, :]
425 | # se = large[:sy2-1, :sx2-1]
426 | #
427 | # n = np.column_stack((nw, n, ne))
428 | # c = np.column_stack((w,large,e))
429 | # s = np.column_stack((sw, s, se))
430 | #
431 | # clarge = np.concatenate((n, c), axis=0)
432 | # clarge = np.concatenate((clarge, s), axis=0)
433 | #
434 | # c = scipy.signal.convolve(clarge, small, 'valid')
435 | #
436 | # return c
437 |
438 |
439 | # def zconv2(mtx1, mtx2, ctr=0):
440 | # ''' RES = ZCONV2(MTX1, MTX2, CTR)
441 | #
442 | # Convolution of two matrices, with boundaries handled as if the larger
443 | # mtx lies in a sea of zeros. Result will be of size of LARGER vector.
444 | #
445 | # The origin of the smaller matrix is assumed to be its center.
446 | # For even dimensions, the origin is determined by the CTR (optional)
447 | # argument:
448 | # CTR origin
449 | # 0 DIM/2 (default)
450 | # 1 (DIM/2)+1 (behaves like conv2(mtx1,mtx2,'same'))
451 | #
452 | # Eero Simoncelli, 2/97. Python port by Rob Young, 10/15. '''
453 | #
454 | # # REQUIRED ARGUMENTS
455 | # #----------------------------------------------------------------
456 | #
457 | # if len(args) < 2 or len(args) > 3:
458 | # print 'Usage: zconv2(matrix1, matrix2, center)'
459 | # print 'first two input parameters are required'
460 | # return
461 | # else:
462 | # a = np.asarray(args[0])
463 | # b = np.asarray(args[1])
464 | #
465 | # # OPTIONAL ARGUMENT
466 | # #----------------------------------------------------------------
467 | #
468 | # if len(args) == 3:
469 | # ctr = args[2]
470 | # else:
471 | # ctr = 0
472 | #
473 | # #----------------------------------------------------------------
474 | #
475 | # if (a.shape[0] >= b.shape[0]) and (a.shape[1] >= b.shape[1]):
476 | # large = a
477 | # small = b
478 | # elif (a.shape[0] <= b.shape[0]) and (a.shape[1] <= b.shape[1]):
479 | # large = b
480 | # small = a
481 | # else:
482 | # print 'Error: one arg must be larger than the other in both dimensions!'
483 | # return
484 | #
485 | # ly = large.shape[0]
486 | # lx = large.shape[1]
487 | # sy = small.shape[0]
488 | # sx = small.shape[1]
489 | #
490 | # ## These values are the index of the small matrix that falls on the
491 | # ## border pixel of the large matrix when computing the first
492 | # ## convolution response sample:
493 | # sy2 = np.floor((sy+ctr+1)/2.0).astype(int)-1
494 | # sx2 = np.floor((sx+ctr+1)/2.0).astype(int)-1
495 | #
496 | # clarge = scipy.signal.convolve(large, small, 'full')
497 | #
498 | # c = clarge[sy2:ly+sy2, sx2:lx+sx2]
499 | #
500 | # return c
501 |
--------------------------------------------------------------------------------
/src/pyrtools/tools/image_stats.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .utils import matlab_histo
3 |
4 |
def entropy(vec, binsize=None):
    '''Compute the first-order sample entropy of `vec`

    Samples of `vec` are first discretized. Optional argument `binsize` controls the
    discretization, and defaults to 256/(max(`vec`)-min(`vec`)).

    NOTE: This is a heavily biased estimate of entropy when you don't have much data.

    Arguments
    ---------
    vec : `array_like`
        the 1d or 2d array to calculate the entropy of
    binsize : `float` or None
        the size of the bins we discretize into. If None, will set to 256/(max(vec)-min(vec))

    Returns
    -------
    entropy : `float`
        estimate of entropy from the data

    '''
    bincount, _ = matlab_histo(vec, nbins=256, binsize=binsize)

    # drop empty bins and normalize counts into probabilities
    probs = bincount[bincount > 0]
    probs = probs / probs.sum()

    # Shannon entropy, in bits
    return -(probs * np.log2(probs)).sum()
33 |
34 |
def range(array):
    '''compute minimum and maximum values of the input array

    `array` must be real-valued

    Arguments
    ---------
    array : `np.array`
        array to calculate the range of

    Returns
    -------
    array_range : `tuple`
        (min, max)

    Raises
    ------
    Exception
        if `array` is not real-valued
    '''
    # bug fix: the original tested np.isreal(array.all()). array.all() returns a
    # single boolean, which is always "real", so complex input was never
    # rejected. Check realness element-wise across the whole array instead.
    if not np.isreal(array).all():
        raise Exception('array must be real-valued')

    return (array.min(), array.max())
54 |
55 |
def var(array, array_mean=None):
    '''Sample variance of the input numpy array.

    Passing `array_mean` (optional) makes the calculation faster. This works equally well for
    real and complex-valued `array`

    Arguments
    ---------
    array : `np.array`
        array to calculate the variance of
    array_mean : `float` or None
        the mean of `array`. If None, will calculate it.

    Returns
    -------
    array_var : `float`
        the variance of `array`
    '''
    if array_mean is None:
        array_mean = array.mean()

    if not np.isreal(array).all():
        # complex input: variance of the real and imaginary parts, combined
        # into a single complex number
        return var(array.real, array_mean.real) + 1j * var(array.imag, array_mean.imag)

    # unbiased (n-1) estimator; the max() guards against division by zero for
    # single-element arrays
    deviations = array - array_mean
    return (deviations ** 2).sum() / max(array.size - 1, 1)
81 |
82 |
def skew(array, array_mean=None, array_var=None):
    '''Sample skew (third moment divided by variance^3/2) of the input array.

    `array_mean` (optional) and `array_var` (optional) make the computation faster. This works
    equally well for real and complex-valued `array`

    Arguments
    ---------
    array : `np.array`
        array to calculate the skew of
    array_mean : `float` or None
        the mean of `array`. If None, will calculate it.
    array_var : `float` or None
        the variance of `array`. If None, will calculate it

    Returns
    -------
    array_skew : `float`
        the skew of `array`.

    '''
    if array_mean is None:
        array_mean = array.mean()
    if array_var is None:
        array_var = var(array, array_mean)

    if not np.isreal(array).all():
        # complex input: skew of the real and imaginary parts, combined into a
        # single complex number
        return (skew(array.real, array_mean.real, array_var.real) + 1j *
                skew(array.imag, array_mean.imag, array_var.imag))

    # third central moment normalized by the standard deviation cubed
    return ((array - array_mean) ** 3).mean() / np.sqrt(array_var) ** 3
114 |
115 |
def kurt(array, array_mean=None, array_var=None):
    '''Sample kurtosis (fourth moment divided by squared variance) of the input array.

    For reference, kurtosis of a Gaussian distribution is 3.

    `array_mean` (optional) and `array_var` (optional) make the computation faster. This works
    equally well for real and complex-valued `array`

    Arguments
    ---------
    array : `np.array`
        array to calculate the kurtosis of
    array_mean : `float` or None
        the mean of `array`. If None, will calculate it.
    array_var : `float` or None
        the variance of `array`. If None, will calculate it

    Returns
    -------
    array_kurt : `float`
        the kurtosis of `array`.

    '''
    if array_mean is None:
        array_mean = array.mean()
    if array_var is None:
        array_var = var(array, array_mean)

    if not np.isreal(array).all():
        # complex input: kurtosis of the real and imaginary parts, combined
        # into a single complex number
        return (kurt(array.real, array_mean.real, array_var.real) + 1j *
                kurt(array.imag, array_mean.imag, array_var.imag))

    # fourth central moment normalized by the squared variance
    return ((array - array_mean) ** 4).mean() / array_var ** 2
149 |
150 |
def image_compare(im_array0, im_array1):
    '''Prints and returns min, max, mean, stdev of the difference, and SNR (relative to im_array0).

    Arguments
    ---------
    im_array0 : `np.array`
        the first image to compare
    im_array1 : `np.array`
        the second image to compare

    Returns
    -------
    min_diff : `float`
        the minimum difference between `im_array0` and `im_array1`
    max_diff : `float`
        the maximum difference between `im_array0` and `im_array1`
    mean_diff : `float`
        the mean difference between `im_array0` and `im_array1`
    std_diff : `float`
        the standard deviation of the difference between `im_array0` and `im_array1`
    snr : `float`
        the signal-to-noise ratio of the difference between `im_array0` and `im_array1` (relative
        to `im_array0`)

    Raises
    ------
    Exception
        if the two images have different shapes, or if either is not real-valued
    '''
    # compare shapes, not just element counts: two arrays with equal .size but
    # different shapes would otherwise reach the subtraction below, where
    # broadcasting could silently produce meaningless statistics
    if not im_array0.shape == im_array1.shape:
        raise Exception('Input images must have the same shape')

    if not np.isreal(im_array0).all() or not np.isreal(im_array1).all():
        raise Exception('Input images must be real-valued matrices')

    difference = im_array0 - im_array1
    (min_diff, max_diff) = range(difference)
    mean_diff = difference.mean()
    var_diff = var(difference, mean_diff)
    # identical images: report infinite SNR rather than dividing by ~zero
    if var_diff < np.finfo(np.double).tiny:
        snr = np.inf
    else:
        snr = 10 * np.log10(var(im_array0) / var_diff)
    print('Difference statistics:')
    print('  Range: [%d, %d]' % (min_diff, max_diff))
    print('  Mean: %f,  Stdev (rmse): %f,  SNR (dB): %f' % (mean_diff, np.sqrt(var_diff), snr))
    return min_diff, max_diff, mean_diff, np.sqrt(var_diff), snr
193 |
194 |
def image_stats(im_array):
    '''Prints and returns image statistics: min, max, mean, stdev, and kurtosis.

    Arguments
    ---------
    im_array : `np.array`
        the image to summarize

    Returns
    -------
    array_min : `float`
        the minimum of `im_array`
    array_max : `float`
        the maximum of `im_array`
    array_mean : `float`
        the mean of `im_array`
    array_std : `float`
        the standard deviation of `im_array`
    array_kurt : `float`
        the kurtosis of `im_array`
    '''
    if not np.isreal(im_array).all():
        raise Exception('Input images must be real-valued matrices')

    # gather the summary statistics, reusing the mean and variance where possible
    mini, maxi = range(im_array)
    array_mean = im_array.mean()
    array_var = var(im_array, array_mean)
    array_kurt = kurt(im_array, array_mean, array_var)
    array_std = np.sqrt(array_var)
    print('Image statistics:')
    print('  Range: [%f, %f]' % (mini, maxi))
    print('  Mean: %f,  Stdev: %f,  Kurtosis: %f' % (array_mean, array_std, array_kurt))
    return mini, maxi, array_mean, array_std, array_kurt
227 |
--------------------------------------------------------------------------------
/src/pyrtools/tools/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import ndimage
3 | import warnings
4 |
5 |
def matlab_round(array):
    '''round equivalent to matlab function, which rounds .5 away from zero

    (used in matlab_histo so we can unit test against matlab code).

    By contrast, np.round() rounds .5 to the nearest even number, e.g.:
    - np.round(0.5) = 0, matlab_round(0.5) = 1
    - np.round(2.5) = 2, matlab_round(2.5) = 3

    Arguments
    ---------
    array : `np.array`
        the array to round

    Returns
    -------
    rounded_array : `np.array`
        the rounded array
    '''
    # split into fractional and integer parts, then bump the integer part away
    # from zero whenever the fractional part is at least one half
    frac, whole = np.modf(array)
    return whole + np.sign(frac) * (np.abs(frac) >= 0.5)
27 |
28 |
def matlab_histo(array, nbins=101, binsize=None, center=None):
    '''Compute a histogram of array.

    How does this differ from MatLab's HIST function?  This function:
      - allows uniformly spaced bins only.
      + operates on all elements of MTX, instead of columnwise.
      + is much faster (approximately a factor of 80 on my machine).
      + allows specification of number of bins OR binsize. Default=101 bins.
      + allows (optional) specification of binCenter.

    Arguments
    ---------
    array : `np.array`
        the array to bin
    nbins : `int`
        the number of histogram bins
    binsize : `float` or None
        the size of each bin. if None, we use nbins to determine it as:
        `(array.max() - array.min()) / nbins`
    center : `float` or None
        the center position for the histogram bins. if None, this is `array.mean()`

    Returns
    -------
    N : `np.array`
        the histogram counts
    edges : `np.array`
        vector containing the edges of the histogram bins

    '''
    mini = array.min()
    maxi = array.max()

    if center is None:
        center = array.mean()

    if binsize is None:
        # derive binsize from the requested number of bins
        binsize = (maxi - mini) / nbins

    # the bin grid is anchored at `center`, so the actual bin count may differ
    # from the request; warn when it does
    n_requested = nbins
    nbins = int(matlab_round((maxi - center) / binsize) -
                matlab_round((mini - center) / binsize))
    if nbins != n_requested:
        warnings.warn('Overriding bin number %d (requested %d)' % (nbins, n_requested))

    # np.histogram uses bin edges, not centers like Matlab's hist, so compute
    # nbins + 1 edges (0.499 rather than 0.5 keeps boundary samples in-bin)
    first_edge = center + binsize * (-0.499 + matlab_round((mini - center) / binsize))
    edges = first_edge + binsize * np.arange(nbins + 1)
    N, _ = np.histogram(array, edges)

    # matlab version returns column vectors, so we will too.
    return (N.reshape(1, -1), edges.reshape(1, -1))
82 |
83 |
def rcosFn(width=1, position=0, values=(0, 1)):
    '''Return a lookup table containing a "raised cosine" soft threshold function

    Y = VALUES(1) + (VALUES(2)-VALUES(1)) * cos^2( PI/2 * (X - POSITION + WIDTH)/WIDTH )

    this lookup table is suitable for use by `pointOp`

    Arguments
    ---------
    width : `float`
        the width of the region over which the transition occurs
    position : `float`
        the location of the center of the threshold
    values : `tuple`
        2-tuple specifying the values to the left and right of the transition.

    Returns
    -------
    X : `np.array`
        the x values of this raised cosine
    Y : `np.array`
        the y values of this raised cosine
    '''

    table_size = 256  # arbitrary lookup-table resolution

    # sample the squared cosine on a grid of table_size + 3 points
    X = np.pi * np.arange(-table_size - 1, 2) / (2 * table_size)
    Y = values[0] + (values[1] - values[0]) * np.cos(X) ** 2

    # repeat the end values, so extrapolation beyond the table is flat
    Y[0] = Y[1]
    Y[-1] = Y[-2]

    # rescale/shift the abscissa to the requested width and position
    X = position + (2 * width / np.pi) * (X + np.pi / 4)

    return (X, Y)
121 |
122 |
def project_polar_to_cartesian(data):
    """Take a function defined in polar coordinates and project it into Cartesian coordinates

    Inspired by https://pyabel.readthedocs.io/en/latest/_modules/abel/tools/polar.html, which went
    the other way. Note that we currently don't implement the Cartesian to polar projection, but
    could do so based on this code fairly simply if it's necessary.

    Currently, this only works for square images and we require that the original image and the
    reprojected image are the same size. There should be a way to avoid both of these issues, but I
    can't think of a way to do that right now.

    Parameters
    ----------
    data : array_like
        The 2d array to convert from polar to Cartesian coordinates. We assume the first dimension
        is the polar radius and the second is the polar angle. NOTE: any NaNs in `data` are
        replaced by 0 in place (with a warning).

    Returns
    -------
    output : np.array
        The 2d array in Cartesian coordinates.

    """
    if np.isnan(data).any():
        data[np.isnan(data)] = 0
        warnings.warn("project_polar_to_cartesian won't work if there are any NaNs in the array, "
                      "so we've replaced all NaNs with 0s")
    ny, nx = data.shape[0], data.shape[1]
    if nx != ny:
        raise Exception("There's an occasional bug where we don't wrap the angle correctly if nx "
                        "and ny aren't equal, so we don't support this for now!")

    # build the Cartesian sampling grid, spanning [-max_radius, max_radius) on both axes
    max_radius = data.shape[0]
    x_grid, y_grid = np.meshgrid(np.linspace(-max_radius, max_radius, nx, endpoint=False),
                                 np.linspace(-max_radius, max_radius, ny, endpoint=False))
    # flip the y indices so that negative is at the bottom (to correspond with how we have
    # the polar angle -- 0 on the right)
    y_grid = np.flipud(y_grid)

    radius = np.sqrt(x_grid ** 2 + y_grid ** 2)

    angle = np.arctan2(y_grid, x_grid)
    # having the angle run from 0 to 2 pi seems to avoid most of the discontinuities
    angle = np.mod(angle, 2 * np.pi)
    # convert from radians to pixel values along the angle axis
    angle *= nx / (2 * np.pi)

    # map_coordinates requires a 2xn array of (row, col) = (radius, angle) coordinates;
    # mode="nearest" deals with weird discontinuities that may pop up near the theta=0 line
    coords = np.vstack((radius.flatten(), angle.flatten()))
    sampled = ndimage.map_coordinates(data, coords, mode='nearest')
    return sampled.reshape((ny, nx))
180 |
181 |
182 | if __name__ == "__main__":
183 | X, Y = rcosFn(width=1, position=0, values=(0, 1))
184 |
185 | import matplotlib.pyplot as plt
186 | plt.figure()
187 | plt.plot(X, Y)
188 | plt.show()
189 |
--------------------------------------------------------------------------------