├── .github ├── dependabot.yml └── workflows │ ├── master-cd.yml │ ├── tags-release.yml │ └── test-ci.yml ├── .gitignore ├── LICENSE.txt ├── README.rst ├── docs ├── Makefile ├── _templates │ ├── custom-class-template.rst │ └── custom-module-template.rst ├── api.rst ├── conf.py ├── denoisers.rst ├── index.rst └── usage.rst ├── examples ├── README.rst ├── conftest.py └── example_experimental_data.py ├── pyproject.toml ├── src └── patch_denoise │ ├── __init__.py │ ├── _docs.py │ ├── bindings │ ├── __init__.py │ ├── cli.py │ ├── modopt.py │ ├── nipype.py │ └── utils.py │ ├── denoise.py │ ├── simulation │ ├── __init__.py │ ├── activations.py │ ├── noise.py │ └── phantom.py │ └── space_time │ ├── __init__.py │ ├── base.py │ ├── lowrank.py │ └── utils.py └── tests ├── __init__.py ├── conftest.py ├── test_bindings.py ├── test_denoiser.py └── test_spacetime_utils.py /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Documentation 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | schedule: 9 | interval: weekly 10 | -------------------------------------------------------------------------------- /.github/workflows/master-cd.yml: -------------------------------------------------------------------------------- 1 | name: CD 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | 9 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 10 | permissions: 11 | contents: write 12 | pages: write 13 | id-token: write 14 | 15 | # Allow one concurrent deployment 16 | concurrency: 17 | group: "pages" 18 | cancel-in-progress: true 19 | 20 | jobs: 21 | coverage: 22 | name: Deploy Coverage Results 23 | runs-on: ubuntu-latest 24 | 25 | steps: 26 | - name: Checkout 27 | uses: actions/checkout@v4 28 | 29 | - name: Set up 
Python 30 | uses: actions/setup-python@v5 31 | with: 32 | python-version: "3.10" 33 | 34 | - name: Install dependencies 35 | shell: bash -l {0} 36 | run: | 37 | python -m pip install .[test,optional] 38 | - name: Run Tests 39 | shell: bash -l {0} 40 | run: | 41 | pytest -n auto --cov=patch_denoise --cov-report xml:coverage.xml 42 | 43 | - name: Upload coverage to Codecov 44 | uses: codecov/codecov-action@v4 45 | with: 46 | flags: unittests # optional 47 | file: coverage.xml 48 | name: codecov-umbrella # optional 49 | fail_ci_if_error: false # optional (default = false) 50 | verbose: true # optional (default = false) 51 | 52 | makedocs: 53 | name: Deploy API Documentation 54 | runs-on: ubuntu-latest 55 | if: success() 56 | steps: 57 | - name: Checkout 58 | uses: actions/checkout@v4 59 | - name: Get history and tags for SCM versioning to work 60 | run: | 61 | git fetch --prune --unshallow 62 | git fetch --depth=1 origin +refs/tags/*:refs/tags/* 63 | - name: Set up Python 64 | uses: actions/setup-python@v5 65 | with: 66 | python-version: "3.12" 67 | - name: Install dependencies 68 | shell: bash -l {0} 69 | run: | 70 | python -m pip install --upgrade pip 71 | python -m pip install .[optional,doc] 72 | - name: Build API documentation 73 | run: make -C docs html 74 | - name: Upload artifact 75 | uses: actions/upload-pages-artifact@v3 76 | with: 77 | # Upload entire repository 78 | path: 'build' 79 | - name: Deploy to GitHub Pages 80 | uses: peaceiris/actions-gh-pages@v4 81 | with: 82 | publish_branch: gh-pages 83 | github_token: ${{ secrets.GITHUB_TOKEN }} 84 | publish_dir: docs/build/html 85 | force_orphan: true 86 | 87 | build-n-publish: 88 | name: Build and publish Python 🐍 distributions 📦 to TestPyPI 89 | runs-on: ubuntu-latest 90 | steps: 91 | - name: Checkout 92 | uses: actions/checkout@v4 93 | - name: Get history and tags for SCM versioning to work 94 | run: | 95 | git fetch --prune --unshallow 96 | git fetch --depth=1 origin +refs/tags/*:refs/tags/* 97 | - name: 
Set up Python 3.10 98 | uses: actions/setup-python@v5 99 | with: 100 | python-version: "3.10" 101 | 102 | - name: Install pypa/build 103 | run: | 104 | python -m pip install build twine 105 | python -m pip install . 106 | 107 | - name: Build a binary wheel and a source tarball 108 | run: | 109 | python -m build --sdist --wheel --outdir dist/ . 110 | 111 | - name: Check Dist 112 | run: | 113 | python -m twine check dist/* 114 | - name: Upload to Test PyPI 115 | run: | 116 | python -m twine upload -r testpypi -u __token__ -p ${{ secrets.TEST_PYPI_API }} dist/* 117 | -------------------------------------------------------------------------------- /.github/workflows/tags-release.yml: -------------------------------------------------------------------------------- 1 | name: Release on PyPi 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v[0-9]+.[0-9]+.[0-9]+' 7 | jobs: 8 | build-n-publish: 9 | name: Build and publish to PyPI 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v4 14 | - name: Get history and tags for SCM versioning to work 15 | run: | 16 | git fetch --prune --unshallow 17 | git fetch --depth=1 origin +refs/tags/*:refs/tags/* 18 | - name: Set up Python 3.10 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: "3.10" 22 | 23 | - name: Install pypa/build 24 | run: | 25 | python -m pip install build 26 | python -m pip install . 27 | 28 | - name: Build a binary wheel and a source tarball 29 | run: | 30 | python -m build --sdist --wheel --outdir dist/ . 
31 | 32 | - name: Publish distribution 📦 to PyPI 33 | uses: pypa/gh-action-pypi-publish@release/v1 34 | with: 35 | password: ${{ secrets.PYPI_TOKEN_RESTRICTED }} 36 | 37 | - name: Create release 38 | uses: actions/create-release@v1 39 | env: 40 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token 41 | with: 42 | tag_name: ${{ github.ref }} 43 | release_name: Release ${{ github.ref }} 44 | draft: true 45 | prerelease: false 46 | -------------------------------------------------------------------------------- /.github/workflows/test-ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | 9 | workflow_dispatch: 10 | 11 | jobs: 12 | linter-check: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | - name: Set up Python "3.10" 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: "3.10" 21 | - name: Black setup 22 | shell: bash 23 | run: pip install black ruff 24 | - name: Black Check 25 | shell: bash 26 | run: black . 
--diff --color 27 | - name: ruff Check 28 | shell: bash 29 | run: ruff check src 30 | 31 | makedocs: 32 | name: build documentation 33 | runs-on: ubuntu-latest 34 | 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v4 38 | - name: Get history and tags for SCM versioning to work 39 | run: | 40 | git fetch --prune --unshallow 41 | git fetch --depth=1 origin +refs/tags/*:refs/tags/* 42 | - name: Set up Python 43 | uses: actions/setup-python@v5 44 | with: 45 | python-version: "3.12" 46 | - name: Install dependencies 47 | shell: bash -l {0} 48 | run: | 49 | python -m pip install --upgrade pip 50 | python -m pip install .[optional,doc] 51 | 52 | - name: Build API documentation 53 | run: make -C docs html 54 | 55 | codespell: 56 | name: Check for spelling errors 57 | runs-on: ubuntu-latest 58 | 59 | steps: 60 | - name: Checkout 61 | uses: actions/checkout@v4 62 | - name: Annotate locations with typos 63 | uses: codespell-project/codespell-problem-matcher@v1 64 | - name: Codespell 65 | uses: codespell-project/actions-codespell@v2 66 | 67 | test-suite: 68 | runs-on: ${{ matrix.os }} 69 | needs: linter-check 70 | strategy: 71 | fail-fast: true 72 | matrix: 73 | os: [ubuntu-latest] 74 | python-version: ["3.12", "3.11", "3.10", "3.9"] 75 | steps: 76 | - uses: actions/checkout@v4 77 | - name: Set up Python ${{ matrix.python-version }} 78 | uses: actions/setup-python@v5 79 | with: 80 | python-version: ${{ matrix.python-version }} 81 | - name: Install Dependencies 82 | shell: bash 83 | run: | 84 | python --version 85 | python -m pip install --upgrade pip 86 | python -m pip install .[test,optional] 87 | - name: Run Tests 88 | shell: bash 89 | run: | 90 | pytest -n auto -x 91 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | *.log 4 | sg_execution_times.rst 5 | tmp/ 6 | 7 | *_cache/ 8 | *.py[cod] 9 | *.egg 10 | build 
11 | dist 12 | htmlcov 13 | *.egg-info 14 | *coverage* 15 | 16 | docs/_autosummary 17 | docs/auto_examples/ 18 | docs_build/ 19 | 20 | src/patch_denoise/_version.py 21 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Pierre-Antoine Comby 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Patch Denoising Methods 3 | ======================== 4 | 5 | 6 | |COVERAGE| |CI| |CD| 7 | |DOC| |RELEASE| |PYVERSION| 8 | 9 | |LINTER| |STYLE| |LICENSE| |CITATION| 10 | 11 | 12 | .. 
The target application is functional MRI thermal noise removal, but these methods can be applied to a wide range of image modalities.
code:: 50 | 51 | $ pip install patch-denoise 52 | 53 | patch-denoise requires Python>=3.9 54 | 55 | 56 | Quickstart 57 | ========== 58 | 59 | After installing you can use the ``patch-denoise`` command-line. 60 | 61 | .. code:: 62 | 63 | $ patch-denoise input_file.nii output_file.nii --mask="auto" 64 | 65 | See ``patch-denoise --help`` for detailed options. 66 | 67 | Documentation and Examples 68 | ========================== 69 | 70 | Documentation and examples are available at https://paquiteau.github.io/patch-denoising/ 71 | 72 | 73 | Development version 74 | =================== 75 | 76 | .. code:: 77 | 78 | $ git clone https://github.com/paquiteau/patch-denoising 79 | $ pip install -e patch-denoising[dev,doc,test,optional] 80 | 81 | Citation 82 | ======== 83 | 84 | If you use this package for academic work, please cite the associated publication, available on `HAL `_ :: 85 | 86 | @inproceedings{comby2023, 87 | TITLE = {{Denoising of fMRI volumes using local low rank methods}}, 88 | AUTHOR = {Pierre-Antoine, Comby and Zaineb, Amor and Alexandre, Vignaud and Philippe, Ciuciu}, 89 | URL = {https://hal.science/hal-03895194}, 90 | BOOKTITLE = {{ISBI 2023 - International Symposium on Biomedical Imaging 2023}}, 91 | ADDRESS = {Carthagena de India, Colombia}, 92 | YEAR = {2023}, 93 | MONTH = Apr, 94 | KEYWORDS = {functional MRI ; patch denoising ; singular value thresholding ; functional MRI patch denoising singular value thresholding}, 95 | PDF = {https://hal.science/hal-03895194/file/isbi2023_denoise.pdf}, 96 | HAL_ID = {hal-03895194}, 97 | HAL_VERSION = {v1}, 98 | } 99 | 100 | 101 | Related Packages 102 | ================ 103 | 104 | - https://github.com/paquiteau/retino-pypeline 105 | 106 | For the application of the denoising in an fMRI pypeline using Nipype 107 | 108 | - https://github.com/CEA-COSMIC/ModOpt 109 | 110 | For the integration of the patch-denoising in convex optimisation algorithms. 
111 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= --verbose 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | clean: 18 | rm -fr build auto_examples _autosummary 19 | rm -f sg_execution_times.rst 20 | 21 | # Catch-all target: route all unknown targets to Sphinx using the new 22 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 23 | %: Makefile 24 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 25 | -------------------------------------------------------------------------------- /docs/_templates/custom-class-template.rst: -------------------------------------------------------------------------------- 1 | {{ name | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :members: 7 | :private-members: 8 | :show-inheritance: 9 | :special-members: __call__, __add__, __mul__, __matmul__ 10 | 11 | {% block methods %} 12 | {% if methods %} 13 | .. rubric:: {{ _('Methods') }} 14 | 15 | .. autosummary:: 16 | :nosignatures: 17 | {% for item in methods %} 18 | ~{{ name }}.{{ item }} 19 | {%- endfor %} 20 | {% endif %} 21 | {% endblock %} 22 | 23 | {% block attributes %} 24 | {% if attributes %} 25 | .. rubric:: {{ _('Attributes') }} 26 | 27 | .. 
autosummary:: 28 | {% for item in attributes %} 29 | ~{{ name }}.{{ item }} 30 | {%- endfor %} 31 | {% endif %} 32 | {% endblock %} 33 | -------------------------------------------------------------------------------- /docs/_templates/custom-module-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: Module attributes 8 | 9 | .. autosummary:: 10 | :toctree: 11 | {% for item in attributes %} 12 | {{ item }} 13 | {%- endfor %} 14 | {% endif %} 15 | {% endblock %} 16 | 17 | {% block functions %} 18 | {% if functions %} 19 | .. rubric:: {{ _('Functions') }} 20 | 21 | .. autosummary:: 22 | :toctree: 23 | :nosignatures: 24 | {% for item in functions %} 25 | {{ item }} 26 | {%- endfor %} 27 | {% endif %} 28 | {% endblock %} 29 | 30 | {% block classes %} 31 | {% if classes %} 32 | .. rubric:: {{ _('Classes') }} 33 | 34 | .. autosummary:: 35 | :toctree: 36 | :template: custom-class-template.rst 37 | :nosignatures: 38 | {% for item in classes %} 39 | {{ item }} 40 | {%- endfor %} 41 | {% endif %} 42 | {% endblock %} 43 | 44 | {% block exceptions %} 45 | {% if exceptions %} 46 | .. rubric:: {{ _('Exceptions') }} 47 | 48 | .. autosummary:: 49 | :toctree: 50 | {% for item in exceptions %} 51 | {{ item }} 52 | {%- endfor %} 53 | {% endif %} 54 | {% endblock %} 55 | 56 | {% block modules %} 57 | {% if modules %} 58 | .. autosummary:: 59 | :toctree: 60 | :template: custom-module-template.rst 61 | :recursive: 62 | {% for item in modules %} 63 | {{ item }} 64 | {%- endfor %} 65 | {% endif %} 66 | {% endblock %} 67 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | .. 
autosummary:: 5 | :toctree: _autosummary 6 | :template: custom-module-template.rst 7 | :recursive: 8 | 9 | patch_denoise 10 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration file for the Sphinx documentation builder. 3 | 4 | This file only contains a selection of the most common options. For a full 5 | list see the documentation: 6 | https://www.sphinx-doc.org/en/master/usage/configuration.html 7 | """ 8 | # -- Path setup -------------------------------------------------------------- 9 | 10 | # If extensions (or modules to document with autodoc) are in another directory, 11 | # add these directories to sys.path here. If the directory is relative to the 12 | # documentation root, use os.path.abspath to make it absolute, like shown here. 13 | # 14 | 15 | import os 16 | import sys 17 | from importlib.metadata import version 18 | 19 | sys.path.insert(0, os.path.abspath("..")) # Source code dir relative to this file 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = "patch-denoise" 24 | copyright = "2022, Pierre-Antoine Comby" 25 | author = "Pierre-Antoine Comby" 26 | 27 | release = version("patch-denoise") 28 | # for example take major/minor 29 | version = ".".join(release.split(".")[:2]) 30 | 31 | # -- General configuration --------------------------------------------------- 32 | 33 | # Add any Sphinx extension module names here, as strings. They can be 34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 | # ones. 
36 | extensions = [ 37 | "sphinx.ext.duration", 38 | "sphinx.ext.autodoc", 39 | "sphinx.ext.autosummary", 40 | "sphinx.ext.doctest", 41 | "sphinx.ext.intersphinx", 42 | "sphinx.ext.mathjax", 43 | "sphinx.ext.viewcode", 44 | "sphinx.ext.napoleon", 45 | # "sphinx_gallery.gen_gallery", 46 | "sphinxarg.ext" 47 | ] 48 | 49 | # Add any paths that contain templates here, relative to this directory. 50 | templates_path = ["_templates"] 51 | 52 | # List of patterns, relative to source directory, that match files and 53 | # directories to ignore when looking for source files. 54 | # This pattern also affects html_static_path and html_extra_path. 55 | exclude_patterns = ["build", "Thumbs.db", ".DS_Store"] 56 | 57 | _python_doc_base = "https://docs.python.org/3.9" 58 | 59 | intersphinx_mapping = { 60 | "python": (_python_doc_base, None), 61 | "numpy": ("https://numpy.org/doc/stable/", None), 62 | "scipy": ("https://scipy.github.io/devdocs/", None), 63 | "matplotlib": ("https://matplotlib.org/stable/", None), 64 | } 65 | 66 | # generate autosummary even if no references 67 | autosummary_generate = True 68 | # autosummary_imported_members = True 69 | autodoc_inherit_docstrings = True 70 | 71 | napoleon_include_private_with_doc = True 72 | 73 | # -- Options for Sphinx Gallery ---------------------------------------------- 74 | 75 | sphinx_gallery_conf = { 76 | "examples_dirs": ["../examples/"], 77 | "filename_pattern": "/example_", 78 | 'ignore_pattern': 'conftest.py', 79 | 'example_extensions': {'.py'}, 80 | "gallery_dirs" : ["auto_examples"], 81 | "reference_url": { 82 | "numpy": "http://docs.scipy.org/doc/numpy-1.9.1", 83 | "scipy": "http://docs.scipy.org/doc/scipy-0.17.0/reference", 84 | }, 85 | } 86 | 87 | 88 | # -- Options for HTML output ------------------------------------------------- 89 | 90 | # The theme to use for HTML and HTML Help pages. See the documentation for 91 | # a list of builtin themes. 
The extraction of a patch consists of selecting a spatial region of the image and taking all the time information associated with this region.
The patch is then flattened into a 2D matrix (a so-called Casorati matrix). A row represents the temporal evolution of a single voxel, and a column is the flattened image at a specific time point.
Moreover, a mask defining a Region of Interest (ROI) can be used to determine whether a patch should be processed or not (and save computations).

.. warning::
    The size of the patch and the overlap are the main factors in computational cost. Moreover, for the SVD-based processing to work well, it is required to have a "tall" matrix, i.e. the number of rows must be greater than the number of columns.
- Sparse patch promotion. The patch values for a pixel are averaged with weights :math:`\theta`. This weighted method comes from [1]_ (available with ``recombination='weighted'``). Let :math:`P` be the number of patches overlapping voxel :math:`x_i`, and :math:`\hat{x_i}(p)` the value associated with each patch; the final value of pixel :math:`\hat{x_i}` is
MP-PCA [2]_ uses the Marchenko-Pastur distribution to find a threshold for each patch. In particular, the noise variance is estimated from the eigenvalues (squared singular values) and is used to determine the threshold. (See equations 10-12 in the reference).
seealso:: 114 | :class:`~patch_denoise.space_time.lowrank.OptimalSVDDenoiser` 115 | 116 | Adaptive Thresholding 117 | ~~~~~~~~~~~~~~~~~~~~~ 118 | 119 | Extending the possibility of optimal thresholding using SURE in presence of noise variance estimation [6]_. 120 | 121 | .. seealso:: 122 | :class:`~patch_denoise.space_time.lowrank.AdaptiveDenoiser` 123 | 124 | 125 | 126 | References 127 | ---------- 128 | .. [1] Manjón, José V., Pierrick Coupé, Luis Concha, Antonio Buades, D. Louis Collins, and Montserrat Robles. “Diffusion Weighted Image Denoising Using Overcomplete Local PCA.” PLOS ONE 8, no. 9 (September 3, 2013): e73021. https://doi.org/10.1371/journal.pone.0073021. 129 | 130 | .. [2] Veraart, Jelle, Dmitry S. Novikov, Daan Christiaens, Benjamin Ades-Aron, Jan Sijbers, and Els Fieremans. “Denoising of Diffusion MRI Using Random Matrix Theory.” NeuroImage 142 (November 15, 2016): 394–406. https://doi.org/10.1016/j.neuroimage.2016.08.016. 131 | 132 | .. [3] https://submissions.mirasmart.com/ISMRM2022/Itinerary/Files/PDFFiles/2688.html 133 | 134 | .. [4] Moeller, Steen, Pramod Kumar Pisharady, Sudhir Ramanna, Christophe Lenglet, Xiaoping Wu, Logan Dowdle, Essa Yacoub, Kamil Uğurbil, and Mehmet Akçakaya. “NOise Reduction with DIstribution Corrected (NORDIC) PCA in DMRI with Complex-Valued Parameter-Free Locally Low-Rank Processing.” NeuroImage 226 (February 1, 2021): 117539. https://doi.org/10.1016/j.neuroimage.2020.117539. 135 | .. [5] Gavish, Matan, and David L. Donoho. “Optimal Shrinkage of Singular Values.” IEEE Transactions on Information Theory 63, no. 4 (April 2017): 2137–52. https://doi.org/10.1109/TIT.2017.2653801. 136 | .. [6] J. Josse and S. Sardy, “Adaptive Shrinkage of singular values.” arXiv, Nov. 22, 2014. doi: 10.48550/arXiv.1310.6602. 137 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. 
patch-denoise documentation master file, created by 2 | sphinx-quickstart on Mon Jul 11 09:45:05 2022. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. include:: ../README.rst 7 | 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Contents 12 | 13 | api 14 | denoisers 15 | usage 16 | 17 | .. auto_examples/index 18 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | ############ 2 | Command line 3 | ############ 4 | 5 | ``patch-denoise`` minimally requires a path to a NIfTI file, 6 | but it can take advantage of reconstructed phase data and/or noise volumes. 7 | 8 | .. argparse:: 9 | :ref: patch_denoise.bindings.cli._get_parser 10 | :prog: patch-denoise 11 | :func: _get_parser 12 | 13 | 14 | ================================ 15 | Using patch-denoise on BIDS data 16 | ================================ 17 | 18 | .. warning:: 19 | These examples assume that the phase data are in radians. 20 | If they are in arbitrary units, 21 | you will need to rescale them before running patch-denoise. 22 | 23 | 24 | Magnitude only 25 | ============== 26 | 27 | .. code-block:: bash 28 | 29 | patch-denoise \ 30 | sub-01/func/sub-01_task-rest_part-mag_bold.nii.gz \ 31 | sub-01_task-rest_part-mag_desc-denoised_bold.nii.gz \ 32 | --mask auto \ 33 | --method optimal-fro \ 34 | --patch-shape 11 \ 35 | --patch-overlap 5 \ 36 | --recombination weighted \ 37 | --mask-threshold 1 38 | 39 | 40 | Magnitude with noise volumes 41 | ============================ 42 | 43 | .. 
code-block:: bash 44 | 45 | patch-denoise \ 46 | sub-01/func/sub-01_task-rest_part-mag_bold.nii.gz \ 47 | sub-01_task-rest_part-mag_desc-denoised_bold.nii.gz \ 48 | --noise-map sub-01/func/sub-01_task-rest_part-mag_noRF.nii.gz \ 49 | --mask auto \ 50 | --method optimal-fro \ 51 | --patch-shape 11 \ 52 | --patch-overlap 5 \ 53 | --recombination weighted \ 54 | --mask-threshold 1 55 | 56 | 57 | Magnitude and phase 58 | =================== 59 | 60 | .. code-block:: bash 61 | 62 | patch-denoise \ 63 | sub-01/func/sub-01_task-rest_part-mag_bold.nii.gz \ 64 | sub-01_task-rest_part-mag_desc-denoised_bold.nii.gz \ 65 | --input-phase sub-01/func/sub-01_task-rest_part-phase_bold.nii.gz \ 66 | --mask auto \ 67 | --method optimal-fro \ 68 | --patch-shape 11 \ 69 | --patch-overlap 5 \ 70 | --recombination weighted \ 71 | --mask-threshold 1 72 | 73 | 74 | Magnitude and phase with noise volumes 75 | ====================================== 76 | 77 | .. code-block:: bash 78 | 79 | patch-denoise \ 80 | sub-01/func/sub-01_task-rest_part-mag_bold.nii.gz \ 81 | sub-01_task-rest_part-mag_desc-denoised_bold.nii.gz \ 82 | --input-phase sub-01/func/sub-01_task-rest_part-phase_bold.nii.gz \ 83 | --noise-map sub-01/func/sub-01_task-rest_part-mag_noRF.nii.gz \ 84 | --noise-map-phase sub-01/func/sub-01_task-rest_part-phase_noRF.nii.gz \ 85 | --mask auto \ 86 | --method optimal-fro \ 87 | --patch-shape 11 \ 88 | --patch-overlap 5 \ 89 | --recombination weighted \ 90 | --mask-threshold 1 91 | -------------------------------------------------------------------------------- /examples/README.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | This folders contains examples showing how to the patch-based denoising methods. 5 | 6 | Results are demonstrated on small scaled simulation data for the sake of reproducibility. 
7 | -------------------------------------------------------------------------------- /examples/conftest.py: -------------------------------------------------------------------------------- 1 | """Configuration for testing the example scripts.""" 2 | from pathlib import Path 3 | import runpy 4 | import pytest 5 | 6 | 7 | def pytest_collect_file(path, parent): 8 | """Pytest hook. 9 | 10 | Create a collector for the given path, or None if not relevant. 11 | The new node needs to have the specified parent as parent. 12 | """ 13 | p = Path(path) 14 | if p.suffix == ".py" and "example" in p.name: 15 | return Script.from_parent(parent, path=p, name=p.name) 16 | 17 | 18 | class Script(pytest.File): 19 | """Script files collected by pytest.""" 20 | 21 | def collect(self): 22 | """Collect the script as its own item.""" 23 | yield ScriptItem.from_parent(self, name=self.name) 24 | 25 | 26 | class ScriptItem(pytest.Item): 27 | """Item script collected by pytest.""" 28 | 29 | def runtest(self) -> None: 30 | """Run the script as a test.""" 31 | runpy.run_path(str(self.path)) 32 | 33 | def repr_failure(self, excinfo): 34 | """Return only the error traceback of the script.""" 35 | excinfo.traceback = excinfo.traceback.cut(path=self.path) 36 | return super().repr_failure(excinfo) 37 | -------------------------------------------------------------------------------- /examples/example_experimental_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | Experimental Data denoising 3 | =========================== 4 | """ 5 | 6 | 7 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "patch-denoise" 3 | description = "Denoising method for sequence of images or volumes. Primarily targeting fMRI data." 
4 | authors = [{name="Pierre-antoine Comby", email="pierre-antoine.comby@crans.org"}] 5 | 6 | readme = "README.rst" 7 | license = {file = "LICENSE.txt"} 8 | dependencies = ["numpy", "scipy", "matplotlib", "scikit-image", "tqdm", "nibabel"] 9 | requires-python = ">=3.9" 10 | 11 | dynamic = ["version"] 12 | 13 | classifiers = [ 14 | "Development Status :: 3 - Alpha", 15 | "Environment :: Console", 16 | "Intended Audience :: Science/Research", 17 | "License :: OSI Approved :: MIT License", 18 | "Operating System :: OS Independent", 19 | "Programming Language :: Python", 20 | "Programming Language :: Python :: 3.9", 21 | "Programming Language :: Python :: 3.10", 22 | "Programming Language :: Python :: 3.11", 23 | "Programming Language :: Python :: 3.12", 24 | "Programming Language :: Python :: 3 :: Only", 25 | "Topic :: Scientific/Engineering :: Image Processing", 26 | "Topic :: Scientific/Engineering :: Medical Science Apps.", 27 | ] 28 | 29 | [project.optional-dependencies] 30 | optional = ["modopt", "nipype", "numba"] 31 | test = ["pytest", "pytest-cov", "pytest-xdist", "pytest-sugar"] 32 | 33 | doc = [ 34 | "pydata-sphinx-theme", 35 | "numpydoc", 36 | "sphinx_gallery", 37 | "sphinx-argparse", 38 | "sphinx", 39 | ] 40 | 41 | dev = ["black", "isort", "ruff"] 42 | 43 | [project.scripts] 44 | patch-denoise = "patch_denoise.bindings.cli:main" 45 | 46 | [build-system] 47 | requires = ["setuptools", "setuptools-scm[toml]", "wheel"] 48 | 49 | [tool.setuptools_scm] 50 | write_to = "src/patch_denoise/_version.py" 51 | version_scheme = "python-simplified-semver" 52 | local_scheme="no-local-version" 53 | 54 | [tool.coverage.run] 55 | omit = ["*tests*"] 56 | 57 | [tool.coverage.report] 58 | precision = 2 59 | exclude_lines = ["pragma: no cover", "raise NotImplementedError"] 60 | 61 | # Formatting using black. 
62 | [tool.black] 63 | 64 | #linting using ruff 65 | [tool.ruff] 66 | src = ["src", "tests"] 67 | 68 | [tool.ruff.lint] 69 | ignore = ["B905"] 70 | exclude = ["examples/", "tests/"] 71 | select = ["E", "F", "B", "Q", "UP", "D"] 72 | 73 | [tool.ruff.lint.pydocstyle] 74 | convention="numpy" 75 | 76 | [tool.isort] 77 | profile="black" 78 | 79 | [tool.pytest.ini_options] 80 | minversion = "6.0" 81 | addopts = [ 82 | "--cov=patch_denoise", 83 | "--cov-report=term-missing", 84 | "--cov-report=xml" 85 | ] 86 | 87 | [tool.codespell] 88 | # Ref: https://github.com/codespell-project/codespell#using-a-config-file 89 | skip = '.git*' 90 | check-hidden = true 91 | ignore-regex = '\bND\b' 92 | ignore-words-list = 'fro' 93 | -------------------------------------------------------------------------------- /src/patch_denoise/__init__.py: -------------------------------------------------------------------------------- 1 | """Collection of patch-based denoising methods.""" 2 | from patch_denoise.denoise import ( 3 | mp_pca, 4 | hybrid_pca, 5 | optimal_thresholding, 6 | adaptive_thresholding, 7 | raw_svt, 8 | nordic, 9 | ) 10 | 11 | from patch_denoise.space_time.lowrank import ( 12 | AdaptiveDenoiser, 13 | HybridPCADenoiser, 14 | MPPCADenoiser, 15 | NordicDenoiser, 16 | OptimalSVDDenoiser, 17 | RawSVDDenoiser, 18 | ) 19 | 20 | __all__ = [ 21 | "AdaptiveDenoiser", 22 | "HybridPCADenoiser", 23 | "MPPCADenoiser", 24 | "NordicDenoiser", 25 | "OptimalSVDDenoiser", 26 | "RawSVDDenoiser", 27 | "mp_pca", 28 | "hybrid_pca", 29 | "optimal_thresholding", 30 | "adaptive_thresholding", 31 | "raw_svt", 32 | "nordic", 33 | ] 34 | 35 | try: 36 | # -- Distribution mode -- 37 | # import from _version.py generated by setuptools_scm during release 38 | from ._version import version as __version__ 39 | except ImportError: 40 | # -- Source mode -- 41 | # use setuptools_scm to get the current version from src using git 42 | from setuptools_scm import get_version as _gv 43 | from os import path as _path 44 | 
45 | try: 46 | __version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir)) 47 | except LookupError: 48 | __version__ = "0.0.0+unknown" 49 | -------------------------------------------------------------------------------- /src/patch_denoise/_docs.py: -------------------------------------------------------------------------------- 1 | """Docstring utils. 2 | 3 | docdict contains the standard argument documentation found across the package. 4 | 5 | Docstring can then use templated argument such as ``$patch_config`` that will be 6 | substitute by their definition (see docdict items). 7 | 8 | """ 9 | 10 | import inspect 11 | from string import Template 12 | 13 | 14 | docdict = dict( 15 | patch_config=""" 16 | patch_shape: tuple 17 | The patch shape 18 | patch_overlap: tuple 19 | the overlap of each pixel 20 | recombination: str, optional 21 | The recombination method of the patch. "weighted", "average" or "center". 22 | default "weighted".""", 23 | mask_config=""" 24 | mask: numpy.ndarray 25 | A boolean array, defining a ROI in the volume. Only patch with voxels in the ROI 26 | will be processed. 27 | mask_threshold: int 28 | percentage of the path that has to be in the mask so that the patch is processed. 29 | if mask_threshold = -1, all the patch are processed, if mask_threshold=100, all 30 | the voxels of the patch needs to be in the mask""", 31 | denoise_return=""" 32 | tuple 33 | numpy.ndarray: The denoised sequence of volume 34 | numpy.ndarray: The weight of each pixel after the processing. 35 | numpy.ndarray: If possible, the noise variance distribution in the volume 36 | numpy.ndarray: If possible, the rank of each patch in the volume.""", 37 | input_config=""" 38 | input_data: numpy.ndarray 39 | The input data to denoise. It should be a ND array, and the last 40 | dimension should a dynamically varying one (eg time). 41 | progbar: tqdm.tqdm Progress bar, optiononal 42 | An existing Progressbar, default (None) will create a new one. 
43 | """, 44 | noise_std=""" 45 | noise_std: float or numpy.ndarray 46 | An estimation of the spatial noise map standard deviation.""", 47 | ) 48 | 49 | # complete the standard config with patch and mask configuration. 50 | docdict["standard_config"] = ( 51 | docdict["input_config"] + docdict["patch_config"] + docdict["mask_config"] 52 | ) 53 | 54 | 55 | def fill_doc(f): 56 | """Fill a docstring with docdict entries. 57 | 58 | Parameters 59 | ---------- 60 | f : callable 61 | The function to fill the docstring of. Will be modified in place. 62 | 63 | Returns 64 | ------- 65 | f : callable 66 | The function, potentially with an updated ``__doc__``. 67 | """ 68 | docstring = f.__doc__ 69 | if not docstring: 70 | return f 71 | try: 72 | docstring = Template(inspect.cleandoc(docstring)).safe_substitute(docdict) 73 | # remove possible gap between headline and body. 74 | f.__doc__ = docstring.replace("---\n\n", "---\n") 75 | except (TypeError, ValueError, KeyError) as exp: 76 | funcname = f.__name__ 77 | raise RuntimeError(f"Error documenting {funcname}s:\n{str(exp)}") from exp 78 | return f 79 | -------------------------------------------------------------------------------- /src/patch_denoise/bindings/__init__.py: -------------------------------------------------------------------------------- 1 | """Bindings for different external packages. 2 | 3 | 3 Bindings are available: 4 | 5 | - A `Nipype `_ binding 6 | - A `Modopt `_ binding 7 | - A Command line interface, available as ``patch-denoise`` after installation. 
8 | 9 | """ 10 | -------------------------------------------------------------------------------- /src/patch_denoise/bindings/cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Cli interface.""" 3 | 4 | import argparse 5 | import logging 6 | from functools import partial 7 | from pathlib import Path 8 | 9 | import numpy as np 10 | 11 | from .utils import ( 12 | DENOISER_MAP, 13 | DenoiseParameters, 14 | compute_mask, 15 | load_as_array, 16 | save_array, 17 | load_complex_nifti, 18 | ) 19 | from patch_denoise import __version__ 20 | 21 | 22 | DENOISER_NAMES = ", ".join(d for d in DENOISER_MAP if d) 23 | 24 | 25 | def _path_exists(path, parser): 26 | """Ensure a given path exists.""" 27 | if path is None or not Path(path).exists(): 28 | raise parser.error(f"Path does not exist: <{path}>.") 29 | return Path(path).absolute() 30 | 31 | 32 | def _is_file(path, parser): 33 | """Ensure a given path exists and it is a file.""" 34 | path = _path_exists(path, parser) 35 | if not path.is_file(): 36 | raise parser.error( 37 | f"Path should point to a file (or symlink of file): <{path}>." 38 | ) 39 | return path 40 | 41 | 42 | def _positive_int(string, is_parser=True): 43 | """Check if argument is an integer >= 0.""" 44 | error = argparse.ArgumentTypeError if is_parser else ValueError 45 | try: 46 | intarg = int(string) 47 | except ValueError: 48 | msg = "Argument must be a nonnegative integer." 
49 | raise error(msg) from None 50 | 51 | if intarg < 0: 52 | raise error("Int argument must be nonnegative.") 53 | return intarg 54 | 55 | 56 | class ToDict(argparse.Action): 57 | """A custom argparse "store" action to handle a list of key=value pairs.""" 58 | 59 | def __call__(self, parser, namespace, values, option_string=None): # noqa: U100 60 | """Call the argument.""" 61 | d = {} 62 | for spec in values: 63 | try: 64 | key, value = spec.split("=") 65 | except ValueError: 66 | raise ValueError( 67 | "Extra arguments must be in the form key=value." 68 | ) from None 69 | 70 | # Convert any float-like values to float 71 | try: 72 | value = float(value) 73 | except ValueError: 74 | pass 75 | 76 | d[key] = value 77 | setattr(namespace, self.dest, d) 78 | 79 | 80 | def _get_parser(): 81 | parser = argparse.ArgumentParser( 82 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 83 | ) 84 | IsFile = partial(_is_file, parser=parser) 85 | PositiveInt = partial(_positive_int, is_parser=True) 86 | 87 | parser.add_argument( 88 | "input_file", 89 | help="Input (noisy) file.", 90 | type=IsFile, 91 | ) 92 | parser.add_argument( 93 | "output_file", 94 | nargs="?", 95 | default=None, 96 | type=Path, 97 | help=("Output (denoised) file.\nDefault is D."), 98 | ) 99 | 100 | parser.add_argument("--version", action="version", version=__version__) 101 | 102 | denoising_group = parser.add_argument_group("Denoising parameters") 103 | 104 | conf_vs_separate = denoising_group.add_mutually_exclusive_group(required=True) 105 | conf_vs_separate.add_argument( 106 | "--method", 107 | help=( 108 | "Denoising method.\n" 109 | f"Available denoising methods:\n {DENOISER_NAMES}.\n" 110 | "This parameter is mutually exclusive with --conf." 
111 | ), 112 | choices=DENOISER_MAP, 113 | default="optimal-fro", 114 | ) 115 | 116 | denoising_group.add_argument( 117 | "--patch-shape", 118 | help=( 119 | "Patch shape.\n" 120 | "If int, this is the size of the patch in each dimension.\n" 121 | "If not specified, the default value is used.\n" 122 | "Note: setting a low aspect ratio will increase the number of " 123 | "patches to be processed, " 124 | "and will increase memory usage and computation times.\n" 125 | "This parameter should be used in conjunction with --method and " 126 | "is mutually exclusive with --conf." 127 | ), 128 | default=11, 129 | type=PositiveInt, 130 | metavar="INT", 131 | ) 132 | denoising_group.add_argument( 133 | "--patch-overlap", 134 | help=( 135 | "Patch overlap.\n" 136 | "If int, this is the size of the overlap in each dimension.\n" 137 | "If not specified, the default value is used.\n" 138 | "Note: setting a low overlap will increase the number of patches " 139 | "to be processed, " 140 | "and will increase memory usage and computation times.\n" 141 | "This parameter should be used in conjunction with --method and " 142 | "is mutually exclusive with --conf." 143 | ), 144 | default=5, 145 | type=PositiveInt, 146 | metavar="INT", 147 | ) 148 | denoising_group.add_argument( 149 | "--recombination", 150 | help=( 151 | "Recombination method.\n" 152 | "If 'mean', the mean of the overlapping patches is used.\n" 153 | "If 'weighted', the weighted mean of the overlapping patches is used.\n" 154 | "This parameter should be used in conjunction with --method and " 155 | "is mutually exclusive with --conf." 
156 | ), 157 | default="weighted", 158 | choices=["mean", "weighted"], 159 | ) 160 | denoising_group.add_argument( 161 | "--mask-threshold", 162 | help=( 163 | "Mask threshold.\n" 164 | "If int, this is the threshold for the mask.\n" 165 | "If not specified, the default value is used.\n" 166 | "This parameter should be used in conjunction with --method and " 167 | "is mutually exclusive with --conf." 168 | ), 169 | default=10, 170 | type=int, 171 | metavar="INT", 172 | ) 173 | conf_vs_separate.add_argument( 174 | "--conf", 175 | help=( 176 | "Denoising configuration.\n" 177 | "Format should be " 178 | "____.\n" 179 | "See Documentation of 'DenoiseParameter.from_str' for full specification.\n" 180 | f"Available denoising methods:\n {DENOISER_NAMES}.\n" 181 | "This parameter is mutually exclusive with --method." 182 | ), 183 | default=None, 184 | ) 185 | 186 | denoising_group.add_argument( 187 | "--extra", 188 | metavar="key=value", 189 | default=None, 190 | nargs="+", 191 | help="extra key=value arguments for denoising methods.", 192 | action=ToDict, 193 | ) 194 | 195 | data_group = parser.add_argument_group("Additional input data") 196 | data_group.add_argument( 197 | "--mask", 198 | metavar="FILE|auto", 199 | default=None, 200 | help=( 201 | "mask file, if auto or not provided" 202 | " it would be determined from the average image." 203 | ), 204 | ) 205 | data_group.add_argument( 206 | "--noise-map", 207 | metavar="FILE", 208 | default=None, 209 | type=IsFile, 210 | help="noise map estimation file", 211 | ) 212 | data_group.add_argument( 213 | "--input-phase", 214 | metavar="FILE", 215 | default=None, 216 | type=IsFile, 217 | help=( 218 | "Phase of the input data. This MUST be in radians. " 219 | "No rescaling will be applied." 220 | ), 221 | ) 222 | data_group.add_argument( 223 | "--noise-map-phase", 224 | metavar="FILE", 225 | default=None, 226 | type=IsFile, 227 | help=( 228 | "Phase component of the noise map estimation file. " 229 | "This MUST be in radians. 
No rescaling will be applied." 230 | ), 231 | ) 232 | 233 | misc_group = parser.add_argument_group("Miscellaneous options") 234 | misc_group.add_argument( 235 | "--time-slice", 236 | help=( 237 | "Slice across time. \n" 238 | "If x the patch will be N times longer in space than in time \n" 239 | "If int, this is the size of the time dimension patch. \n" 240 | "If not specified, the whole time series is used. \n" 241 | "Note: setting a low aspect ratio will increase the number of patch to be" 242 | "processed, and will increase memory usage and computation times." 243 | ), 244 | default=None, 245 | type=str, 246 | ) 247 | misc_group.add_argument( 248 | "--output-noise-map", 249 | metavar="FILE", 250 | default=None, 251 | type=Path, 252 | help="Output name for calculated noise map", 253 | ) 254 | misc_group.add_argument( 255 | "--nan-to-num", 256 | metavar="VALUE", 257 | default=None, 258 | type=float, 259 | help="Replace NaN by the provided value.", 260 | ) 261 | misc_group.add_argument( 262 | "-v", 263 | "--verbose", 264 | action="count", 265 | default=0, 266 | help="Increase verbosity level. You can provide multiple times (e.g., -vvv).", 267 | ) 268 | return parser 269 | 270 | 271 | def parse_args(): 272 | """Parse input arguments.""" 273 | parser = _get_parser() 274 | args = parser.parse_args() 275 | 276 | # default value for output. 
277 | if args.output_file is None: 278 | args.output_file = args.input_file.with_stem("D" + args.input_file.stem) 279 | 280 | if not args.extra: 281 | args.extra = {} 282 | 283 | levels = [logging.WARNING, logging.INFO, logging.DEBUG] 284 | level = levels[min(args.verbose, len(levels) - 1)] # cap to last level index 285 | logging.basicConfig(level=level) 286 | 287 | return args 288 | 289 | 290 | def main(): 291 | """Command line entry point.""" 292 | args = parse_args() 293 | 294 | if args.input_phase is not None: 295 | input_data, affine = load_complex_nifti(args.input_file, args.input_phase) 296 | else: 297 | input_data, affine = load_as_array(args.input_file) 298 | 299 | kwargs = args.extra 300 | 301 | if args.nan_to_num is not None: 302 | input_data = np.nan_to_num(input_data, nan=args.nan_to_num) 303 | 304 | n_nans = np.isnan(input_data).sum() 305 | if n_nans > 0: 306 | logging.warning( 307 | f"{n_nans}/{input_data.size} voxels are NaN. " 308 | "You might want to use --nan-to-num=", 309 | stacklevel=0, 310 | ) 311 | 312 | if args.mask == "auto": 313 | mask = compute_mask(input_data) 314 | affine_mask = None 315 | else: 316 | mask, affine_mask = load_as_array(args.mask) 317 | 318 | if args.noise_map is not None and args.noise_map_phase is not None: 319 | noise_map, affine_noise_map = load_complex_nifti( 320 | args.noise_map, 321 | args.noise_map_phase, 322 | ) 323 | elif args.noise_map is not None: 324 | noise_map, affine_noise_map = load_as_array(args.noise_map) 325 | elif args.noise_map_phase is not None: 326 | raise ValueError( 327 | "The phase component of the noise map has been provided, " 328 | "but not the magnitude." 
329 | ) 330 | else: 331 | noise_map = None 332 | affine_noise_map = None 333 | 334 | if affine is not None: 335 | if (affine_mask is not None) and not np.allclose(affine, affine_mask): 336 | logging.warning( 337 | "Affine matrix of input and mask does not match", stacklevel=2 338 | ) 339 | 340 | if (affine_noise_map is not None) and not np.allclose(affine, affine_noise_map): 341 | logging.warning( 342 | "Affine matrix of input and noise map does not match", stacklevel=2 343 | ) 344 | 345 | # Parse configuration string instead of defining each parameter separately 346 | if args.conf is not None: 347 | d_par = DenoiseParameters.from_str(args.conf) 348 | args.method = d_par.method 349 | args.patch_shape = d_par.patch_shape 350 | args.patch_overlap = d_par.patch_overlap 351 | args.recombination = d_par.recombination 352 | args.mask_threshold = d_par.mask_threshold 353 | 354 | if isinstance(args.time_slice, str): 355 | if args.time_slice.endswith("x"): 356 | t = float(args.time_slice[:-1]) 357 | t = int(args.patch_shape ** (input_data.ndim - 1) / t) 358 | else: 359 | t = int(args.time_slice) 360 | 361 | args.patch_shape = (args.patch_shape,) * (input_data.ndim - 1) + (t,) 362 | 363 | denoise_func = DENOISER_MAP[args.method] 364 | 365 | if args.method in [ 366 | "nordic", 367 | "hybrid-pca", 368 | "adaptive-qut", 369 | "optimal-fro-noise", 370 | ]: 371 | kwargs["noise_std"] = noise_map 372 | if noise_map is None: 373 | raise RuntimeError("A noise map must be specified for this method.") 374 | 375 | denoised_data, _, noise_std_map, _ = denoise_func( 376 | input_data, 377 | patch_shape=args.patch_shape, 378 | patch_overlap=args.patch_overlap, 379 | mask=mask, 380 | mask_threshold=args.mask_threshold, 381 | recombination=args.recombination, 382 | **kwargs, 383 | ) 384 | 385 | save_array(denoised_data, affine, args.output_file) 386 | save_array(noise_std_map, affine, args.output_noise_map) 387 | 388 | 389 | if __name__ == "__main__": 390 | main() 391 | 
# ------------------------------------------------------------------------------
# src/patch_denoise/bindings/modopt.py
"""Binding for the ModOpt package."""

import numpy as np
from modopt.opt.proximity import ProximityParent

from .utils import DENOISER_MAP


class LLRDenoiserOperator(ProximityParent):
    """Proximal-operator drop-in replacement using local low-rank denoising.

    Parameters
    ----------
    denoiser: str
        Name of the denoising method (a key of ``DENOISER_MAP``).
    patch_shape: tuple
        The patch shape.
    patch_overlap: tuple
        The overlap of each pixel between neighbouring patches.
    recombination: str, optional
        Recombination method for overlapping patches, default ``"weighted"``.
    mask: numpy.ndarray, optional
        A boolean array defining a ROI in the volume. Only patches with voxels
        in the ROI will be processed.
    mask_threshold: int, optional
        Percentage of the patch that has to be in the mask for the patch to be
        processed. If ``mask_threshold = -1`` all patches are processed; if
        ``mask_threshold = 100`` every voxel of the patch needs to be in the
        mask.
    progbar: tqdm.tqdm, optional
        An existing progress bar forwarded to the denoiser.
    time_dimension: int, optional
        Axis of the input data holding the dynamic (time) dimension,
        default ``-1``.
    """

    def __init__(
        self,
        denoiser,
        patch_shape,
        patch_overlap,
        recombination="weighted",
        mask=None,
        mask_threshold=-1,
        progbar=None,
        time_dimension=-1,
        **kwargs,
    ):
        self._denoiser = DENOISER_MAP[denoiser]
        # Keyword arguments forwarded to the denoising function on every call.
        self._params = {
            "patch_shape": patch_shape,
            "patch_overlap": patch_overlap,
            "mask": mask,
            "mask_threshold": mask_threshold,
            "recombination": recombination,
            "progbar": progbar,
            **kwargs,
        }
        self.op = self._op_method
        # ModOpt expects a cost callable; LLR denoising exposes no analytic cost.
        self.cost = lambda *args, **kw: np.nan
        self.time_dimension = time_dimension

    def _op_method(self, data, **kwargs):
        """Denoise ``data``, treating ``self.time_dimension`` as the time axis."""
        # Per-call overrides take precedence over the stored parameters.
        call_kwargs = dict(self._params, **kwargs)
        # The denoisers expect time as the last axis; move it there and back.
        time_last = np.moveaxis(data, self.time_dimension, -1)
        denoised = self._denoiser(time_last, **call_kwargs)[0]
        return np.moveaxis(denoised, -1, self.time_dimension)
# ------------------------------------------------------------------------------
# src/patch_denoise/bindings/nipype.py
"""Nipype bindings providing interfaces to apply patch-based denoising."""

import os

import nibabel as nib
import numpy as np
from nipype.interfaces.base import (
    BaseInterfaceInputSpec,
    File,
    SimpleInterface,
    TraitedSpec,
    isdefined,
    traits,
)
from nipype.utils.filemanip import split_filename

from .utils import DENOISER_MAP, DenoiseParameters
from ..space_time.utils import estimate_noise


class PatchDenoiseInputSpec(BaseInterfaceInputSpec):
    """InputSpec for the patch-denoising interface.

    The input is either a single magnitude file (``in_mag``) or a
    real/imaginary pair (``in_real`` + ``in_imag``); the ``xor`` metadata
    makes the two forms mutually exclusive.
    """

    # NOTE(review): nipype's metadata keyword for co-required traits is
    # ``requires=``; the ``require=`` spelling used below is likely silently
    # ignored — confirm against the nipype traits documentation.
    in_mag = File(
        exists=True,
        xor=["in_real", "in_imag"],
        desc="magnitude input file to denoise.",
    )
    in_real = File(
        exists=True,
        xor=["in_mag"],
        require=["in_imag"],
        desc="Real-part of input file to denoise.",
    )
    in_imag = File(
        exists=True,
        xor=["in_mag"],
        require=["in_real"],
        desc="imaginary part of input file to denoise.",
    )

    mask = File(exists=True)
    noise_std_map = File(desc="noise_std_map")
    denoise_str = traits.Str(desc="string describing the denoiser configuration")
    # DENOISER_MAP contains a ``None`` key, so ``method`` may be None; the
    # interface falls back to a passthrough in that case (see _run_interface).
    method = traits.Enum(
        *DENOISER_MAP.keys(),
        xor=["denoise_str"],
        require=["patch_shape", "patch_overlap"],
    )
    # NOTE(review): the ``require=`` lists below reference "denoise_method",
    # but no trait of that name exists (the trait is ``method``) — confirm.
    patch_shape = traits.Union(
        traits.Int(),
        traits.List(traits.Int(), minlen=3, maxlen=3),
        xor=["denoise_str"],
        require=["denoise_method", "patch_overlap"],
    )
    patch_overlap = traits.Union(
        traits.Int(),
        traits.List(traits.Int(), minlen=3, maxlen=3),
        xor=["denoise_str"],
        require=["patch_shape", "denoise_method"],
    )
    mask_threshold = traits.Int(10)
    recombination = traits.Enum("weighted", "mean")
    extra_kwargs = traits.Dict()


class PatchDenoiseOutputSpec(TraitedSpec):
    """OutputSpec for the patch-denoising interface."""

    denoised_file = File(desc="denoised file")
    noise_std_map = File(desc="a map of the noise variance.")
    rank_map = File(desc="a map of the rank of the patch.")


class PatchDenoise(SimpleInterface):
    """Patch-based denoising interface.

    Dispatches to one of the functions in ``DENOISER_MAP`` according to the
    configuration, and writes the denoised volume, a noise-std map and a
    patch-rank map next to the working directory.
    """

    input_spec = PatchDenoiseInputSpec
    output_spec = PatchDenoiseOutputSpec

    # Attributes copied from the input spec onto DenoiseParameters when no
    # configuration string is provided.
    _denoise_attrs = [
        "method",
        "patch_shape",
        "patch_overlap",
        "mask_threshold",
        "recombination",
    ]

    def _run_interface(self, runtime):
        # INPUT
        # Either parse the compact configuration string, or build the
        # parameters from the individual traits.
        if isdefined(self.inputs.denoise_str):
            d_par = DenoiseParameters.from_str(self.inputs.denoise_str)
        else:
            d_par = DenoiseParameters()
            for attr in PatchDenoise._denoise_attrs:
                setattr(d_par, attr, getattr(self.inputs, attr))

        if isdefined(self.inputs.in_mag):
            data_mag_nii = nib.load(self.inputs.in_mag)
            data = data_mag_nii.get_fdata(dtype=np.float32)
            basename = self.inputs.in_mag
            self._affine = data_mag_nii.affine
        else:
            # Assemble a complex volume from the real/imaginary pair.
            # The in-place add avoids one full-size temporary array.
            data_real_nii = nib.load(self.inputs.in_real)
            self._affine = data_real_nii.affine
            data_real = data_real_nii.get_fdata(dtype=np.float32)
            data_imag = nib.load(self.inputs.in_imag).get_fdata(dtype=np.float32)
            data = 1j * data_imag
            data += data_real
            basename = self.inputs.in_real

        # Binarize the mask: any non-zero voxel belongs to the ROI.
        if isdefined(self.inputs.mask) and self.inputs.mask:
            mask = np.abs(nib.load(self.inputs.mask).get_fdata()) > 0
        else:
            mask = None

        try:
            denoise_func = DENOISER_MAP[d_par.method]
        except KeyError:
            # NOTE(review): ``self.inputs.denoise_method`` is not a declared
            # trait (the trait is ``method``); building this message would
            # itself fail — confirm and fix upstream.
            raise ValueError(
                f"unknown denoising denoise_method '{self.inputs.denoise_method}', "
                f"available are {list(DENOISER_MAP.keys())}"
            ) from None

        if isdefined(self.inputs.extra_kwargs) and self.inputs.extra_kwargs:
            extra_kwargs = self.inputs.extra_kwargs
        else:
            extra_kwargs = dict()
        # These methods require an externally estimated noise-std map.
        if d_par.method in [
            "nordic",
            "hybrid-pca",
            "adaptive-qut",
            "optimal-fro-noise",
        ]:
            extra_kwargs["noise_std"] = nib.load(self.inputs.noise_std_map).get_fdata()

        if denoise_func is not None:
            # CORE CALL
            denoised_data, _, noise_std_map, rank_map = denoise_func(
                data,
                patch_shape=d_par.patch_shape,
                patch_overlap=d_par.patch_overlap,
                mask=mask,
                mask_threshold=d_par.mask_threshold,
                recombination=d_par.recombination,
                **extra_kwargs,
            )
        else:
            # Passthrough mode (method is None): no denoising, zero rank map.
            denoised_data = data
            noise_std_map = np.std(data, axis=-1, dtype=np.float32)
            rank_map = np.zeros_like(noise_std_map)
        # OUTPUT
        # NOTE(review): np.abs with an explicit dtype= on complex input may
        # raise a casting error in numpy — confirm on a complex test volume.
        if np.any(np.iscomplex(denoised_data)):
            denoised_data = np.abs(denoised_data, dtype=np.float32)

        # Derive output basenames from the input filename, dropping the
        # part suffix markers.
        _, base, _ = split_filename(basename)
        base = base.replace("_mag", "")
        base = base.replace("_real", "")

        self._make_results_file("rank_map", f"{base}_rank_map.nii", rank_map)
        self._make_results_file(
            "denoised_file",
            f"{base}_d_{d_par.method}.nii",
            denoised_data,
        )
        self._make_results_file("noise_std_map", f"{base}_noise_map.nii", noise_std_map)
        return runtime

    def _make_results_file(self, result_file, file_name, array):
        """Add a new results file.

        Registers the absolute path under ``self._results[result_file]`` and
        saves ``array`` as a NIfTI image with the input's affine.
        """
        self._results[result_file] = os.path.abspath(file_name)
        nib.save(nib.Nifti1Image(array, self._affine), file_name)


class NoiseStdMapInputSpec(BaseInterfaceInputSpec):
    """InputSpec for Noise Map Estimation."""

    noise_map_file = File(
        exists=True,
        mandatory=True,
        desc="A 0-Volt volume acquisition",
    )
    # NOTE(review): nipype traits take the default as the first positional
    # argument together with ``usedefault=True``; ``default=`` as a keyword
    # may not be honored — confirm these two defaults actually apply.
    fft_scale = traits.Int(default=100, desc="scaling parameter of the reconstruction.")
    block_size = traits.Int(default=3, desc="size of spatial block to compute the std.")
class NoiseStdMap(SimpleInterface):
    """Noise std estimation.

    Loads the 0-Volt noise volume, divides it by ``fft_scale`` to undo the
    reconstruction scaling, and runs ``estimate_noise`` over spatial blocks of
    ``block_size`` voxels to produce a map of the noise standard deviation.
    """

    input_spec = NoiseStdMapInputSpec
    output_spec = NoiseStdMapOutputSpec

    def _run_interface(self, runtime):
        noise_map = nib.load(self.inputs.noise_map_file)
        noise_std_map = estimate_noise(
            noise_map.get_fdata() / self.inputs.fft_scale, self.inputs.block_size
        )
        # Reuse the input's affine so the std map stays aligned with the
        # acquisition volume.
        noise_std_map_img = nib.Nifti1Image(noise_std_map, affine=noise_map.affine)

        # Write "<input basename>_std.nii" in the working directory and expose
        # its absolute path as the interface result.
        filename = os.path.abspath(
            os.path.basename(self.inputs.noise_map_file).split(".")[0] + "_std.nii"
        )
        noise_std_map_img.to_filename(filename)
        self._results["noise_std_map"] = filename

        return runtime
| "nordic": nordic, 38 | "adaptive-qut": lambda *args, **kwargs: adaptive_thresholding( 39 | *args, method="qut", **kwargs 40 | ), 41 | } 42 | 43 | _RECOMBINATION = {"w": "weighted", "c": "center", "a": "average"} 44 | 45 | 46 | @dataclass 47 | class DenoiseParameters: 48 | """Denoise Parameters data structure.""" 49 | 50 | method: str = None 51 | patch_shape: int | tuple[int, ...] = 11 52 | patch_overlap: int | tuple[int, ...] = 0 53 | recombination: str = "weighted" # "center" is also available 54 | mask_threshold: int = 10 55 | 56 | @property 57 | def pretty_name(self): 58 | """Return a pretty name for the representation of parameters.""" 59 | if self.method: 60 | name = self.method 61 | for attr in [ 62 | "patch_shape", 63 | "patch_overlap", 64 | "recombination", 65 | "mask_threshold", 66 | ]: 67 | if getattr(self, attr): 68 | name += f"_{getattr(self, attr)}" 69 | else: 70 | name = "noisy" 71 | return name 72 | 73 | @property 74 | def pretty_par(self): 75 | """Get pretty representation of parameters.""" 76 | name = f"{self.patch_shape}_{self.patch_overlap}{self.recombination[0]}" 77 | return name 78 | 79 | @classmethod 80 | def get_str(cls, **kwargs): 81 | """Get full string representation from set of kwargs.""" 82 | return cls(**kwargs).pretty_name 83 | 84 | @classmethod 85 | def from_str(self, config_str): 86 | """Create a DenoiseParameters from a string.""" 87 | if "noisy" in config_str: 88 | return DenoiseParameters( 89 | method=None, 90 | patch_shape=None, 91 | patch_overlap=None, 92 | recombination=None, 93 | mask_threshold=None, 94 | ) 95 | else: 96 | conf = config_str.split("_") 97 | d = DenoiseParameters() 98 | if conf: 99 | d.method = conf.pop(0) 100 | if conf: 101 | d.patch_shape = int(conf.pop(0)) 102 | if conf: 103 | d.patch_overlap = int(conf.pop(0)) 104 | if conf: 105 | c = conf.pop(0) 106 | d.recombination = c 107 | if conf: 108 | d.mask_threshold = int(conf.pop(0)) 109 | return d 110 | 111 | def __str__(self): 112 | """Get string 
representation.""" 113 | return self.pretty_name 114 | 115 | 116 | def load_as_array(input): 117 | """Load a file as a numpy array, and return affine matrix if available.""" 118 | import nibabel as nib 119 | 120 | if input is None: 121 | return None, None 122 | if input.suffix == ".npy": 123 | return np.load(input), None 124 | elif ".nii" in input.suffixes: 125 | nii = nib.load(input) 126 | return nii.get_fdata(dtype=np.float32), nii.affine 127 | else: 128 | raise ValueError("Unsupported file format. use numpy or nifti formats.") 129 | 130 | 131 | def save_array(data, affine, filename): 132 | """Save array to file, with affine matrix if required.""" 133 | import nibabel as nib 134 | 135 | if filename is None: 136 | return None 137 | 138 | if ".nii" in filename.suffixes: 139 | if affine is None: 140 | affine = np.eye(len(data.shape)) 141 | nii_img = nib.Nifti1Image(data, affine) 142 | nii_img.to_filename(filename) 143 | elif filename.endswith(".npy"): 144 | np.save(filename, data) 145 | 146 | return filename 147 | 148 | 149 | def load_complex_nifti(mag_file, phase_file, filename=None): # pragma: no cover 150 | """Load two nifti image (magnitude and phase) to create a complex valued array. 
151 | 152 | Optionally, the result can be save as a .npy file 153 | 154 | Parameters 155 | ---------- 156 | mag_file: str 157 | The source magnitude file 158 | phase_file: str 159 | The source phase file 160 | filename: str, default None 161 | The output filename 162 | """ 163 | mag, mag_affine = load_as_array(mag_file) 164 | phase, phase_affine = load_as_array(phase_file) 165 | 166 | if not np.allclose(mag_affine, phase_affine): 167 | logging.warning("Affine matrices for magnitude and phase are not the same") 168 | 169 | logging.info("Phase data range is [%.2f %.2f]", np.min(phase), np.max(phase)) 170 | logging.info("Mag data range is [%.2f %.2f]", np.min(mag), np.max(mag)) 171 | img = mag * np.exp(1j * phase) 172 | 173 | if filename is not None: 174 | np.save(filename, img) 175 | return img, mag_affine 176 | 177 | 178 | def compute_mask(array, convex=False): 179 | """Compute mask for array using the Otzu's method. 180 | 181 | The time axis is assumed to be the last one. 182 | 183 | The mask is computed slice-wise on the time average of the array. 184 | 185 | Parameters 186 | ---------- 187 | array : numpy.ndarray 188 | Array to compute mask for. 189 | convex : bool, default False 190 | If True, the mask is convex for each slice. 191 | 192 | Returns 193 | ------- 194 | numpy.ndarray 195 | Mask for array. 
196 | """ 197 | from skimage.filters import threshold_otsu 198 | from skimage.morphology import convex_hull_image 199 | 200 | mean = array.mean(axis=-1) 201 | mask = np.zeros(mean.shape, dtype=bool) 202 | for i in range(mean.shape[-1]): 203 | mask[..., i] = mean[..., i] > threshold_otsu(mean[..., i]) 204 | if convex: 205 | for i in range(mean.shape[-1]): 206 | mask[..., i] = convex_hull_image(mask[..., i]) 207 | return mask 208 | -------------------------------------------------------------------------------- /src/patch_denoise/denoise.py: -------------------------------------------------------------------------------- 1 | """Provides a functional entry point for denoising methods.""" 2 | from ._docs import fill_doc 3 | from .space_time.lowrank import ( 4 | AdaptiveDenoiser, 5 | HybridPCADenoiser, 6 | MPPCADenoiser, 7 | NordicDenoiser, 8 | OptimalSVDDenoiser, 9 | RawSVDDenoiser, 10 | ) 11 | 12 | 13 | @fill_doc 14 | def mp_pca( 15 | input_data, 16 | patch_shape, 17 | patch_overlap, 18 | mask=None, 19 | mask_threshold=50, 20 | recombination="weighted", 21 | threshold_scale=1.0, 22 | progbar=None, 23 | ): 24 | """ 25 | Marshenko-Pastur PCA denoising method. 26 | 27 | Parameters 28 | ---------- 29 | $standard_config 30 | threshold_scale: float 31 | An extra factor for the patch denoising. 32 | 33 | Returns 34 | ------- 35 | $denoise_return 36 | 37 | Notes 38 | ----- 39 | Follows implementation of [#]_ and the one available in dipy. 40 | 41 | References 42 | ---------- 43 | .. [#] Manjón, José V., Pierrick Coupé, Luis Concha, Antonio Buades, 44 | D. Louis Collins, and Montserrat Robles. 45 | “Diffusion Weighted Image Denoising Using Overcomplete Local PCA.” 46 | PLOS ONE 8, no. 9 (September 3, 2013): e73021. 47 | https://doi.org/10.1371/journal.pone.0073021. 
48 | 49 | See Also 50 | -------- 51 | patch_denoise.space_time.lowrank.MPPCADenoiser 52 | """ 53 | denoiser = MPPCADenoiser( 54 | patch_shape, 55 | patch_overlap, 56 | recombination=recombination, 57 | threshold_scale=threshold_scale, 58 | ) 59 | return denoiser.denoise(input_data, mask=mask, mask_threshold=mask_threshold) 60 | 61 | 62 | @fill_doc 63 | def hybrid_pca( 64 | input_data, 65 | patch_shape, 66 | patch_overlap, 67 | mask=None, 68 | mask_threshold=50, 69 | noise_std=1.0, 70 | recombination="weighted", 71 | progbar=None, 72 | ): 73 | """ 74 | Hybrid PCA denoising method. 75 | 76 | Parameters 77 | ---------- 78 | $standard_config 79 | $noise_std 80 | 81 | Returns 82 | ------- 83 | $denoise_return 84 | 85 | Notes 86 | ----- 87 | Follows implementation of [#]_ . 88 | 89 | References 90 | ---------- 91 | .. [#] 92 | https://submissions.mirasmart.com/ISMRM2022/Itinerary/Files/PDFFiles/2688.html 93 | 94 | See Also 95 | -------- 96 | patch_denoise.space_time.lowrank.HybridPCADenoiser 97 | """ 98 | denoiser = HybridPCADenoiser( 99 | patch_shape, 100 | patch_overlap, 101 | recombination=recombination, 102 | ) 103 | return denoiser.denoise( 104 | input_data, 105 | mask=mask, 106 | mask_threshold=mask_threshold, 107 | noise_std=noise_std, 108 | progbar=progbar, 109 | ) 110 | 111 | 112 | @fill_doc 113 | def raw_svt( 114 | input_data, 115 | patch_shape, 116 | patch_overlap, 117 | mask_threshold=50, 118 | mask=None, 119 | threshold=1.0, 120 | recombination="weighted", 121 | progbar=None, 122 | ): 123 | """ 124 | Raw singular value thresholding. 125 | 126 | Parameters 127 | ---------- 128 | $standard_config 129 | threshold: float 130 | threshold use for singular value hard thresholding. 131 | 132 | Returns 133 | ------- 134 | tuple 135 | numpy.ndarray: The denoised sequence of volume 136 | numpy.ndarray: The weight of each pixel after the processing. 137 | numpy.ndarray: If possible, the noise variance distribution in the volume. 
138 | 139 | Notes 140 | ----- 141 | Simple raw hard thresholding of singular values. 142 | TODO: add support for soft thresholding. 143 | 144 | See Also 145 | -------- 146 | patch_denoise.space_time.lowrank.MPPCADenoiser 147 | """ 148 | denoiser = RawSVDDenoiser( 149 | patch_shape, 150 | patch_overlap, 151 | recombination=recombination, 152 | threshold_value=threshold, 153 | ) 154 | return denoiser.denoise( 155 | input_data, 156 | mask=mask, 157 | mask_threshold=mask_threshold, 158 | threshold_scale=1.0, 159 | progbar=progbar, 160 | ) 161 | 162 | 163 | @fill_doc 164 | def nordic( 165 | input_data, 166 | patch_shape, 167 | patch_overlap, 168 | mask_threshold=50, 169 | mask=None, 170 | noise_std=1.0, 171 | recombination="weighted", 172 | n_iter_threshold=10, 173 | progbar=None, 174 | ): 175 | """ 176 | NORDIC denoising method. 177 | 178 | Parameters 179 | ---------- 180 | $standard_config 181 | $noise_std 182 | n_iter_threshold: int 183 | The number of Monte-Carlo Simulation to estimate the global threshold. 184 | 185 | Returns 186 | ------- 187 | $denoise_return 188 | 189 | Notes 190 | ----- 191 | Follows implementation of [#]_ 192 | 193 | References 194 | ---------- 195 | .. [#] Moeller, Steen, Pramod Kumar Pisharady, Sudhir Ramanna, Christophe Lenglet, 196 | Xiaoping Wu, Logan Dowdle, Essa Yacoub, Kamil Uğurbil, and Mehmet Akçakaya. 197 | “NOise Reduction with DIstribution Corrected (NORDIC) PCA in DMRI with 198 | Complex-Valued Parameter-Free Locally Low-Rank Processing.” 199 | NeuroImage 226 (February 1, 2021): 117539. 200 | https://doi.org/10.1016/j.neuroimage.2020.117539. 
@fill_doc
def optimal_thresholding(
    input_data,
    patch_shape,
    patch_overlap,
    mask=None,
    mask_threshold=50,
    loss="fro",
    noise_std=None,
    recombination="weighted",
    eps_marshenko_pastur=1e-7,
    progbar=None,
):
    """
    Optimal thresholding denoising method.

    Parameters
    ----------
    $standard_config
    $noise_std
    loss: str
        The loss for which the optimal thresholding is performed.
    eps_marshenko_pastur: float
        The precision with which the optimal threshold is computed.

    Returns
    -------
    $denoise_return

    Notes
    -----
    Reimplementation of the original Matlab code [#]_ in python.

    References
    ----------
    .. [#] Gavish, Matan, and David L. Donoho. “Optimal Shrinkage of Singular Values.”
       IEEE Transactions on Information Theory 63, no. 4 (April 2017): 2137–52.
       https://doi.org/10.1109/TIT.2017.2653801.

    See Also
    --------
    patch_denoise.space_time.lowrank.OptimalSVDDenoiser
    """
    denoiser = OptimalSVDDenoiser(
        patch_shape,
        patch_overlap,
        recombination=recombination,
        loss=loss,
    )
    return denoiser.denoise(
        input_data,
        mask=mask,
        noise_std=noise_std,
        mask_threshold=mask_threshold,
        eps_marshenko_pastur=eps_marshenko_pastur,
        progbar=progbar,
    )
def add_temporal_gaussian_noise(array, sigma=1, g_factor_map=None, rng=None):
    """Add temporal Gaussian noise to an array.

    Parameters
    ----------
    array: numpy.ndarray
        The noise_free ND-array, where the last dimension is a dynamical one
        (e.g. time)
    sigma: float
        Standard deviation of the Gaussian noise.
    g_factor_map: numpy.ndarray, optional
        Spatial variation of the noise ((N-1)D array). default is identity.
    rng: numpy.random.Generator, optional
        Random generator to use; a fresh default generator is created if None.

    Returns
    -------
    numpy.ndarray
        A noisy array
    """
    if rng is None:
        rng = default_rng()
    shape = array.shape

    g_noise = sigma * rng.standard_normal(shape)
    if g_factor_map is None:
        g_factor_map = np.ones(shape[:-1])

    # Complex-dtyped data gets independent noise on the imaginary part too.
    # BUGFIX: ``iscomplexobj`` checks the dtype (``iscomplex(...).any()``
    # missed complex arrays with zero imaginary part), and the out-of-place
    # add avoids the in-place complex-into-float64 cast error that made this
    # branch crash.
    if np.iscomplexobj(array):
        g_noise = g_noise + 1j * sigma * rng.standard_normal(shape)
    return array + (g_noise * g_factor_map[..., None])
def mr_shepp_logan_t2_star(N, B0=3):
    """Return a 3D T2*-weighted Shepp-Logan phantom.

    Parameters
    ----------
    N : int or array_like
        Matrix size, (N, N, N), or (L, M, N).
    B0 : float, optional
        Field strength (in Tesla).

    Returns
    -------
    numpy.ndarray
        The T2* volume, i.e. the last element of the (M0, T1, T2) tuple
        returned by ``mr_shepp_logan`` with ``T2star=True``.
    """
    return mr_shepp_logan(N, E=None, B0=B0, T2star=True)[-1]
60 | 61 | Notes 62 | ----- 63 | Implements the phantoms described in [1]_. 64 | 65 | If parameters A, C are given and T1 is None, T1 is determined 66 | according to the equation: 67 | 68 | T1 = A*B0^C 69 | 70 | The original source code [2]_ 71 | 72 | References 73 | ---------- 74 | .. [1] Gach, H. Michael, Costin Tanase, and Fernando Boada. 75 | "2D & 3D Shepp-Logan phantom standards for MRI." 2008 19th 76 | International Conference on Systems Engineering. IEEE, 77 | 2008. 78 | .. [2] https://github.com/mckib2/phantominator/blob/master/phantominator \ 79 | /mr_shepp_logan.py 80 | """ 81 | # Determine size of phantom 82 | if np.isscalar(N): 83 | L, M, N = N, N, N 84 | else: 85 | L, M, N = N[:] 86 | 87 | # Make sure zlims are appropriate 88 | assert len(zlims) == 2, ( 89 | "zlims must be a tuple with 2 entries: upper and lower " "bounds!" 90 | ) 91 | assert zlims[0] <= zlims[1], "zlims: lower bound must be first entry!" 92 | 93 | # Get parameters from paper if None provided 94 | if E is None: 95 | E = mr_ellipsoid_parameters() 96 | 97 | # Extract some parameters so we can use them 98 | xs = E[:, 0] 99 | ys = E[:, 1] 100 | zs = E[:, 2] 101 | xaxis = E[:, 3] 102 | yaxis = E[:, 4] 103 | zaxis = E[:, 5] 104 | theta = E[:, 6] 105 | M0 = E[:, 7] 106 | As = E[:, 8] 107 | Cs = E[:, 9] 108 | T1 = E[:, 10] 109 | T2 = E[:, 11] 110 | chis = E[:, 12] 111 | 112 | # Initialize array 113 | X, Y, Z = np.meshgrid( # meshgrid does X, Y backwards 114 | np.linspace(-1, 1, M), np.linspace(-1, 1, L), np.linspace(zlims[0], zlims[1], N) 115 | ) 116 | ct = np.cos(theta) 117 | st = np.sin(theta) 118 | sgn = np.sign(M0) 119 | T1s = np.zeros((L, M, N)) 120 | T2s = np.zeros((L, M, N)) 121 | M0s = np.zeros((L, M, N)) 122 | 123 | # We'll need the gyromagnetic ratio if returning T2star values 124 | if T2star: 125 | # see https://en.wikipedia.org/wiki/Gyromagnetic_ratio: 126 | gamma0 = 267.52219 # 10^6 rad⋅s−1⋅T⋅−1 127 | 128 | # Put ellipses where they need to be 129 | for ii in range(E.shape[0]): 
130 | xc, yc, zc = xs[ii], ys[ii], zs[ii] 131 | a, b, c = xaxis[ii], yaxis[ii], zaxis[ii] 132 | ct0, st0 = ct[ii], st[ii] 133 | 134 | # Find indices falling inside the ellipsoid, ellipses only 135 | # rotated in xy plane 136 | idx = ((X - xc) * ct0 + (Y - yc) * st0) ** 2 / a**2 + ( 137 | (X - xc) * st0 - (Y - yc) * ct0 138 | ) ** 2 / b**2 + (Z - zc) ** 2 / c**2 <= 1 139 | 140 | # Add ellipses together -- subtract of M0 is negative 141 | M0s[idx] += M0[ii] 142 | 143 | # Use T2star values if user asked for them 144 | if T2star: 145 | T2s[idx] += sgn[ii] / (1 / T2[ii] + gamma0 * np.abs(B0 * chis[ii])) 146 | else: 147 | T2s[idx] += sgn[ii] * T2[ii] 148 | 149 | # Use T1 model if not given explicit T1 value 150 | if np.isnan(T1[ii]): 151 | T1s[idx] += sgn[ii] * As[ii] * (B0 ** Cs[ii]) 152 | else: 153 | T1s[idx] += sgn[ii] * T1[ii] 154 | 155 | return (M0s, T1s, T2s) 156 | 157 | 158 | def mr_ellipsoid_parameters(): 159 | """Return parameters of ellipsoids. 160 | 161 | Returns 162 | ------- 163 | E : array_like 164 | Parameters for the ellipsoids used to construct the phantom. 
165 | """ 166 | params = _mr_relaxation_parameters() 167 | 168 | E = np.zeros((15, 13)) 169 | # [:, [x, y, z, a, b, c, theta, m0, A, C, (t1), t2, chi]] 170 | E[0, :] = [0, 0, 0, 0.72, 0.95, 0.93, 0, 0.8, *params["scalp"]] 171 | E[1, :] = [0, 0, 0, 0.69, 0.92, 0.9, 0, 0.12, *params["marrow"]] 172 | E[2, :] = [0, -0.0184, 0, 0.6624, 0.874, 0.88, 0, 0.98, *params["csf"]] 173 | E[3, :] = [0, -0.0184, 0, 0.6524, 0.864, 0.87, 0, 0.745, *params["gray-matter"]] 174 | E[4, :] = [-0.22, 0, -0.25, 0.41, 0.16, 0.21, np.deg2rad(-72), 0.98, *params["csf"]] 175 | E[5, :] = [0.22, 0, -0.25, 0.31, 0.11, 0.22, np.deg2rad(72), 0.98, *params["csf"]] 176 | E[6, :] = [0, 0.35, -0.25, 0.21, 0.25, 0.35, 0, 0.617, *params["white-matter"]] 177 | E[7, :] = [0, 0.1, -0.25, 0.046, 0.046, 0.046, 0, 0.95, *params["tumor"]] 178 | E[8, :] = [-0.08, -0.605, -0.25, 0.046, 0.023, 0.02, 0, 0.95, *params["tumor"]] 179 | E[9, :] = [ 180 | 0.06, 181 | -0.605, 182 | -0.25, 183 | 0.046, 184 | 0.023, 185 | 0.02, 186 | np.deg2rad(-90), 187 | 0.95, 188 | *params["tumor"], 189 | ] 190 | E[10, :] = [0, -0.1, -0.25, 0.046, 0.046, 0.046, 0, 0.95, *params["tumor"]] 191 | E[11, :] = [0, -0.605, -0.25, 0.023, 0.023, 0.023, 0, 0.95, *params["tumor"]] 192 | E[12, :] = [ 193 | 0.06, 194 | -0.105, 195 | 0.0625, 196 | 0.056, 197 | 0.04, 198 | 0.1, 199 | np.deg2rad(-90), 200 | 0.93, 201 | *params["tumor"], 202 | ] 203 | E[13, :] = [0, 0.1, 0.625, 0.056, 0.056, 0.1, 0, 0.98, *params["csf"]] 204 | E[14, :] = [ 205 | 0.56, 206 | -0.4, 207 | -0.25, 208 | 0.2, 209 | 0.03, 210 | 0.1, 211 | np.deg2rad(70), 212 | 0.85, 213 | *params["blood-clot"], 214 | ] 215 | 216 | # Need to subtract some ellipses here... 
217 | Eneg = np.zeros(E.shape) 218 | for ii in range(E.shape[0]): 219 | # Ellipsoid geometry 220 | Eneg[ii, :7] = E[ii, :7] 221 | 222 | # Tissue property differs after 4th subtracted ellipsoid 223 | if ii > 3: 224 | Eneg[ii, 7:] = E[3, 7:] 225 | else: 226 | Eneg[ii, 7:] = E[ii - 1, 7:] 227 | 228 | # Throw out first as we skip this one in the paper's table 229 | Eneg = Eneg[1:, :] 230 | 231 | # Spin density is negative for subtraction 232 | Eneg[:, 7] *= -1 233 | 234 | # Paper doesn't use last blood-clot ellipsoid 235 | E = E[:-1, :] 236 | Eneg = Eneg[:-1, :] 237 | 238 | # Put both ellipsoid groups together 239 | E = np.concatenate((E, Eneg), axis=0) 240 | 241 | return E 242 | 243 | 244 | def _mr_relaxation_parameters(): 245 | """Return MR relaxation parameters for certain tissues. 246 | 247 | Returns 248 | ------- 249 | params : dict 250 | Gives entries as [A, C, (t1), t2, chi] 251 | 252 | Notes 253 | ----- 254 | If t1 is None, the model T1 = A*B0^C will be used. If t1 is not 255 | np.nan, then specified t1 will be used. 256 | """ 257 | # params['tissue-name'] = [A, C, (t1 value if explicit), t2, chi] 258 | params = dict() 259 | params["scalp"] = [0.324, 0.137, np.nan, 0.07, -7.5e-6] 260 | params["marrow"] = [0.533, 0.088, np.nan, 0.05, -8.85e-6] 261 | params["csf"] = [np.nan, np.nan, 4.2, 1.99, -9e-6] 262 | params["blood-clot"] = [1.35, 0.34, np.nan, 0.2, -9e-6] 263 | params["gray-matter"] = [0.857, 0.376, np.nan, 0.1, -9e-6] 264 | params["white-matter"] = [0.583, 0.382, np.nan, 0.08, -9e-6] 265 | params["tumor"] = [0.926, 0.217, np.nan, 0.1, -9e-6] 266 | return params 267 | 268 | 269 | def _hamming1d(n): 270 | """Compute the 1D Hamming window.""" 271 | return 0.54 - (0.46 * np.cos(np.arange(n) * 2 * np.pi / (n - 1))) 272 | 273 | 274 | def g_factor_map(volume_shape, window_type="hamming"): 275 | """ 276 | Return a g-factor map using a window function. 
class PatchedArray:
    """A container providing patch-wise access to an underlying array.

    The array is exposed as a grid of (possibly overlapping) patches through a
    zero-copy sliding window, so reading or updating a patch acts directly on
    the underlying memory.

    Parameters
    ----------
    array : numpy.ndarray or tuple
        Array to view patch-wise. If a shape tuple is given, a new
        zero-filled array of that shape (and ``dtype``) is created.
    patch_shape : tuple
        Shape of a single patch, one entry per array dimension.
    patch_overlap : tuple
        Overlap between consecutive patches, one entry per dimension; must
        not exceed ``patch_shape``.
    dtype : numpy.dtype, optional
        Only used when ``array`` is a shape tuple.
    padding_mode : str, optional
        Kept for interface compatibility; currently unused.
    """

    def __init__(
        self,
        array,
        patch_shape,
        patch_overlap,
        dtype=None,
        padding_mode="edge",
        **kwargs,
    ):
        if isinstance(array, tuple):
            # A shape was provided: allocate the storage ourselves.
            array = np.zeros(array, dtype=dtype)
        self._arr = array

        self._ps = np.asarray(patch_shape)
        # BUGFIX: ``self._po`` was assigned twice, the second assignment
        # discarding the ``np.asarray`` conversion.
        self._po = np.asarray(patch_overlap)

        dimensions = self._arr.ndim
        # Distance between the starts of two consecutive patches.
        step = self._ps - self._po
        if np.any(step < 0):
            raise ValueError("overlap should be smaller than patch on every dimension.")

        if self._ps.size != dimensions or step.size != dimensions:
            raise ValueError(
                "self._ps and step must have the same number of dimensions as the "
                "input self._array."
            )

        # Ensure patch size is not larger than self._array size along each axis
        self._ps = np.minimum(self._ps, self._arr.shape)

        # Number of patches along each axis; a single patch covers any axis it
        # spans entirely.
        grid_shape = tuple(
            ((self._arr.shape[i] - self._ps[i]) // step[i] + 1)
            if self._ps[i] < self._arr.shape[i]
            else 1
            for i in range(dimensions)
        )
        shape = grid_shape + tuple(self._ps)
        # Stride 0 on fully-covered axes so the single patch is reused.
        strides = (
            tuple(
                self._arr.strides[i] * step[i]
                if self._ps[i] < self._arr.shape[i]
                else 0
                for i in range(dimensions)
            )
            + self._arr.strides
        )

        # Zero-copy sliding window: writing to a patch writes to the array.
        self.sliding_view = np.lib.stride_tricks.as_strided(
            self._arr, shape=shape, strides=strides
        )

        self._grid_shape = grid_shape

    @property
    def n_patches(self):
        """Get number of patches."""
        return np.prod(self._grid_shape)

    def get_patch(self, idx):
        """Get (a view of) the patch at linear index ``idx``."""
        return self.sliding_view[np.unravel_index(idx, self._grid_shape)]

    def set_patch(self, idx, value):
        """Set patch at linear index ``idx`` with value."""
        # BUGFIX: the assignment was missing, so this method was a no-op.
        self.sliding_view[np.unravel_index(idx, self._grid_shape)] = value

    def add2patch(self, idx, value):
        """Add to patch, in place."""
        patch = self.get_patch(idx)
        # ``get_patch`` returns a strided view, so the in-place add updates
        # the underlying array.
        patch += value

    def __getattr__(self, name):
        """Get attribute of underlying array."""
        return getattr(self._arr, name)
@fill_doc
class BaseSpaceTimeDenoiser(abc.ABC):
    """
    Base Class for Patch-based denoising methods for dynamical data.

    Parameters
    ----------
    $patch_config
    """

    def __init__(self, patch_shape, patch_overlap, recombination="weighted"):
        self.p_shape = patch_shape
        self.p_ovl = patch_overlap

        if recombination not in ["weighted", "average", "center"]:
            raise ValueError(
                "recombination must be one of 'weighted', 'average', 'center'"
            )

        self.recombination = recombination

        # Extra keyword arguments forwarded to _patch_processing by denoise().
        self.input_denoising_kwargs = dict()

    @fill_doc
    def denoise(self, input_data, mask=None, mask_threshold=50, progbar=None):
        """Denoise the input_data, according to mask.

        Patches are extracted sequentially and processed by the implemented
        `_patch_processing` function.
        Only patches with more than ``mask_threshold`` percent of their voxels
        inside the mask ROI are processed.

        Parameters
        ----------
        $input_config
        $mask_config

        Returns
        -------
        $denoise_return
        """
        data_shape = input_data.shape
        p_s, p_o = self._get_patch_param(data_shape)

        input_data = PatchedArray(input_data, p_s, p_o)
        output_data = PatchedArray(data_shape, p_s, p_o, dtype=input_data.dtype)
        patch_weights = PatchedArray(data_shape, p_s, p_o, dtype=np.float32)
        rank_map = PatchedArray(data_shape, p_s, p_o, dtype=np.int32)
        noise_std_estimate = PatchedArray(data_shape, p_s, p_o, dtype=np.float32)

        # Create default mask covering the whole volume.
        if mask is None:
            process_mask = np.full(data_shape, True)
        elif mask.shape == input_data.shape[:-1]:
            # FIX: a time axis must be appended before broadcasting a spatial
            # mask to the full (spatial + time) shape.
            process_mask = np.broadcast_to(mask[..., None], input_data.shape)
        else:
            # FIX: a full-shape mask previously left process_mask undefined
            # (NameError on the next line).
            process_mask = np.asarray(mask, dtype=bool)

        process_mask = PatchedArray(
            process_mask, p_s, p_o, padding_mode="constant", constant_values=0
        )

        center_pos = tuple(p // 2 for p in p_s)
        # Number of spatial voxels in a patch = rows of the Casorati matrix.
        patch_space_size = np.prod(p_s[:-1])

        # Select only patch indices where the mask coverage is high enough.
        get_it = np.zeros(input_data.n_patches, dtype=bool)
        for i in range(len(get_it)):
            pm = process_mask.get_patch(i)
            if 100 * np.sum(pm) / pm.size > mask_threshold:
                get_it[i] = True
        select_patches = np.nonzero(get_it)[0]
        del get_it

        if progbar is None:
            progbar = tqdm(total=len(select_patches))
        elif progbar is not False:
            progbar.reset(total=len(select_patches))

        for i in select_patches:
            # Casorati matrix: (spatial voxels) x (time points).
            input_patch_casorati = input_data.get_patch(i).reshape(
                patch_space_size, -1
            )
            p_denoise, maxidx, noise_var = self._patch_processing(
                input_patch_casorati,
                patch_idx=i,
                **self.input_denoising_kwargs,
            )

            p_denoise = np.reshape(p_denoise, p_s)
            if self.recombination == "center":
                output_data.get_patch(i)[center_pos] = p_denoise[center_pos]
            elif self.recombination == "weighted":
                theta = 1 / (2 + maxidx)
                output_data.add2patch(i, p_denoise * theta)
                patch_weights.add2patch(i, theta)
            elif self.recombination == "average":
                output_data.add2patch(i, p_denoise)
                patch_weights.add2patch(i, 1)
            else:
                raise ValueError(
                    "recombination must be one of 'weighted', 'average', 'center'"
                )
            # FIX: record the per-patch noise level and rank, which were
            # allocated but never filled (cf. the legacy implementation).
            if not np.isnan(noise_var):
                noise_std_estimate.add2patch(i, noise_var)
            rank_map.get_patch(i)[center_pos] = maxidx
            if progbar:
                progbar.update()

        output_data = output_data._arr
        patch_weights = patch_weights._arr
        noise_std_estimate = noise_std_estimate._arr
        rank_map = rank_map._arr

        # Average the overlapping pixels (only required for the
        # averaging-style recombinations). Guard against 0/0 outside the
        # processed region instead of producing NaNs.
        if self.recombination in ["average", "weighted"]:
            np.divide(
                output_data,
                patch_weights,
                out=output_data,
                where=patch_weights > 0,
            )
            np.divide(
                noise_std_estimate,
                patch_weights,
                out=noise_std_estimate,
                where=patch_weights > 0,
            )

        output_data[~process_mask._arr] = 0

        return output_data, patch_weights, noise_std_estimate, rank_map

    @abc.abstractmethod
    def _patch_processing(self, patch, patch_slice=None, **kwargs):
        """Process a patch.

        Implemented by child classes.
        """

    def _get_patch_param(self, data_shape):
        """Return tuple for patch_shape and patch_overlap.

        It works from whatever the input format was (int or list).
        This method also ensures that the patch will provide tall and skinny
        matrices.
        """
        pp = [None, None]
        for i, attr in enumerate(["p_shape", "p_ovl"]):
            p = getattr(self, attr)
            if isinstance(p, list):
                p = tuple(p)
            elif isinstance(p, (int, np.integer)):
                p = (p,) * (len(data_shape) - 1)
            if len(p) == len(data_shape) - 1:
                # add the time dimension
                p = (*p, data_shape[-1])
            pp[i] = p

        if np.prod(pp[0][:-1]) < data_shape[-1]:
            logging.warning(
                # FIX: report the number of spatial voxels (the Casorati rows),
                # matching the condition just above.
                f"the number of voxel in patch ({np.prod(pp[0][:-1])}) is smaller"
                f" than the last dimension ({data_shape[-1]}), this makes an"
                " ill-conditioned matrix for SVD.",
                stacklevel=2,
            )
        return tuple(pp)
# -------------------------------------------------------------------------
# src/patch_denoise/space_time/lowrank.py
# -------------------------------------------------------------------------
"""Low Rank methods."""

from types import MappingProxyType

import numpy as np
from scipy.linalg import svd
from scipy.optimize import minimize

from .base import BaseSpaceTimeDenoiser, PatchedArray
from .utils import (
    eig_analysis,
    eig_synthesis,
    marchenko_pastur_median,
    svd_analysis,
    svd_synthesis,
)
from .._docs import fill_doc

# Optional numba acceleration for the SURE cost function.
NUMBA_AVAILABLE = True
try:
    import numba as nb
except ImportError:
    NUMBA_AVAILABLE = False
    pass


@fill_doc
class MPPCADenoiser(BaseSpaceTimeDenoiser):
    """Denoising using Marchenko-Pastur principal components analysis thresholding.

    Parameters
    ----------
    $patch_config
    threshold_scale: float
        An extra factor multiplying the threshold.
    """

    def __init__(self, patch_shape, patch_overlap, threshold_scale, **kwargs):
        super().__init__(patch_shape, patch_overlap, **kwargs)
        self.input_denoising_kwargs["threshold_scale"] = threshold_scale

    def _patch_processing(self, patch, patch_idx=None, threshold_scale=1.0):
        """Process a patch with the MP-PCA method.

        Returns the denoised patch, the number of retained components
        (rank estimate) and the estimated noise variance.
        """
        p_center, eig_vals, eig_vec, p_tmean = eig_analysis(patch)
        maxidx = 0
        meanvar = np.mean(eig_vals)
        meanvar *= 4 * np.sqrt((len(eig_vals) - maxidx + 1) / len(patch))
        # Iteratively discard the largest eigenvalues while the remaining
        # spectrum spread is still compatible with pure noise.
        # NOTE: eig_vals is sorted in increasing order (see eig_analysis),
        # hence the access from the end via ``eig_vals[~maxidx]``.
        while maxidx < len(eig_vals) and meanvar < eig_vals[~maxidx] - eig_vals[0]:
            maxidx += 1
            meanvar = np.mean(eig_vals[:-maxidx])
            # NOTE(review): ``len(eig_vec)`` here vs ``len(eig_vals)`` above
            # looks inconsistent -- confirm which length is intended.
            meanvar *= 4 * np.sqrt((len(eig_vec) - maxidx + 1) / len(patch))
        var_noise = np.mean(eig_vals[: len(eig_vals) - maxidx])

        # Rank = number of eigenvalues above the (scaled) noise level.
        maxidx = np.sum(eig_vals > (var_noise * threshold_scale ** 2))

        if maxidx == 0:
            # Everything is noise: return the flat temporal mean.
            patch_new = np.zeros_like(patch) + p_tmean
        else:
            patch_new = eig_synthesis(p_center, eig_vec, p_tmean, maxidx)

        # Equation (3) of Manjon 2013

        return patch_new, maxidx, var_noise
82 | 83 | Parameters 84 | ---------- 85 | $input_config 86 | $mask_config 87 | $noise_std 88 | 89 | Returns 90 | ------- 91 | $denoise_return 92 | """ 93 | p_s, p_o = self._get_patch_param(input_data.shape) 94 | if isinstance(noise_std, (float, np.floating)): 95 | var_apriori = noise_std ** 2 * np.ones(input_data.shape[:-1]) 96 | else: 97 | var_apriori = noise_std ** 2 98 | var_apriori = PatchedArray( 99 | np.broadcast_to(var_apriori[..., None], input_data.shape), p_s, p_o 100 | ) 101 | self.input_denoising_kwargs["var_apriori"] = var_apriori 102 | return super().denoise(input_data, mask, mask_threshold, progbar=progbar) 103 | 104 | def _patch_processing(self, patch, patch_idx=None, var_apriori=None): 105 | """Process a patch with the Hybrid-PCA method.""" 106 | varest = np.mean(var_apriori.get_patch(patch_idx)) 107 | p_center, eig_vals, eig_vec, p_tmean = eig_analysis(patch) 108 | maxidx = 0 109 | var_noise = np.mean(eig_vals) 110 | while var_noise > varest and maxidx < len(eig_vals) - 2: 111 | maxidx += 1 112 | var_noise = np.mean(eig_vals[:-maxidx]) 113 | if maxidx == 0: # all eigen values are noise 114 | patch_new = np.zeros_like(patch) + p_tmean 115 | else: 116 | patch_new = eig_synthesis(p_center, eig_vec, p_tmean, maxidx) 117 | # Equation (3) of Manjon2013 118 | 119 | return patch_new, maxidx, var_noise 120 | 121 | 122 | @fill_doc 123 | class RawSVDDenoiser(BaseSpaceTimeDenoiser): 124 | """ 125 | Classical Patch wise singular value thresholding denoiser. 126 | 127 | Parameters 128 | ---------- 129 | $patch_config 130 | threshold_vlue: float 131 | threshold value for the singular values. 
132 | """ 133 | 134 | def __init__( 135 | self, patch_shape, patch_overlap, threshold_value=1.0, recombination="weighted" 136 | ): 137 | self._threshold_val = threshold_value 138 | 139 | super().__init__(patch_shape, patch_overlap, recombination) 140 | 141 | @fill_doc 142 | def denoise( 143 | self, 144 | input_data, 145 | mask=None, 146 | mask_threshold=50, 147 | threshold_scale=1.0, 148 | progbar=None, 149 | ): 150 | """Denoise the input_data, according to mask. 151 | 152 | Patches are extracted sequentially and process by the implemented 153 | `_patch_processing` function. 154 | Only patches which have at least a voxel in the mask ROI are processed. 155 | 156 | Parameters 157 | ---------- 158 | $input_config 159 | $mask_config 160 | threshold_scale: float 161 | Extra factor for the threshold of singular values. 162 | 163 | Returns 164 | ------- 165 | $denoise_return 166 | """ 167 | self._threshold = self._threshold_val * threshold_scale 168 | return super().denoise(input_data, mask, mask_threshold, progbar=progbar) 169 | 170 | def _patch_processing(self, patch, patch_idx=None, **kwargs): 171 | """Process a patch with the simple SVT method.""" 172 | # Centering for better precision in SVD 173 | u_vec, s_values, v_vec, p_tmean = svd_analysis(patch) 174 | 175 | maxidx = np.sum(s_values > self._threshold) 176 | if maxidx == 0: 177 | p_new = np.zeros_like(patch) + p_tmean 178 | else: 179 | s_values[s_values < self._threshold] = 0 180 | p_new = svd_synthesis(u_vec, s_values, v_vec, p_tmean, maxidx) 181 | 182 | # Equation (3) in Manjon 2013 183 | 184 | return p_new, maxidx, np.nan 185 | 186 | 187 | @fill_doc 188 | class NordicDenoiser(RawSVDDenoiser): 189 | """Denoising using the NORDIC method. 
190 | 191 | Parameters 192 | ---------- 193 | $patch_config 194 | """ 195 | 196 | @fill_doc 197 | def denoise( 198 | self, 199 | input_data, 200 | mask=None, 201 | mask_threshold=50, 202 | noise_std=1.0, 203 | n_iter_threshold=10, 204 | progbar=None, 205 | ): 206 | """Denoise using the NORDIC method. 207 | 208 | Along with the input data a noise std map or value should be provided. 209 | 210 | Parameters 211 | ---------- 212 | $input_config 213 | $mask_config 214 | $noise_std 215 | 216 | Returns 217 | ------- 218 | $denoise_return 219 | 220 | """ 221 | patch_shape, _ = self._get_patch_param(input_data.shape) 222 | # compute the threshold using Monte-Carlo Simulations. 223 | max_sval = sum( 224 | max( 225 | svd( 226 | np.random.randn(np.prod(patch_shape), input_data.shape[-1]), 227 | compute_uv=False, 228 | ) 229 | ) 230 | for _ in range(n_iter_threshold) 231 | ) 232 | max_sval /= n_iter_threshold 233 | 234 | if isinstance(noise_std, np.ndarray): 235 | noise_std = np.mean(noise_std) 236 | if not isinstance(noise_std, (float, np.floating)): 237 | raise ValueError( 238 | "For NORDIC the noise level must be either an" 239 | + " array or a float specifying the std in the volume.", 240 | ) 241 | 242 | self._threshold = noise_std * max_sval 243 | 244 | return super(RawSVDDenoiser, self).denoise( 245 | input_data, mask, mask_threshold=mask_threshold, progbar=progbar 246 | ) 247 | 248 | 249 | # From MATLAB implementation 250 | def _opt_loss_x(y, beta): 251 | """Compute (8) of donoho2017.""" 252 | tmp = y ** 2 - beta - 1 253 | return np.sqrt(0.5 * (tmp + np.sqrt((tmp ** 2) - (4 * beta)))) * ( 254 | y >= (1 + np.sqrt(beta)) 255 | ) 256 | 257 | 258 | def _opt_ope_shrink(singvals, beta=1): 259 | """Perform optimal threshold of singular values for operator norm.""" 260 | return np.maximum(_opt_loss_x(singvals, beta), 0) 261 | 262 | 263 | def _opt_nuc_shrink(singvals, beta=1): 264 | """Perform optimal threshold of singular values for nuclear norm.""" 265 | tmp = 
_opt_loss_x(singvals, beta) 266 | return ( 267 | np.maximum( 268 | 0, 269 | (tmp ** 4 - (np.sqrt(beta) * tmp * singvals) - beta), 270 | ) 271 | / ((tmp ** 2) * singvals) 272 | ) 273 | 274 | 275 | def _opt_fro_shrink(singvals, beta=1): 276 | """Perform optimal threshold of singular values for frobenius norm.""" 277 | return np.sqrt( 278 | np.maximum( 279 | (((singvals ** 2) - beta - 1) ** 2 - 4 * beta), 280 | 0, 281 | ) 282 | / singvals 283 | ) 284 | 285 | 286 | @fill_doc 287 | class OptimalSVDDenoiser(BaseSpaceTimeDenoiser): 288 | """ 289 | Optimal Shrinkage of singular values for a specific norm. 290 | 291 | Parameters 292 | ---------- 293 | $patch_config 294 | loss: str 295 | The loss determines the choice of the optimal thresholding function 296 | associated to it. The losses `"fro"`, `"nuc"` and `"op"` are supported, 297 | for the frobenius, nuclear and operator norm, respectively. 298 | """ 299 | 300 | _OPT_LOSS_SHRINK = MappingProxyType( 301 | { 302 | "fro": _opt_fro_shrink, 303 | "nuc": _opt_nuc_shrink, 304 | "ope": _opt_ope_shrink, 305 | } 306 | ) 307 | 308 | def __init__( 309 | self, 310 | patch_shape, 311 | patch_overlap, 312 | loss="fro", 313 | recombination="weighted", 314 | ): 315 | super().__init__(patch_shape, patch_overlap, recombination=recombination) 316 | self.input_denoising_kwargs[ 317 | "shrink_func" 318 | ] = OptimalSVDDenoiser._OPT_LOSS_SHRINK[loss] 319 | 320 | @fill_doc 321 | def denoise( 322 | self, 323 | input_data, 324 | mask=None, 325 | mask_threshold=50, 326 | noise_std=None, 327 | eps_marshenko_pastur=1e-7, 328 | progbar=None, 329 | ): 330 | """ 331 | Optimal thresholing denoising method. 332 | 333 | Parameters 334 | ---------- 335 | $input_config 336 | $mask_config 337 | $noise_std 338 | loss: str 339 | The loss for which the optimal thresholding is performed. 340 | eps_marshenko_pastur: float 341 | The precision with which the optimal threshold is computed. 
@fill_doc
class OptimalSVDDenoiser(BaseSpaceTimeDenoiser):
    """
    Optimal Shrinkage of singular values for a specific norm.

    Parameters
    ----------
    $patch_config
    loss: str
        The loss determines the choice of the optimal thresholding function
        associated to it. The losses `"fro"`, `"nuc"` and `"ope"` are
        supported, for the frobenius, nuclear and operator norm, respectively.
    """

    # Immutable loss-name -> shrinkage-function dispatch table.
    _OPT_LOSS_SHRINK = MappingProxyType(
        {
            "fro": _opt_fro_shrink,
            "nuc": _opt_nuc_shrink,
            "ope": _opt_ope_shrink,
        }
    )

    def __init__(
        self,
        patch_shape,
        patch_overlap,
        loss="fro",
        recombination="weighted",
    ):
        super().__init__(patch_shape, patch_overlap, recombination=recombination)
        self.input_denoising_kwargs[
            "shrink_func"
        ] = OptimalSVDDenoiser._OPT_LOSS_SHRINK[loss]

    @fill_doc
    def denoise(
        self,
        input_data,
        mask=None,
        mask_threshold=50,
        noise_std=None,
        eps_marshenko_pastur=1e-7,
        progbar=None,
    ):
        """
        Optimal thresholding denoising method.

        Parameters
        ----------
        $input_config
        $mask_config
        $noise_std
        eps_marshenko_pastur: float
            The precision with which the optimal threshold is computed.

        Returns
        -------
        $denoise_return

        Notes
        -----
        Reimplementation of the original Matlab code [#]_ in python.

        References
        ----------
        .. [#] Gavish, Matan, and David L. Donoho. \
            "Optimal Shrinkage of Singular Values."
            IEEE Transactions on Information Theory 63, no. 4 (April 2017): 2137–52.
            https://doi.org/10.1109/TIT.2017.2653801.
        """
        p_s, p_o = self._get_patch_param(input_data.shape)

        # Median of the Marchenko-Pastur distribution, used below to
        # estimate the noise level when no noise_std is provided.
        self.input_denoising_kwargs["mp_median"] = marchenko_pastur_median(
            beta=input_data.shape[-1] / np.prod(p_s),
            eps=eps_marshenko_pastur,
        )

        if noise_std is None:
            self.input_denoising_kwargs["var_apriori"] = None
        else:
            if isinstance(noise_std, (float, np.floating)):
                var_apriori = noise_std ** 2 * np.ones(input_data.shape[:-1])
            else:
                var_apriori = noise_std ** 2
            var_apriori = PatchedArray(
                np.broadcast_to(var_apriori[..., None], input_data.shape), p_s, p_o
            )
            self.input_denoising_kwargs["var_apriori"] = var_apriori
        return super().denoise(input_data, mask, mask_threshold, progbar=progbar)

    def _patch_processing(
        self,
        patch,
        patch_idx=None,
        shrink_func=None,
        mp_median=None,
        var_apriori=None,
    ):
        """Process a patch with the optimal singular-value shrinkage."""
        u_vec, s_values, v_vec, p_tmean = svd_analysis(patch)
        if var_apriori is not None:
            sigma = np.mean(np.sqrt(var_apriori.get_patch(patch_idx)))
        else:
            # Noise level estimated from the median singular value.
            sigma = np.median(s_values) / np.sqrt(patch.shape[1] * mp_median)

        scale_factor = np.sqrt(patch.shape[1]) * sigma
        thresh_s_values = scale_factor * shrink_func(
            s_values / scale_factor,
            beta=patch.shape[1] / patch.shape[0],
        )
        thresh_s_values[np.isnan(thresh_s_values)] = 0

        if np.any(thresh_s_values):
            maxidx = np.max(np.nonzero(thresh_s_values)) + 1
            p_new = svd_synthesis(u_vec, thresh_s_values, v_vec, p_tmean, maxidx)
        else:
            maxidx = 0
            p_new = np.zeros_like(patch) + p_tmean

        return p_new, maxidx, np.nan


def _sure_atn_cost(X, method, sing_vals, gamma, sigma=None, tau=None):
    """
    Compute the SURE cost function.

    Parameters
    ----------
    X: np.ndarray
    sing_vals : singular values of X
    gamma: float
    sigma: float
    tau: float
    """
    n, p = np.shape(X)
    # The optimizers work on log-parameters; map them back here.
    if method == "qut":
        gamma = np.exp(gamma) + 1
    else:
        tau = np.exp(tau)

    sing_vals2 = sing_vals ** 2
    n_vals = len(sing_vals)
    D = np.zeros((n_vals, n_vals), dtype=np.float32)
    # Adaptive trace-norm shrinkage of the singular values.
    dhat = sing_vals * np.maximum(1 - ((tau / sing_vals) ** gamma), 0)
    tmp = sing_vals * dhat
    for i in range(n_vals):
        diff2i = sing_vals2[i] - sing_vals2
        diff2i[i] = np.inf  # exclude the diagonal term
        D[i, :] = tmp[i] / diff2i

    # Divergence of the shrinkage estimator (SURE correction term).
    gradd = (1 + (gamma - 1) * (tau / sing_vals) ** gamma) * (sing_vals >= tau)
    div = np.sum(gradd + abs(n - p) * dhat / sing_vals) + 2 * np.sum(D)

    rss = np.sum((dhat - sing_vals) ** 2)
    if method == "gsure":
        return rss / (1 - div / n / p) ** 2
    return (sigma ** 2) * ((-n * p) + (2 * div)) + rss


# Numba-compiled version of the SURE cost (float32/float64 signatures).
if NUMBA_AVAILABLE:
    s = nb.float32
    d = nb.float64
    sure_atn_cost = nb.njit(
        [
            s(s[:, :], nb.types.unicode_type, s[:], s, s, s),
            s(s[:, :], nb.types.unicode_type, s[:], s, d, d),
        ],
        fastmath=True,
    )(_sure_atn_cost)


def _atn_shrink(singvals, gamma, tau):
    """Adaptive trace norm shrinkage."""
    return singvals * np.maximum(1 - (tau / singvals) ** gamma, 0)
471 | overwrite_a=True, 472 | ) 473 | ) 474 | # auto estimation of tau. 475 | tau = np.quantile(maxd, 1 - 1 / np.sqrt(np.log(max(*patch.shape)))) 476 | # single value for gamma not provided, estimating it. 477 | if not isinstance(gamma0, (float, np.floating)): 478 | 479 | def sure_gamma(gamma): 480 | return _sure_atn_cost( 481 | X=patch, 482 | method="qut", 483 | sing_vals=sing_vals, 484 | gamma=gamma, 485 | sigma=stdest, 486 | tau=tau, 487 | ) 488 | 489 | res_opti = minimize(sure_gamma, 0) 490 | gamma = np.exp(res_opti.x) + 1 491 | else: 492 | gamma = gamma0 493 | return gamma, tau 494 | 495 | 496 | def _get_gamma_tau(patch, sing_vals, stdest, method, gamma0, tau0): 497 | """Estimate gamma and tau.""" 498 | # estimation of tau 499 | def sure_tau(tau, *args): 500 | return _sure_atn_cost(*args, tau[0]) 501 | 502 | if tau0 is None: 503 | tau0 = np.log(np.median(sing_vals)) 504 | cost_glob = np.inf 505 | for g in gamma0: 506 | res_opti = minimize( 507 | lambda x: _sure_atn_cost( 508 | X=patch, 509 | method=method, 510 | gamma=g, # noqa: B023 511 | sing_vals=sing_vals, 512 | sigma=stdest, 513 | tau=x, 514 | ), 515 | tau0, 516 | ) 517 | # get cost value. 518 | cost = _sure_atn_cost( 519 | X=patch, 520 | method=method, 521 | gamma=g, 522 | sing_vals=sing_vals, 523 | sigma=stdest, 524 | tau=res_opti.x, 525 | ) 526 | if cost < cost_glob: 527 | gamma = g 528 | tau = np.exp(res_opti.x) 529 | cost_glob = cost 530 | return gamma, tau 531 | 532 | 533 | @fill_doc 534 | class AdaptiveDenoiser(BaseSpaceTimeDenoiser): 535 | """Adaptive Denoiser. 
536 | 537 | Parameters 538 | ---------- 539 | $patch_config 540 | """ 541 | 542 | _SUPPORTED_METHOD = ["sure", "qut", "gsure"] 543 | 544 | def __init__( 545 | self, 546 | patch_shape, 547 | patch_overlap, 548 | method="SURE", 549 | recombination="weighted", 550 | nbsim=500, 551 | ): 552 | super().__init__(patch_shape, patch_overlap, recombination) 553 | if method.lower() not in self._SUPPORTED_METHOD: 554 | raise ValueError( 555 | f"Unsupported method: '{method}', use any of {self._SUPPORTED_METHOD}" 556 | ) 557 | self.input_denoising_kwargs["method"] = method.lower() 558 | self.input_denoising_kwargs["nbsim"] = nbsim 559 | 560 | @fill_doc 561 | def denoise( 562 | self, 563 | input_data, 564 | mask=None, 565 | mask_threshold=50, 566 | tau0=None, 567 | noise_std=None, 568 | gamma0=None, 569 | progbar=None, 570 | ): 571 | """ 572 | Adaptive denoiser. 573 | 574 | Perform the denoising using the adaptive trace norm estimator. [#]_ 575 | 576 | Parameters 577 | ---------- 578 | $input_config 579 | $mask_config 580 | $noise_std 581 | 582 | 583 | References 584 | ---------- 585 | .. [#] J. Josse and S. Sardy, “Adaptive Shrinkage of singular values.” 586 | arXiv, Nov. 22, 2014. 587 | doi: 10.48550/arXiv.1310.6602. 
588 | 589 | 590 | """ 591 | self.input_denoising_kwargs["gamma0"] = gamma0 592 | self.input_denoising_kwargs["tau0"] = tau0 593 | 594 | p_s, p_o = self._get_patch_param(input_data.shape) 595 | if isinstance(noise_std, (float, np.floating)): 596 | var_apriori = noise_std ** 2 * np.ones(input_data.shape[:-1]) 597 | else: 598 | var_apriori = noise_std ** 2 599 | var_apriori = PatchedArray( 600 | np.broadcast_to(var_apriori[..., None], input_data.shape), p_s, p_o 601 | ) 602 | self.input_denoising_kwargs["var_apriori"] = var_apriori 603 | return super().denoise(input_data, mask, mask_threshold, progbar=progbar) 604 | 605 | def _patch_processing( 606 | self, 607 | patch, 608 | patch_idx=None, 609 | gamma0=None, 610 | tau0=None, 611 | var_apriori=None, 612 | method=None, 613 | nbsim=None, 614 | ): 615 | stdest = np.sqrt(np.mean(var_apriori.get_patch(patch_idx))) 616 | 617 | u_vec, sing_vals, v_vec, p_tmean = svd_analysis(patch) 618 | 619 | if method == "qut": 620 | gamma, tau = _get_gamma_tau_qut(patch, sing_vals, stdest, gamma0, nbsim) 621 | else: 622 | gamma, tau = _get_gamma_tau(patch, sing_vals, stdest, method, gamma0, tau0) 623 | # end of parameter selection 624 | # Perform thresholding 625 | 626 | thresh_s_values = _atn_shrink(sing_vals, gamma=gamma, tau=tau) 627 | if np.any(thresh_s_values): 628 | maxidx = np.max(np.nonzero(thresh_s_values)) + 1 629 | p_new = svd_synthesis(u_vec, thresh_s_values, v_vec, p_tmean, maxidx) 630 | else: 631 | maxidx = 0 632 | p_new = np.zeros_like(patch) + p_tmean 633 | 634 | return p_new, maxidx, np.nan 635 | -------------------------------------------------------------------------------- /src/patch_denoise/space_time/utils.py: -------------------------------------------------------------------------------- 1 | """Utilities for space-time denoising.""" 2 | 3 | import numpy as np 4 | from scipy.integrate import quad 5 | from scipy.linalg import eigh, svd 6 | 7 | 8 | def get_patch_locs(p_shape, p_ovl, v_shape): 9 | """ 10 | Get all the 
patch top-left corner locations. 11 | 12 | Parameters 13 | ---------- 14 | vol_shape : tuple 15 | The volume shape 16 | patch_shape : tuple 17 | The patch shape 18 | patch_overlap : tuple 19 | The overlap of patch for each dimension. 20 | 21 | Returns 22 | ------- 23 | numpy.ndarray 24 | All the patch top-left corner locations. 25 | 26 | Notes 27 | ----- 28 | This is a legacy function, you probably want to use the PatchedArray class. 29 | """ 30 | # Create an iterator for all the possible patches top-left corner location. 31 | if len(v_shape) != len(p_shape) or len(v_shape) != len(p_ovl): 32 | raise ValueError( 33 | f"Dimension mismatch between the arguments. {p_shape}{p_ovl}, {v_shape}" 34 | ) 35 | 36 | ranges = [] 37 | for v_s, p_s, p_o in zip(v_shape, p_shape, p_ovl): 38 | if p_o >= p_s: 39 | raise ValueError( 40 | "Overlap should be a non-negative integer smaller than patch_size", 41 | ) 42 | last_idx = v_s - p_s 43 | range_ = np.arange(0, last_idx, p_s - p_o, dtype=np.int32) 44 | if range_[-1] < last_idx: 45 | range_ = np.append(range_, last_idx) 46 | ranges.append(range_) 47 | # fast ND-Cartesian product from https://stackoverflow.com/a/11146645 48 | patch_locs = np.empty( 49 | [len(arr) for arr in ranges] + [len(p_shape)], 50 | dtype=np.int32, 51 | ) 52 | for idx, coords in enumerate(np.ix_(*ranges)): 53 | patch_locs[..., idx] = coords 54 | 55 | return patch_locs.reshape(-1, len(p_shape)) 56 | 57 | 58 | def svd_analysis(input_data): 59 | """Return the centered SVD decomposition. 60 | 61 | U, S, Vt and M are compute such that: 62 | ``X = U @ (S * Vt) + M.`` 63 | 64 | Parameters 65 | ---------- 66 | input_data : numpy.ndarray 67 | The patch 68 | 69 | Returns 70 | ------- 71 | u_vec, s_vals, v_vec, mean 72 | """ 73 | mean = np.mean(input_data, axis=0) 74 | data_centered = input_data - mean 75 | # TODO benchmark svd vs svds and order of data. 
76 | u_vec, s_vals, v_vec = svd(data_centered, full_matrices=False) 77 | 78 | return u_vec, s_vals, v_vec, mean 79 | 80 | 81 | def svd_synthesis(u_vec, s_vals, v_vec, mean, idx): 82 | """ 83 | Reconstruct ``X = (U @ (S * V)) + M`` with only the max_idx greatest component. 84 | 85 | U, S, V must be sorted in decreasing order. 86 | 87 | Parameters 88 | ---------- 89 | u_vec : numpy.ndarray 90 | s_vals : numpy.ndarray 91 | v_vec : numpy.ndarray 92 | mean : numpy.ndarray 93 | idx : int 94 | 95 | Returns 96 | ------- 97 | np.ndarray: The reconstructed matrix. 98 | """ 99 | return (u_vec[:, :idx] @ (s_vals[:idx, None] * v_vec[:idx, :])) + mean 100 | 101 | 102 | def eig_analysis(input_data, max_eig_val=10): 103 | """ 104 | Return the eigen values and vectors of the autocorrelation of the patch. 105 | 106 | This method is faster than the svd, but the eigen values 107 | are in increasing order. 108 | 109 | Parameters 110 | ---------- 111 | input_data : np.ndarray 112 | A 2D Array 113 | max_eig_val : int, optional 114 | For faster results, only the ``max_eig_val`` biggest eigenvalues are 115 | computed. default = 10 116 | 117 | Returns 118 | ------- 119 | A : numpy.ndarray 120 | The centered patch A = X - M 121 | d : numpy.ndarray 122 | The eigenvalues of A^H A 123 | W : numpy.ndarray 124 | The eigenvector matrix of A^H A 125 | M : numpy.ndarray 126 | The mean of the patch along the time axis 127 | """ 128 | mean = np.mean(input_data, axis=0) 129 | data_centered = input_data - mean 130 | M, N = data_centered.shape 131 | eig_vals, eig_vec = eigh( 132 | data_centered.conj().T @ data_centered / (M - 1), 133 | driver="evx", 134 | subset_by_index=(len(mean) - max_eig_val, len(mean) - 1), 135 | ) 136 | 137 | return data_centered, eig_vals, eig_vec, mean 138 | 139 | 140 | def eig_synthesis(data_centered, eig_vec, mean, max_val): 141 | """Reconstruction the denoise patch with truncated eigen decomposition. 
142 | 143 | This implements equations (1) and (2) of manjon2013 144 | """ 145 | eig_vec[:, :-max_val] = 0 146 | return ((data_centered @ eig_vec) @ eig_vec.conj().T) + mean 147 | 148 | 149 | def marchenko_pastur_median(beta, eps=1e-7): 150 | r"""Compute the median of the Marchenko-Pastur Distribution. 151 | 152 | Parameters 153 | ---------- 154 | beta: float 155 | aspect ratio of a matrix. 156 | eps: float 157 | Precision Parameter 158 | Return 159 | ------ 160 | float: the estimated median 161 | 162 | Notes 163 | ----- 164 | This method Solve :math:`F(x) = 1/2` by dichotomy with 165 | .. math :: 166 | 167 | F(x) = \int_{\beta_-}^{x} \frac{\sqrt{(\beta_+-t)(t-\beta_-)}}{2\pi\beta t} dt 168 | 169 | The integral is computed using scipy.integrate.quad 170 | """ 171 | if not (0 <= beta <= 1): 172 | raise ValueError("Aspect Ratio should be between 0 and 1") 173 | 174 | beta_p = (1 + np.sqrt(beta)) ** 2 175 | beta_m = (1 - np.sqrt(beta)) ** 2 176 | 177 | def mp_pdf(x): 178 | """Marchenko Pastur Probability density function.""" 179 | if beta_p >= x >= beta_m: 180 | return np.sqrt((beta_p - x) * (x - beta_m)) / (2 * np.pi * x * beta) 181 | else: 182 | return 0 183 | 184 | change = True 185 | hibnd = beta_p 186 | lobnd = beta_m 187 | # quad return (value, upperbound_error). 188 | # We only need the integral value 189 | 190 | n = 0 191 | while change and (hibnd - lobnd) > eps and n < 20: 192 | change = False 193 | midpoints = np.linspace(lobnd, hibnd, 5) 194 | int_estimates = np.array( 195 | list(map(lambda xx: quad(lambda x: mp_pdf(x), beta_m, xx)[0], midpoints)) 196 | ) 197 | if np.any(int_estimates < 0.5): 198 | lobnd = np.max(midpoints[int_estimates < 0.5]) 199 | change = True 200 | if np.any(int_estimates > 0.5): 201 | hibnd = np.min(midpoints[int_estimates > 0.5]) 202 | change = True 203 | n += 1 204 | return (lobnd + hibnd) / 2 205 | 206 | 207 | def estimate_noise(noise_sequence, block_size=1): 208 | """Estimate a noise map from a noise only sequence. 
209 | 210 | The noise map is the standard deviation of the noise in each patch. 211 | 212 | Parameters 213 | ---------- 214 | noise_sequence : np.ndarray of shape (X, Y, Z, T) 215 | The noise-only data. 216 | block_size : int 217 | The size of the patch used to estimate the noise. 218 | 219 | Returns 220 | ------- 221 | np.ndarray of shape (X, Y, Z) 222 | The estimated noise map. 223 | """ 224 | volume_shape = noise_sequence.shape[:-1] 225 | noise_map = np.empty(volume_shape) 226 | patch_shape = (block_size,) * len(volume_shape) 227 | patch_overlap = (block_size - 1,) * len(volume_shape) 228 | 229 | for patch_tl in get_patch_locs(patch_shape, patch_overlap, volume_shape): 230 | # Get the index of voxels in the patch 231 | patch_slice = tuple( 232 | slice(ptl, ptl + ps) for ptl, ps in zip(patch_tl, patch_shape) 233 | ) 234 | # Identify the voxel in the center of the patch 235 | patch_center_img = tuple( 236 | slice(ptl + ps // 2, ptl + ps // 2 + 1) 237 | for ptl, ps in zip(patch_tl, patch_shape) 238 | ) 239 | # Set the value of the voxel in the center of the patch to the SD of 240 | # the patch 241 | noise_map[patch_center_img] = np.std(noise_sequence[patch_slice]) 242 | return noise_map 243 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paquiteau/patch-denoising/8d8bd61cb2181488f0c7cdb08daf268e46fb160f/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | 4 | from patch_denoise.simulation.activations import add_frames 5 | from patch_denoise.simulation.noise import add_temporal_gaussian_noise 6 | from patch_denoise.simulation.phantom import g_factor_map, mr_shepp_logan_t2_star 7 | 8 | 9 | 
@pytest.fixture(scope="session") 10 | def rng(): 11 | return np.random.RandomState(42) 12 | 13 | 14 | @pytest.fixture(scope="session") 15 | def phantom(N_rep=20): 16 | """Create a dummy phantom with fake activations.""" 17 | return add_frames(mr_shepp_logan_t2_star(64)[32], N_rep) 18 | 19 | 20 | @pytest.fixture(scope="session") 21 | def noisy_phantom(phantom, rng): 22 | """Create noisy version of phantom.""" 23 | g_map = g_factor_map(phantom.shape[:-1]) 24 | return add_temporal_gaussian_noise( 25 | phantom, 26 | sigma=1, 27 | rng=rng, 28 | g_factor_map=g_map, 29 | ) 30 | -------------------------------------------------------------------------------- /tests/test_bindings.py: -------------------------------------------------------------------------------- 1 | """Test for the binding module.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | import numpy.testing as npt 7 | import pytest 8 | 9 | MODOPT_AVAILABLE = True 10 | NIPYPE_AVAILABLE = True 11 | try: 12 | import modopt 13 | except ImportError as e: 14 | MODOPT_AVAILABLE = False 15 | try: 16 | import nipype 17 | import nibabel as nib 18 | except ImportError as e: 19 | NIPYPE_AVAILABLE = False 20 | 21 | from patch_denoise.bindings.modopt import LLRDenoiserOperator 22 | from patch_denoise.bindings.nipype import PatchDenoise 23 | from patch_denoise.bindings.utils import DenoiseParameters 24 | from patch_denoise.denoise import mp_pca 25 | 26 | 27 | @pytest.fixture(scope="module") 28 | def denoised_ref(noisy_phantom): 29 | return mp_pca( 30 | noisy_phantom, 31 | patch_shape=6, 32 | patch_overlap=5, 33 | threshold_scale=2.3, 34 | recombination="weighted", 35 | )[0] 36 | 37 | 38 | @pytest.fixture 39 | def nifti_noisy_phantom(noisy_phantom, tmpdir_factory): 40 | tempdir = tmpdir_factory.mktemp("test") 41 | tempdir.chdir() 42 | nii_img = nib.Nifti1Image(noisy_phantom, affine=np.eye(4)) 43 | nib.nifti1.save(nii_img, "noisy_phantom.nii") 44 | return os.path.abspath("noisy_phantom.nii") 45 | 46 | 47 | def 
test_modopt(noisy_phantom, denoised_ref): 48 | """test the Modopt Operator.""" 49 | operator = LLRDenoiserOperator( 50 | "mp-pca", 51 | patch_shape=6, 52 | patch_overlap=5, 53 | threshold_scale=2.3, 54 | recombination="weighted", 55 | ) 56 | 57 | denoised_modopt = operator.op(noisy_phantom) 58 | 59 | npt.assert_allclose(denoised_modopt, denoised_ref) 60 | 61 | 62 | def test_entrypoint(): 63 | """Test entrypoint of patch-denoise function.""" 64 | exit_status = os.system("patch-denoise --help") 65 | assert exit_status == 0 66 | 67 | 68 | def test_cli(nifti_noisy_phantom, tmpdir_factory, denoised_ref): 69 | tempdir = tmpdir_factory.mktemp("test") 70 | tempdir.chdir() 71 | outfile = "out.nii" 72 | print(nifti_noisy_phantom, tempdir) 73 | exit_status = os.system( 74 | f"patch-denoise {nifti_noisy_phantom} {outfile} --conf mp-pca_6_5_weighted --extra threshold_scale=2.3" 75 | ) 76 | assert exit_status == 0 77 | npt.assert_allclose( 78 | nib.load(outfile).get_fdata(dtype=np.float32), 79 | denoised_ref, 80 | rtol=1e-6, 81 | atol=1e-2, 82 | ) 83 | 84 | 85 | def test_denoise_param(): 86 | """Test the Denoise parameter structure.""" 87 | d = DenoiseParameters("optimal-fro", 11, 10, "weighted", 10) 88 | d2 = DenoiseParameters.from_str(str(d)) 89 | assert d2 == d 90 | 91 | 92 | def test_nipype_mag(nifti_noisy_phantom, denoised_ref): 93 | """Test the Nipye Interfaces.""" 94 | 95 | interface = PatchDenoise() 96 | interface.inputs.in_mag = nifti_noisy_phantom 97 | interface.inputs.denoise_str = "mp-pca_6_5_weighted" 98 | interface.inputs.extra_kwargs = {"threshold_scale": 2.3} 99 | 100 | output_file = interface.run().outputs.denoised_file 101 | 102 | output_data = nib.load(output_file).get_fdata(dtype=np.float32) 103 | npt.assert_allclose(output_data, denoised_ref, rtol=1e-2) 104 | 105 | 106 | def test_nipype_cpx(nifti_noisy_phantom): 107 | """Test the Nipye Interfaces.""" 108 | 109 | interface = PatchDenoise() 110 | interface.inputs.in_real = nifti_noisy_phantom 111 | 
interface.inputs.in_imag = nifti_noisy_phantom 112 | interface.inputs.denoise_str = "mp-pca_6_5_weighted" 113 | interface.inputs.extra_kwargs = {"threshold_scale": 2.3} 114 | 115 | output_file = interface.run().outputs.denoised_file 116 | 117 | 118 | def test_denoise_paramter_pretty_par(): 119 | pretty_par = DenoiseParameters("optimal-fro", 11, 10, "weighted", 10).pretty_par 120 | 121 | assert pretty_par == "11_10w" 122 | 123 | 124 | def test_denoise_parameter_pretty(): 125 | """Test the pretty_name.""" 126 | 127 | pretty_string = "optimal-fro_11_10_weighted_10" 128 | pretty_name = DenoiseParameters.from_str(pretty_string).pretty_name 129 | 130 | assert pretty_name == pretty_string 131 | -------------------------------------------------------------------------------- /tests/test_denoiser.py: -------------------------------------------------------------------------------- 1 | """Test for the different denoising methods.""" 2 | import numpy as np 3 | import numpy.testing as npt 4 | import pytest 5 | 6 | from patch_denoise.denoise import ( 7 | adaptive_thresholding, 8 | hybrid_pca, 9 | mp_pca, 10 | nordic, 11 | optimal_thresholding, 12 | raw_svt, 13 | ) 14 | 15 | from patch_denoise.simulation.phantom import g_factor_map 16 | 17 | 18 | @pytest.mark.parametrize("recombination", ["weighted", "average", "center"]) 19 | def test_mppca_denoiser(phantom, noisy_phantom, recombination): 20 | """Test the MP-PCA denoiser.""" 21 | denoised, weights, noise, rank_map = mp_pca( 22 | noisy_phantom, 23 | patch_shape=6, 24 | patch_overlap=5, 25 | threshold_scale=2.3, 26 | recombination=recombination, 27 | ) 28 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 29 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 30 | assert noise_std_after < noise_std_before 31 | 32 | 33 | @pytest.mark.parametrize("recombination", ["weighted", "average", "center"]) 34 | def test_hybridpca_denoiser(phantom, noisy_phantom, recombination): 
35 | """Test the Hybrid-PCA denoiser.""" 36 | denoised, weights, noise, rank_map = hybrid_pca( 37 | noisy_phantom, 38 | patch_shape=6, 39 | patch_overlap=5, 40 | noise_std=1.0, 41 | recombination=recombination, 42 | ) 43 | 44 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 45 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 46 | assert noise_std_after < noise_std_before 47 | 48 | 49 | @pytest.mark.parametrize("recombination", ["weighted", "average", "center"]) 50 | def test_nordic_denoiser(phantom, noisy_phantom, recombination): 51 | """Test the Hybrid-PCA denoiser.""" 52 | denoised, weights, noise, rank_map = nordic( 53 | noisy_phantom, 54 | patch_shape=6, 55 | patch_overlap=5, 56 | noise_std=1.0, 57 | recombination=recombination, 58 | ) 59 | 60 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 61 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 62 | assert noise_std_after < noise_std_before 63 | 64 | 65 | @pytest.mark.parametrize("recombination", ["weighted", "average", "center"]) 66 | def test_rawsvt_denoiser(phantom, noisy_phantom, recombination): 67 | """Test the Hybrid-PCA denoiser.""" 68 | denoised, weights, noise, rank_map = raw_svt( 69 | noisy_phantom, 70 | patch_shape=6, 71 | patch_overlap=5, 72 | threshold=10, 73 | recombination=recombination, 74 | ) 75 | 76 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 77 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 78 | assert noise_std_after < noise_std_before 79 | 80 | 81 | @pytest.mark.parametrize("recombination", ["weighted", "average", "center"]) 82 | @pytest.mark.parametrize("loss", ["fro", "nuc", "ope"]) 83 | def test_optimal_denoiser(phantom, noisy_phantom, recombination, loss): 84 | """Test the Optimal Thresholding denoiser.""" 85 | denoised, weights, noise, rank_map = optimal_thresholding( 86 | 
noisy_phantom, 87 | patch_shape=6, 88 | patch_overlap=5, 89 | recombination=recombination, 90 | loss=loss, 91 | ) 92 | 93 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 94 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 95 | assert noise_std_after < noise_std_before 96 | 97 | 98 | @pytest.mark.parametrize("recombination", ["weighted", "average", "center"]) 99 | @pytest.mark.parametrize("loss", ["fro", "nuc", "ope"]) 100 | def test_optimal_denoiser2(phantom, noisy_phantom, recombination, loss): 101 | """Test the Optimal Thresholding denoiser with noise apriori provided.""" 102 | denoised, weights, noise, rank_map = optimal_thresholding( 103 | noisy_phantom, 104 | patch_shape=10, 105 | patch_overlap=9, 106 | noise_std=1.414 * g_factor_map(phantom.shape[:-1]), 107 | recombination=recombination, 108 | loss=loss, 109 | ) 110 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 111 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 112 | assert noise_std_after < noise_std_before 113 | 114 | 115 | # center is not tested, it takes too much time. 
116 | @pytest.mark.parametrize("recombination", ["weighted", "average"]) 117 | @pytest.mark.parametrize( 118 | "method, gamma", 119 | [("qut", None), ("gsure", np.linspace(1, 5, 10)), ("sure", np.linspace(1, 5, 10))], 120 | ) 121 | def test_adaptive_denoiser(phantom, noisy_phantom, recombination, method, gamma): 122 | """Test the Adaptive Thresholding denoiser.""" 123 | denoised, weights, noise, rank_map = adaptive_thresholding( 124 | noisy_phantom, 125 | patch_shape=10, 126 | patch_overlap=0, 127 | recombination=recombination, 128 | method=method, 129 | noise_std=2 * g_factor_map(phantom.shape[:-1]), 130 | gamma0=gamma, 131 | nbsim=500, 132 | ) 133 | 134 | noise_std_before = np.sqrt(np.nanmean(np.nanvar(noisy_phantom - phantom, axis=-1))) 135 | noise_std_after = np.sqrt(np.nanmean(np.nanvar(denoised - phantom, axis=-1))) 136 | assert noise_std_after < noise_std_before 137 | 138 | 139 | def test_raise_nordic(phantom, noisy_phantom): 140 | """Test raise error for nordic.""" 141 | npt.assert_raises( 142 | ValueError, 143 | nordic, 144 | noisy_phantom, 145 | patch_shape=6, 146 | patch_overlap=5, 147 | noise_std="not_a_float_or_array", 148 | ) 149 | -------------------------------------------------------------------------------- /tests/test_spacetime_utils.py: -------------------------------------------------------------------------------- 1 | """Test space-times utilities.""" 2 | from itertools import product 3 | 4 | import numpy as np 5 | import pytest 6 | 7 | from patch_denoise.space_time.utils import ( 8 | eig_analysis, 9 | eig_synthesis, 10 | estimate_noise, 11 | marchenko_pastur_median, 12 | svd_analysis, 13 | svd_synthesis, 14 | ) 15 | 16 | parametrize_random_matrix = pytest.mark.parametrize( 17 | "matrix", 18 | [ 19 | {"cplx": a, "shape": b, "sigma": c} 20 | for a, b, c in product([False], [(100, 50), (100, 10)], [0.1, 1, 10]) 21 | ], 22 | indirect=True, 23 | ) 24 | 25 | 26 | @pytest.fixture(scope="function") 27 | def medium_random_matrix(rng): 28 | """Create 
random 3D array. with size (200, 200, 100).""" 29 | shape = (200, 200, 100) 30 | return rng.randn(*shape) 31 | 32 | 33 | @pytest.fixture() 34 | def matrix(request): 35 | """Create random matrix on command with shape and noise level.""" 36 | rng = np.random.RandomState(42) 37 | sigma = request.param["sigma"] 38 | M, N = request.param["shape"] 39 | if not request.param["cplx"]: 40 | return rng.randn(M, N) * sigma 41 | return rng.randn(M, N) * sigma + 1j * rng.randn(M, N) * sigma 42 | 43 | 44 | @pytest.mark.parametrize("beta", np.arange(1, 10) * 0.1) 45 | def test_marshenko_pastur_median(beta, rng, n_runs=10000, n_samples=1000): 46 | """Test the median estimation of Marshenko Pastur law.""" 47 | print(beta) 48 | beta_p = (1 + np.sqrt(beta)) ** 2 49 | beta_m = (1 - np.sqrt(beta)) ** 2 50 | 51 | def f(x): 52 | """Marchenko Pastur Probability density function.""" 53 | if beta_p >= x >= beta_m: 54 | return np.sqrt((beta_p - x) * (x - beta_m)) / (2 * np.pi * x * beta) 55 | else: 56 | return 0 57 | 58 | integral_median = marchenko_pastur_median(beta, eps=1e-7) 59 | 60 | vals = np.linspace(beta_m, beta_p, n_samples) 61 | proba = np.array(list(map(f, vals))) 62 | proba /= np.sum(proba) 63 | samples = np.zeros(n_runs) 64 | for i in range(n_runs): 65 | samples[i] = np.median(rng.choice(vals, size=n_runs, p=proba)) 66 | # montecarlo_median = np.mean(samples) 67 | 68 | # TODO: increase precision of montecarlo simulation 69 | assert np.std(samples) <= 0.1 * integral_median 70 | 71 | 72 | @pytest.mark.parametrize("block_dim", range(5, 10)) 73 | def test_noise_estimation(block_dim): 74 | """Test noise estimation. 75 | 76 | The mean patch-wise standard deviation should be close to the overall 77 | standard deviation. 
78 | """ 79 | for seed in range(15): 80 | print(f"Seed: {seed}") 81 | rng = np.random.RandomState(seed) 82 | medium_random_matrix = rng.randn(200, 200, 100) 83 | print(f"Mean of raw: {np.nanmean(medium_random_matrix)}") 84 | print(f"Max of raw: {np.nanmax(medium_random_matrix)}") 85 | print(f"Min of raw: {np.nanmin(medium_random_matrix)}") 86 | real_std = np.nanstd(medium_random_matrix) 87 | print(f"SD of raw: {real_std}") 88 | 89 | noise_map = estimate_noise(medium_random_matrix, block_dim) 90 | print(f"Mean of noise map: {np.nanmean(noise_map)}") 91 | print(f"Max of noise map: {np.nanmax(noise_map)}") 92 | print(f"Min of noise map: {np.nanmin(noise_map)}") 93 | print(f"SD of noise map: {np.nanstd(noise_map)}") 94 | err = np.nanmean(noise_map - real_std) 95 | print(f"Err: {err}") 96 | assert err <= 0.1 * real_std 97 | 98 | 99 | @parametrize_random_matrix 100 | def test_svd(matrix): 101 | """Test SVD functions.""" 102 | U, S, V, M = svd_analysis(matrix) 103 | new_matrix = svd_synthesis(U, S, V, M, idx=len(S)) 104 | 105 | # TODO Refine the precision criteria 106 | assert np.sqrt(np.mean(np.square(matrix - new_matrix))) <= np.std(matrix) / 10 107 | 108 | 109 | @parametrize_random_matrix 110 | def test_eig(matrix): 111 | """Test SVD via eigenvalue decomposition.""" 112 | A, d, W, M = eig_analysis(matrix) 113 | new_matrix = eig_synthesis(A, W, M, max_val=len(M)) 114 | # TODO Refine the precision criteria 115 | assert np.sqrt(np.mean(np.square(matrix - new_matrix))) <= np.std(matrix) 116 | --------------------------------------------------------------------------------