├── .git_archival.txt ├── .gitattributes ├── .github └── workflows │ ├── apptainer.yml │ ├── codeql.yml │ ├── python-publish.yml │ ├── source-publish.yml │ └── unit-tests.yml ├── .gitignore ├── .pep8speaks.yml ├── .readthedocs.yml ├── LICENSE ├── MANIFEST.in ├── README.rst ├── apptainer └── tike.def ├── docs ├── CONTRIBUTING.rst ├── INSTALL.rst ├── Makefile ├── make.bat ├── pull_request_template.md ├── requirements.txt └── source │ ├── api │ ├── align.rst │ ├── align.solvers.rst │ ├── api-class-diagram.mmd │ ├── api-class-diagram.svg │ ├── cluster.rst │ ├── communicators.rst │ ├── constants.rst │ ├── index.rst │ ├── lamino.rst │ ├── lamino.solvers.rst │ ├── linalg.rst │ ├── operators.rst │ ├── opt.rst │ ├── precision.rst │ ├── ptycho.exitwave.rst │ ├── ptycho.fresnel.rst │ ├── ptycho.io.rst │ ├── ptycho.learn.rst │ ├── ptycho.object.rst │ ├── ptycho.position.rst │ ├── ptycho.probe.rst │ ├── ptycho.rst │ ├── ptycho.solvers.rst │ ├── random.rst │ ├── scan.rst │ ├── trajectory.rst │ └── view.rst │ ├── bibtex │ └── zrefs.bib │ ├── conf.py │ ├── contributing.rst │ ├── examples.rst │ ├── examples │ ├── admm.ipynb │ ├── align.ipynb │ ├── ptycho.ipynb │ ├── scan.ipynb │ └── tomo.ipynb │ ├── index.rst │ ├── install.rst │ └── license.rst ├── profile ├── profile_admm.py ├── profile_lamino.py ├── profile_ptycho.py ├── profile_tomo.py └── requirements.txt ├── pyproject.toml ├── requirements-container.txt ├── requirements.txt ├── setup.cfg ├── src ├── .clang-format ├── broken │ ├── communicator.py │ ├── operators │ │ └── tomo.py │ ├── ptycho │ │ └── solvers │ │ │ └── admm.py │ ├── reg.py │ ├── tike.py │ └── tomo │ │ ├── __init__.py │ │ ├── solvers.py │ │ └── tomo.py └── tike │ ├── __init__.py │ ├── align │ ├── __init__.py │ ├── align.py │ └── solvers │ │ ├── __init__.py │ │ ├── cross_correlation.py │ │ └── farneback.py │ ├── cluster.py │ ├── communicators │ ├── __init__.py │ ├── comm.py │ ├── mpi.py │ ├── pool.py │ └── stream.py │ ├── constants.py │ ├── lamino │ ├── __init__.py 
│ ├── bucket.py │ ├── lamino.py │ └── solvers │ │ ├── __init__.py │ │ ├── bucket.py │ │ └── cgrad.py │ ├── linalg.py │ ├── operators │ ├── __init__.py │ └── cupy │ │ ├── __init__.py │ │ ├── alignment.py │ │ ├── bucket.cu │ │ ├── bucket.py │ │ ├── cache.py │ │ ├── convolution.cu │ │ ├── convolution.py │ │ ├── flow.py │ │ ├── fresnelspectprop.py │ │ ├── grid.cu │ │ ├── interp.cu │ │ ├── lamino.py │ │ ├── multislice.py │ │ ├── objective.py │ │ ├── operator.py │ │ ├── pad.py │ │ ├── patch.py │ │ ├── propagation.py │ │ ├── ptycho.py │ │ ├── rotate.py │ │ ├── shift.py │ │ ├── usfft.cu │ │ └── usfft.py │ ├── opt.py │ ├── precision.py │ ├── ptycho │ ├── __init__.py │ ├── exitwave.py │ ├── fresnel.py │ ├── io.py │ ├── learn.py │ ├── object.py │ ├── position.py │ ├── probe.py │ ├── ptycho.py │ └── solvers │ │ ├── __init__.py │ │ ├── _preconditioner.py │ │ ├── lstsq.py │ │ ├── options.py │ │ └── rpie.py │ ├── random.py │ ├── scan.py │ ├── trajectory.py │ └── view.py └── tests ├── .coveragerc ├── communicators ├── test_comm.py ├── test_communicator.py ├── test_mpi.py ├── test_pool.py └── test_streams.py ├── compare_gradients.py ├── data ├── algin_setup.pickle.lzma ├── beta-chip-128.tiff ├── delta-chip-128.tiff ├── images.bib ├── lamino_bucket.pickle.lzma ├── lamino_cgrad.pickle.lzma ├── lamino_setup.pickle.lzma ├── nalm256.pickle.lzma ├── position-error-247.pickle.bz2 ├── ptycho_gaussian.pickle.lzma ├── ptycho_setup.pickle.lzma ├── siemens-star-small.npz.bz2 ├── singers.npz.bz2 ├── tomo_grad.pickle.lzma └── tomo_setup.pickle.lzma ├── operators ├── __init__.py ├── test_alignment.py ├── test_checkerboard.py ├── test_convolution.py ├── test_flow.py ├── test_lamino.py ├── test_multislice.py ├── test_pad.py ├── test_patch.py ├── test_propagation.py ├── test_ptycho.py ├── test_rotate.py ├── test_shift.py ├── test_sum.py ├── test_usfft.py └── util.py ├── print-gpu-info.py ├── ptycho ├── __init__.py ├── hermite.mat ├── io.py ├── ortho-in.mat ├── ortho-out.mat ├── templates.py ├── 
test_multigrid.py ├── test_online.py ├── test_position.py ├── test_probe.py └── test_ptycho.py ├── test_align.py ├── test_lamino.py ├── test_linalg.py ├── test_opt.py ├── test_random.py ├── test_tomo.py ├── test_trajectory.py ├── test_view.py └── theta_coverage.npy /.git_archival.txt: -------------------------------------------------------------------------------- 1 | node: 8e1f93724921be1a443edccc669b4c88ae4a9b6a 2 | node-date: 2024-08-22T15:21:41-05:00 3 | describe-name: v0.26.0-67-g8e1f9372 4 | ref-names: HEAD -> main 5 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .git_archival.txt export-subst 2 | -------------------------------------------------------------------------------- /.github/workflows/apptainer.yml: -------------------------------------------------------------------------------- 1 | # This workflow builds an apptainer with tike installed 2 | 3 | name: Publish Apptainer 4 | 5 | on: 6 | workflow_dispatch: 7 | release: 8 | types: [published] 9 | push: 10 | branches: [main] 11 | 12 | permissions: 13 | contents: read 14 | packages: write 15 | 16 | jobs: 17 | 18 | publish-apptainer-to-ghcr: 19 | runs-on: ubuntu-latest 20 | strategy: 21 | matrix: 22 | cuda-version: 23 | - "11.8" 24 | - "12.0" 25 | target-arch: 26 | - "x86_64" 27 | - "aarch64" 28 | steps: 29 | - uses: actions/checkout@v4 30 | - name: Set up QEMU 31 | uses: docker/setup-qemu-action@v3 32 | with: 33 | platforms: arm64 34 | - uses: eWaterCycle/setup-apptainer@v2 35 | with: 36 | apptainer-version: 1.3.0 37 | - name: Build container from definition 38 | run: > 39 | apptainer build 40 | --build-arg cuda_version=${{ matrix.cuda-version }} 41 | --build-arg target_arch=${{ matrix.target-arch }} 42 | --build-arg pkg_version=${{ github.ref_name }} 43 | apptainer.sif 44 | apptainer/${{ github.event.repository.name }}.def 45 | - name: Upload to container registry 46 
| run: | 47 | echo ${{ secrets.GITHUB_TOKEN }} | apptainer registry login -u ${{ github.actor }} --password-stdin oras://ghcr.io 48 | apptainer push apptainer.sif oras://ghcr.io/${GITHUB_REPOSITORY,,}:${{ github.ref_name }}-${{ matrix.target-arch }}-cuda${{ matrix.cuda-version }} 49 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ "main" ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ "main" ] 20 | schedule: 21 | - cron: '44 3 * * 2' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v3 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v2 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 
49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | 52 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 53 | # queries: security-extended,security-and-quality 54 | 55 | 56 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). 57 | # If this step fails, then you should remove it and run the build manually (see below) 58 | - name: Autobuild 59 | uses: github/codeql-action/autobuild@v2 60 | 61 | # ℹ️ Command-line programs to run using the OS shell. 62 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 63 | 64 | # If the Autobuild fails above, remove it and uncomment the following three lines. 65 | # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 66 | 67 | # - run: | 68 | # echo "Run, Build Application using script" 69 | # ./location_of_script_within_repo/buildscript.sh 70 | 71 | - name: Perform CodeQL Analysis 72 | uses: github/codeql-action/analyze@v2 73 | with: 74 | category: "/language:${{matrix.language}}" 75 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 
5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /.github/workflows/source-publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload a source archive and hash at every tagged release 2 | 3 | on: 4 | push: 5 | tags: 6 | # Use pattern matching to only run on version release tags 7 | - "v[0-9]+.[0-9]+.[0-9]+" 8 | 9 | # Allows you to run this workflow manually from the Actions tab 10 | workflow_dispatch: 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: carterbox/release-archive-with-hash@v1 17 | with: 18 | token: ${{ secrets.SOURCE_REL_TOKEN }} 19 | -------------------------------------------------------------------------------- /.github/workflows/unit-tests.yml: -------------------------------------------------------------------------------- 1 | name: Run unit tests 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - "tests/**" 10 | - "src/tike/**" 11 | - pyproject.toml 12 | - .github/workflows/unit-tests.yml 13 | pull_request: 14 | branches: 15 | - main 16 | paths: 17 | - "tests/**" 18 | - 
"src/tike/**" 19 | - pyproject.toml 20 | - .github/workflows/unit-tests.yml 21 | 22 | jobs: 23 | linux-x86-no-mpi: 24 | runs-on: self-hosted 25 | continue-on-error: true 26 | 27 | strategy: 28 | matrix: 29 | python-version: 30 | - "3.10" 31 | - "3.11" 32 | - "3.9" 33 | 34 | steps: 35 | - uses: actions/checkout@v3 36 | 37 | - run: | 38 | echo ${CUDA_VISIBLE_DEVICES} 39 | echo "CUDA_VERSION=$(nvidia-smi -q | grep "CUDA Version" | sed 's/CUDA Version\s*: //' | sed 's/\.[0-9]*//')" >> "$GITHUB_ENV" 40 | 41 | - run: | 42 | conda config --remove channels defaults || true 43 | conda config --add channels conda-forge 44 | conda config --show channels 45 | name: Configure Conda to only use conda-forge 46 | 47 | - run: > 48 | conda create --quiet --yes --force 49 | -n tike 50 | --channel conda-forge 51 | --file requirements.txt 52 | pytest 53 | python=${{ matrix.python-version }} 54 | cuda-version=$CUDA_VERSION 55 | name: Create build environment 56 | 57 | - run: conda remove -n tike mpi4py --yes --quiet || true 58 | name: Remove MPI from test environment 59 | 60 | - run: conda list -n tike 61 | name: List build environment 62 | 63 | - run: | 64 | source activate tike 65 | pip install . --no-deps --no-build-isolation 66 | name: Setup and install 67 | 68 | - run: | 69 | source activate tike 70 | python tests/print-gpu-info.py 71 | name: Print GPU info 72 | 73 | - run: | 74 | source activate tike 75 | export TIKE_TEST_CI 76 | pytest -vs tests 77 | name: Run tests 78 | 79 | - run: | 80 | cd tests/result 81 | zip -r9 ../../result.zip . 
82 | name: Zip results 83 | 84 | - uses: actions/upload-artifact@v3 85 | if: ${{ matrix.python-version == '3.9' }} 86 | with: 87 | path: result.zip 88 | name: Without MPI results 89 | 90 | - run: conda remove -n tike --all 91 | name: Clean up environment 92 | 93 | linux-x86-with-mpi: 94 | runs-on: self-hosted 95 | continue-on-error: true 96 | 97 | strategy: 98 | matrix: 99 | python-version: 100 | - "3.8" 101 | 102 | steps: 103 | - uses: actions/checkout@v3 104 | 105 | - run: | 106 | echo ${CUDA_VISIBLE_DEVICES} 107 | echo "CUDA_VERSION=$(nvidia-smi -q | grep "CUDA Version" | sed 's/CUDA Version\s*: //' | sed 's/\.[0-9]*//')" >> "$GITHUB_ENV" 108 | 109 | - run: | 110 | conda config --remove channels defaults || true 111 | conda config --add channels conda-forge 112 | conda config --show channels 113 | name: Configure Conda to only use conda-forge 114 | 115 | - run: > 116 | conda create --quiet --yes --force 117 | -n tike 118 | --channel conda-forge 119 | --file requirements.txt 120 | pytest 121 | python=${{ matrix.python-version }} 122 | 'openmpi=*=h*' 123 | mpi4py 124 | cuda-version=$CUDA_VERSION 125 | name: Create build environment 126 | 127 | - run: conda list -n tike 128 | name: List build environment 129 | 130 | - run: | 131 | source activate tike 132 | pip install . --no-deps --no-build-isolation 133 | name: Setup and install 134 | 135 | - run: | 136 | source activate tike 137 | python tests/print-gpu-info.py 138 | name: Print GPU info 139 | 140 | - run: | 141 | source activate tike 142 | export OMPI_MCA_opal_cuda_support=true 143 | export TIKE_TEST_CI 144 | mpiexec -n 2 python -m pytest -vs tests 145 | name: Run tests with MPI 146 | 147 | - run: | 148 | cd tests/result 149 | zip -r9 ../../result.zip . 
150 | name: Zip results 151 | 152 | - uses: actions/upload-artifact@v3 153 | with: 154 | path: result.zip 155 | name: MPI results 156 | 157 | - run: conda remove -n tike --all 158 | name: Clean up environment 159 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /tests/result/ 2 | archive/ 3 | 4 | # apptainer images 5 | *.sif 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | *.o 15 | 16 | # Distribution / packaging 17 | config/Mk.config 18 | .Python 19 | build/ 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *.cover 55 | .hypothesis/ 56 | .pytest_cache/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # celery beat schedule file 91 | celerybeat-schedule 92 | 93 | # SageMath parsed files 94 | *.sage.py 95 | 96 | # Environments 97 | .env 98 | .venv 99 | env/ 100 | venv/ 101 | ENV/ 102 | env.bak/ 103 | venv.bak/ 104 | 105 | # Spyder project settings 106 | .spyderproject 107 | .spyproject 108 | 109 | # Rope project settings 110 | .ropeproject 111 | 112 | # mkdocs documentation 113 | /site 114 | 115 | # mypy 116 | .mypy_cache/ 117 | .dmypy.json 118 | dmypy.json 119 | 120 | # OS generated files 121 | core 122 | .DS_Store 123 | .nfs* 124 | 125 | # MPI host files 126 | host_list 127 | 128 | # vscode setting 129 | .vscode/ 130 | -------------------------------------------------------------------------------- /.pep8speaks.yml: -------------------------------------------------------------------------------- 1 | # File : .pep8speaks.yml 2 | 3 | no_blank_comment: False # If True, no comment is made on PR without any errors. 
4 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | version: 2 6 | 7 | build: 8 | os: ubuntu-22.04 9 | tools: 10 | python: "3.10" 11 | 12 | sphinx: 13 | configuration: docs/source/conf.py 14 | 15 | python: 16 | install: 17 | - requirements: docs/requirements.txt 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. 2 | 3 | Copyright 2018. UChicago Argonne, LLC. This software was produced 4 | under U.S. Government contract DE-AC02-06CH11357 for Argonne National 5 | Laboratory (ANL), which is operated by UChicago Argonne, LLC for the 6 | U.S. Department of Energy. The U.S. Government has rights to use, 7 | reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR 8 | UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR 9 | ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is 10 | modified to produce derivative works, such modified software should 11 | be clearly marked, so as not to confuse it with the version available 12 | from ANL. 13 | 14 | Additionally, redistribution and use in source and binary forms, with 15 | or without modification, are permitted provided that the following 16 | conditions are met: 17 | 18 | * Redistributions of source code must retain the above copyright 19 | notice, this list of conditions and the following disclaimer. 
20 | 21 | * Redistributions in binary form must reproduce the above copyright 22 | notice, this list of conditions and the following disclaimer in 23 | the documentation and/or other materials provided with the 24 | distribution. 25 | 26 | * Neither the name of UChicago Argonne, LLC, Argonne National 27 | Laboratory, ANL, the U.S. Government, nor the names of its 28 | contributors may be used to endorse or promote products derived 29 | from this software without specific prior written permission. 30 | 31 | THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS 32 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 34 | FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago 35 | Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 36 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 37 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 38 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 39 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 41 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 42 | POSSIBILITY OF SUCH DAMAGE. 43 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include LICENSE 3 | recursive-include src/tike/ *.cu 4 | prune _skbuild 5 | global-exclude *.py[co] 6 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | #### 2 | Tike 3 | #### 4 | 5 | Tike is a toolbox for tomographic reconstruction of 3D objects from ptychography 6 | data. 
7 | 8 | The aim of Tike is to provide fast and accurate implementations of a wide 9 | variety of reconstruction algorithms, and to provide a common platform for the 10 | synchrotron research community. 11 | 12 | ***************************** 13 | Current Features (March 2021) 14 | ***************************** 15 | 16 | Scan 17 | ==== 18 | - Lissajous and 2D spiral trajectories 19 | - hexagonal and rectangular grids 20 | 21 | Ptychography 22 | ============ 23 | 24 | - FFT-based operator with linear position interpolation 25 | - single-energy 26 | - multiple probes per diffraction pattern (multi-mode probes) 27 | - probe variation correction (eigen probes) 28 | - one shared (multi-modal) probe per angular view 29 | - multi-GPU conjugate-gradient descent solver 30 | - multi-GPU least-squares + gradient descent solver 31 | 32 | Laminography 33 | ============ 34 | 35 | - USFFT-based operator for cubic field-of-view 36 | - single tilt angle 37 | - multi-GPU conjugate-gradient descent solver 38 | 39 | Alignment 40 | ========== 41 | - Lanczos-based rotation and flow operators 42 | - Cross-correlation rigid alignment solver 43 | 44 | ********* 45 | Citations 46 | ********* 47 | 48 | This software has a DOI for use in citations. 49 | 50 | Gursoy, Doga, and Ching, Daniel J. "Tike." Computer software. December 01, 2022. https://github.com/AdvancedPhotonSource/tike.git. https://doi.org/10.11578/dc.20230202.1. 
51 | -------------------------------------------------------------------------------- /apptainer/tike.def: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: registry.fedoraproject.org/fedora-minimal:40-{{ target_arch }} 3 | 4 | %arguments 5 | target_arch=x86_64 6 | cuda_version=12.0 7 | pkg_version=main 8 | 9 | %post 10 | curl -L -o conda-installer.sh https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-{{ target_arch }}.sh 11 | bash conda-installer.sh -b -p "/opt/miniconda" 12 | rm conda-installer.sh 13 | /opt/miniconda/bin/conda install unzip --yes 14 | curl -L -o source.zip https://github.com/AdvancedPhotonSource/tike/archive/{{ pkg_version }}.zip 15 | /opt/miniconda/bin/unzip source.zip 16 | rm source.zip 17 | cd tike* 18 | CONDA_OVERRIDE_CUDA={{ cuda_version }} /opt/miniconda/bin/conda install cuda-version={{ cuda_version }} --file requirements.txt --file requirements-container.txt -c conda-forge --yes 19 | /opt/miniconda/bin/conda clean --all --yes 20 | /opt/miniconda/bin/pip install . --no-deps --no-build-isolation 21 | /opt/miniconda/bin/pip check 22 | cd .. 23 | rm tike* -rf 24 | cd /opt/miniconda 25 | rm -r man cmake lib/cmake lib/pkgconfig include share var 26 | 27 | %runscript 28 | /opt/miniconda/bin/python "$@" 29 | -------------------------------------------------------------------------------- /docs/CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | ############ 2 | Contributing 3 | ############ 4 | 5 | Thank you for reading this. We're happy that you have decided to report a bug or 6 | request a feature; your contribution will help make `tike` better. 7 | 8 | ********** 9 | Philosophy 10 | ********** 11 | 12 | Simple - Our contributors include non-computer scientists. Our code should be 13 | easy to understand in order to lower the barrier to entry and promote 14 | maintainability. 
15 | 16 | Suboptimal - Your time is precious. Save optimization for 10x improvements not 17 | 2x or 1.2x improvements. 18 | 19 | Scalable - We need our code to scale for large data sets. Design should 20 | consider multi-node applications. 21 | 22 | *********** 23 | Report bugs 24 | *********** 25 | 26 | Please open an Issue on our GitHub project page. Be sure to include the steps 27 | that we can take to reproduce your bug. Be prepared for the bug hunting process 28 | to take multiple days. We may ask for more information. Please be sure to close 29 | the issue or let us know when the problem is resolved! 30 | 31 | **************** 32 | Feature requests 33 | **************** 34 | 35 | If you have an idea about how to improve our code, open an Issue on our GitHub 36 | project page. Discussion about and planning for the best way to implement the 37 | idea will reduce its development time. 38 | 39 | ************* 40 | Pull Requests 41 | ************* 42 | 43 | We want the `tike` codebase to be maintainable, simple, and lightweight. Please 44 | expect pull requests to be reviewed with the following criterion in mind: 45 | 46 | - Commit messages follow our guidelines. 47 | - Documentation and tests are present. 48 | - Variable names are explanatory. 49 | - Code comments are used to clarify algorithms. 50 | - Code structure is modular. 51 | - Use of external dependencies is minimized. 52 | - Code generally adheres to `PEP8 `_ style. 53 | 54 | Commit messages 55 | =============== 56 | 57 | Clear commit messages help us understand what and why changes were made. They 58 | should follow the format below which we copied from the `NumPy development 59 | workflow `_ . 60 | 61 | For example: 62 | 63 | .. code-block:: none 64 | 65 | ENH: add functionality X to numpy.. 66 | 67 | The first line of the commit message starts with a capitalized acronym 68 | (options listed below) indicating what type of commit this is. Then a blank 69 | line, then more text if needed. 
Lines shouldn't be longer than 72 70 | characters. If the commit is related to a ticket, indicate that with 71 | "See #3456", "See ticket 3456", "Closes #3456" or similar. 72 | 73 | Standard acronyms to start the commit message with are: 74 | 75 | .. code-block:: none 76 | 77 | API: an (incompatible) API change 78 | BENCH: changes to the benchmark suite 79 | BLD: change related to building numpy 80 | BUG: bug fix 81 | DEP: deprecate something, or remove a deprecated object 82 | DEV: development tool or utility 83 | DOC: documentation 84 | ENH: enhancement 85 | MAINT: maintenance commit (refactoring, typos, etc.) 86 | REV: revert an earlier commit 87 | STY: style fix (whitespace, PEP8) 88 | TST: addition or modification of tests 89 | REL: related to releasing numpy 90 | 91 | Linting 92 | ======= 93 | 94 | As part of our continuous integration tests, we `lint 95 | `_ our code using `pycodestyle 96 | `_ and `pydocstyle 97 | `_. 98 | -------------------------------------------------------------------------------- /docs/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ######################### 2 | Installation Instructions 3 | ######################### 4 | 5 | Tike is built on the `CuPy `_ framework which uses 6 | NVIDIA CUDA to accelerate computation. Thus, a CUDA-compatible GPU on Windows_x64, 7 | Linux_aarch64, or Linux_x64 is required. Other platforms are not supported at this time. 8 | 9 | ****************************************************** 10 | From the conda-forge channel using Conda (recommended) 11 | ****************************************************** 12 | 13 | Tike is available via conda from the conda-forge channel. This distribution is 14 | updated whenever there is a tagged release. This project is still below 15 | version 1.0, so expect API breaking changes at every MINOR version. 
16 | 17 | ********************************** 18 | From the Github Container Registry 19 | ********************************** 20 | 21 | Tike is available as a preinstalled python module in an OCI container available 22 | from the GitHub Container Registry. 23 | 24 | .. code-block:: bash 25 | 26 | apptainer pull oras://ghcr.io/advancedphotonsource/tike:main-x86_64-cuda12.0 27 | 28 | ************************************* 29 | From the source code (for developers) 30 | ************************************* 31 | 32 | The build and runtime requirements are listed together in `requirements.txt`. 33 | Install these packages before installing tike using conda. 34 | 35 | Install the package using typical installation methods: navigate to the 36 | directory with `setup.cfg` and ask `pip` to install tike. 37 | 38 | .. code-block:: bash 39 | 40 | pip install . --no-deps 41 | 42 | The `-e` option for `pip install` makes the installation editable; this means 43 | whenever you import `tike`, any changes that you make to the source code will be 44 | included. 45 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = Tike 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | set SPHINXPROJ=Tike 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | ## Purpose 7 | 13 | 14 | ## Approach 15 | 19 | 20 | ## Pre-Merge Checklists 21 | 22 | ### Submitter 23 | - [ ] Write a helpfully descriptive pull request title. 24 | - [ ] Organize changes into logically grouped commits with descriptive commit messages. 25 | - [ ] Document all new functions. 26 | - [ ] Click 'details' on the readthedocs check to view the updated docs. 27 | - [ ] Write tests for new functions or explain why they are not needed. 28 | - [ ] Address any complaints from pep8speaks. 29 | 30 | ### Reviewer 31 | - [ ] Actually read all of the code. 
32 | - [ ] Run the new code yourself; the included tests should make this easy. 33 | - [ ] Write a summary of the changes as you understand them. 34 | - [ ] Thank the submitter. 35 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | nbsphinx 2 | sphinx ==5.* 3 | sphinx_book_theme 4 | sphinxcontrib-bibtex 5 | -------------------------------------------------------------------------------- /docs/source/api/align.rst: -------------------------------------------------------------------------------- 1 | align 2 | ===== 3 | .. automodule:: tike.align 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. automodule:: tike.align.align 10 | :inherited-members: 11 | :members: 12 | :show-inheritance: 13 | :undoc-members: 14 | 15 | .. toctree:: 16 | :maxdepth: 3 17 | :hidden: 18 | 19 | align.solvers 20 | 21 | 22 | -------------------------------------------------------------------------------- /docs/source/api/align.solvers.rst: -------------------------------------------------------------------------------- 1 | solvers 2 | ------- 3 | .. 
automodule:: tike.align.solvers 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/api-class-diagram.mmd: -------------------------------------------------------------------------------- 1 | classDiagram 2 | 3 | class Operator { 4 | +fwd() 5 | +adj() 6 | } 7 | 8 | class ContextManager { 9 | - __enter__() 10 | - __exit__() 11 | } 12 | 13 | class Tomo { 14 | +int ntheta 15 | +int n 16 | +int nz 17 | +fwd(self, obj, **kwargs) 18 | +adj(self, tomo, **kwargs) 19 | } 20 | 21 | class Lamino { 22 | +int theta 23 | +int n 24 | +int tilt 25 | +fwd(self, obj, **kwargs) 26 | +adj(self, tomo, **kwargs) 27 | } 28 | 29 | class Ptycho { 30 | +int nscan 31 | +int probe_shape 32 | +int detector_shape 33 | +int nz 34 | +int n 35 | +int ntheta 36 | +Propagation propagation 37 | +Convolution convolution 38 | +fwd(self, probe, scan, psi, **kwargs) 39 | +adj(self, farplane, probe, scan, **kwargs) 40 | +adj_probe(self, farplane, scan, psi, **kwargs) 41 | } 42 | 43 | 44 | 45 | class Convolution { 46 | +int nscan 47 | +int probe_shape 48 | +int nz 49 | +int n 50 | +int ntheta 51 | +fwd() 52 | +adj() 53 | } 54 | 55 | class Propagation { 56 | +int detector_shape 57 | +fwd() 58 | +adj() 59 | } 60 | 61 | ContextManager <|-- Operator 62 | 63 | Operator <|-- Tomo 64 | Operator <|-- Lamino 65 | Operator <|-- Ptycho 66 | Operator <|-- Convolution 67 | Operator <|-- Propagation 68 | 69 | Ptycho --o Convolution 70 | Ptycho --o Propagation 71 | 72 | Operator <|-- Alignment 73 | class Alignment { 74 | +Flow flow 75 | +Rotate rotate 76 | +Pad pad 77 | +Shift shift 78 | +fwd() 79 | +adj() 80 | } 81 | 82 | Operator <|-- Rotate 83 | class Rotate { 84 | +fwd() 85 | +adj() 86 | } 87 | 88 | Operator <|-- Pad 89 | class Pad { 90 | +fwd() 91 | +adj() 92 | } 93 | 94 | Operator <|-- Flow 95 | class Flow { 96 | +fwd() 97 | +adj() 98 | } 99 | 100 | Operator <|-- Shift 101 | class Shift 
{ 102 | +fwd() 103 | +adj() 104 | } 105 | 106 | Alignment --o Rotate 107 | Alignment --o Pad 108 | Alignment --o Shift 109 | Alignment --o Flow 110 | -------------------------------------------------------------------------------- /docs/source/api/cluster.rst: -------------------------------------------------------------------------------- 1 | cluster 2 | ======= 3 | .. automodule:: tike.cluster 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/communicators.rst: -------------------------------------------------------------------------------- 1 | communicators 2 | ============= 3 | .. automodule:: tike.communicators 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/constants.rst: -------------------------------------------------------------------------------- 1 | constants 2 | ========= 3 | .. automodule:: tike.constants 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/index.rst: -------------------------------------------------------------------------------- 1 | ############# 2 | API Reference 3 | ############# 4 | 5 | This part of the documentation explains the code structure and describes each 6 | function, class, and method in detail. 7 | 8 | ************ 9 | Key Concepts 10 | ************ 11 | The key concepts of the Tike code structure are the following: 12 | 13 | 1. Solvers are based on forward and adjoint operators 14 | 15 | 2. Solvers are organized into modules based on problem-type 16 | 17 | 18 | Solvers share fundamental operators 19 | =================================== 20 | 21 | An operator is a transforms data from one space to another. 
For example, the 22 | tomography forward operator is the radon transform; it maps a 2D Cartesian 23 | space to the sinogram space. The adjoint operator maps data from sinogram space 24 | back to 2D Cartesian. 25 | 26 | Solutions to inverse problems may be formulated in terms of forward and adjoint 27 | operations. This formulation is not only familiar to mathematicians, but we 28 | also avoid code duplication by reusing same forward and adjoint operators for 29 | all approaches to solving a given inverse problem. 30 | 31 | 32 | Modules by problem type 33 | ======================= 34 | The solvers for each problem-type (ptychography, tomography, etc) are separated 35 | into modules of their respective names. 36 | 37 | 38 | ************************ 39 | Operator class structure 40 | ************************ 41 | 42 | Forward and adjoint operations in Tike are implemented as a Python class called 43 | Operator. This allows a natural linking of the forward and adjoint methods and 44 | for Operators to be composed of other Operators. 45 | 46 | The chart below is a `class diagram 47 | `_. It shows the inheritance and 48 | composition relationships between the Operators in. For example, 49 | :py:class:`Ptycho` is an :py:class:`Operator` which is composed of 50 | :py:class:`Convolution` and :py:class:`Propagation` (which are also Operators). 51 | 52 | .. The class diagram is manually rendered using mermaidjs and saved as an svg 53 | the sphinxcontrib-mermaid package is out of date and doesn't work (21.3.2020) 54 | 55 | .. image:: api-class-diagram.svg 56 | 57 | ******* 58 | Modules 59 | ******* 60 | 61 | The modules in Tike are as follows: 62 | 63 | .. 
toctree:: 64 | :maxdepth: 1 65 | 66 | operators 67 | align 68 | cluster 69 | communicators 70 | constants 71 | lamino 72 | linalg 73 | opt 74 | precision 75 | ptycho 76 | random 77 | scan 78 | trajectory 79 | view 80 | -------------------------------------------------------------------------------- /docs/source/api/lamino.rst: -------------------------------------------------------------------------------- 1 | lamino 2 | ====== 3 | .. automodule:: tike.lamino 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. automodule:: tike.lamino.lamino 10 | :inherited-members: 11 | :members: 12 | :show-inheritance: 13 | :undoc-members: 14 | 15 | .. toctree:: 16 | :maxdepth: 3 17 | :hidden: 18 | 19 | lamino.solvers 20 | 21 | 22 | -------------------------------------------------------------------------------- /docs/source/api/lamino.solvers.rst: -------------------------------------------------------------------------------- 1 | solvers 2 | ------- 3 | .. automodule:: tike.lamino.solvers 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/linalg.rst: -------------------------------------------------------------------------------- 1 | linalg 2 | ====== 3 | .. automodule:: tike.linalg 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/operators.rst: -------------------------------------------------------------------------------- 1 | operators 2 | ========= 3 | .. automodule:: tike.operators 4 | :members: 5 | :undoc-members: 6 | 7 | .. autosummary:: 8 | Operator 9 | Alignment 10 | Bucket 11 | CachedFFT 12 | Convolution 13 | Flow 14 | Lamino 15 | Pad 16 | Patch 17 | Propagation 18 | Ptycho 19 | Rotate 20 | Shift 21 | 22 | .. autoclass:: Alignment 23 | .. autoclass:: Bucket 24 | .. 
autoclass:: CachedFFT 25 | .. autoclass:: Convolution 26 | .. autoclass:: Flow 27 | .. autoclass:: Lamino 28 | .. autoclass:: Operator 29 | .. autoclass:: Pad 30 | .. autoclass:: Patch 31 | .. autoclass:: Propagation 32 | .. autoclass:: Ptycho 33 | .. autoclass:: Rotate 34 | .. autoclass:: Shift 35 | 36 | .. autofunction:: gaussian 37 | .. autofunction:: gaussian_each_pattern 38 | .. autofunction:: gaussian_grad 39 | .. autofunction:: poisson 40 | .. autofunction:: poisson_each_pattern 41 | .. autofunction:: poisson_grad 42 | -------------------------------------------------------------------------------- /docs/source/api/opt.rst: -------------------------------------------------------------------------------- 1 | opt 2 | === 3 | .. automodule:: tike.opt 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/precision.rst: -------------------------------------------------------------------------------- 1 | precision 2 | ========= 3 | .. automodule:: tike.precision 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.exitwave.rst: -------------------------------------------------------------------------------- 1 | exitwave 2 | -------- 3 | .. automodule:: tike.ptycho.exitwave 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.fresnel.rst: -------------------------------------------------------------------------------- 1 | fresnel 2 | ------- 3 | .. automodule:: tike.ptycho.fresnel 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. 
autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.io.rst: -------------------------------------------------------------------------------- 1 | io 2 | -- 3 | .. automodule:: tike.ptycho.io 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.learn.rst: -------------------------------------------------------------------------------- 1 | learn 2 | ----- 3 | .. automodule:: tike.ptycho.learn 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.object.rst: -------------------------------------------------------------------------------- 1 | object 2 | -------- 3 | .. automodule:: tike.ptycho.object 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.position.rst: -------------------------------------------------------------------------------- 1 | position 2 | -------- 3 | .. automodule:: tike.ptycho.position 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.probe.rst: -------------------------------------------------------------------------------- 1 | probe 2 | ----- 3 | .. automodule:: tike.ptycho.probe 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. 
autosummary:: 10 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.rst: -------------------------------------------------------------------------------- 1 | ptycho 2 | ====== 3 | .. automodule:: tike.ptycho 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. automodule:: tike.ptycho.ptycho 10 | :inherited-members: 11 | :members: 12 | :show-inheritance: 13 | :undoc-members: 14 | 15 | .. autosummary:: 16 | :nosignatures: 17 | 18 | reconstruct 19 | reconstruct_multigrid 20 | Reconstruction 21 | simulate 22 | 23 | .. toctree:: 24 | :maxdepth: 3 25 | :hidden: 26 | 27 | ptycho.exitwave 28 | ptycho.fresnel 29 | ptycho.io 30 | ptycho.learn 31 | ptycho.object 32 | ptycho.position 33 | ptycho.probe 34 | ptycho.solvers 35 | -------------------------------------------------------------------------------- /docs/source/api/ptycho.solvers.rst: -------------------------------------------------------------------------------- 1 | solvers 2 | ------- 3 | .. automodule:: tike.ptycho.solvers 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | 9 | .. autosummary:: 10 | :nosignatures: 11 | :recursive: 12 | -------------------------------------------------------------------------------- /docs/source/api/random.rst: -------------------------------------------------------------------------------- 1 | random 2 | ====== 3 | .. automodule:: tike.random 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/scan.rst: -------------------------------------------------------------------------------- 1 | scan 2 | ==== 3 | .. 
automodule:: tike.scan 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/trajectory.rst: -------------------------------------------------------------------------------- 1 | trajectory 2 | ========== 3 | .. automodule:: tike.trajectory 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/api/view.rst: -------------------------------------------------------------------------------- 1 | view 2 | ==== 3 | .. automodule:: tike.view 4 | :inherited-members: 5 | :members: 6 | :show-inheritance: 7 | :undoc-members: 8 | -------------------------------------------------------------------------------- /docs/source/bibtex/zrefs.bib: -------------------------------------------------------------------------------- 1 | @article{gordon1970algebraic, 2 | title={Algebraic reconstruction techniques (ART) for three-dimensional electron microscopy and X-ray photography}, 3 | author={Gordon, Richard and Bender, Robert and Herman, Gabor T}, 4 | journal={Journal of theoretical Biology}, 5 | volume={29}, 6 | number={3}, 7 | pages={471--481}, 8 | year={1970}, 9 | publisher={Elsevier} 10 | } 11 | 12 | @article{Dwivedi2018, 13 | author = {Dwivedi, Priya and Konijnenberg, A.P. and Pereira, S.F. 
and Urbach, H.P.}, 14 | doi = {10.1016/j.ultramic.2018.04.004}, 15 | journal = {Ultramicroscopy}, 16 | pages = {29--36}, 17 | title = {Lateral position correction in ptychography using the gradient of intensity patterns}, 18 | url = {https://doi.org/10.1016/j.ultramic.2018.04.004}, 19 | volume = {192}, 20 | year = {2018} 21 | } 22 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Tike documentation build configuration file, created by 5 | # sphinx-quickstart on Tue Sep 12 16:06:17 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | 20 | import sys 21 | import os 22 | from pkg_resources import DistributionNotFound 23 | from pkg_resources import get_distribution 24 | 25 | # If extensions (or modules to document with autodoc) are in another directory, 26 | # add these directories to sys.path here. If the directory is relative to the 27 | # documentation root, use os.path.abspath to make it absolute, like shown here. 28 | sys.path.insert(0, os.path.abspath('../../src')) 29 | 30 | 31 | # -- General configuration ------------------------------------------------ 32 | 33 | # If your documentation needs a minimal Sphinx version, state it here. 34 | # 35 | # needs_sphinx = '1.0' 36 | 37 | # Add any Sphinx extension module names here, as strings. 
They can be 38 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 39 | # ones. 40 | extensions = ['sphinx.ext.autodoc', 41 | 'sphinx.ext.doctest', 42 | 'sphinx.ext.todo', 43 | 'sphinx.ext.napoleon', 44 | 'sphinx.ext.coverage', 45 | 'sphinx.ext.autosummary', 46 | 'sphinx.ext.imgmath', 47 | 'sphinx.ext.viewcode', 48 | 'sphinxcontrib.bibtex', 49 | 'sphinx.ext.extlinks', 50 | 'nbsphinx', 51 | ] 52 | 53 | # bibtex setting 54 | bibtex_bibfiles = [ 55 | 'bibtex/zrefs.bib', 56 | ] 57 | 58 | # extlinks settings 59 | extlinks = { 60 | 'doi': ('https://dx.doi.org/%s', 'doi:'), 61 | } 62 | 63 | # The suffix(es) of source filenames. 64 | source_suffix = ['.rst', '.md'] 65 | 66 | # The master toctree document. 67 | master_doc = 'index' 68 | 69 | # General information about the project. 70 | Argonne = u'Argonne National Laboratory' 71 | project = u'Tike' 72 | copyright = u'2017-2020, ' + Argonne 73 | 74 | # The version info for the project you're documenting, acts as replacement for 75 | # |version| and |release|, also used in various other places throughout the 76 | # built documents. 77 | try: 78 | release = get_distribution('tike').version 79 | # The short X.Y version. 80 | version = '.'.join(release.split('.')[:2]) 81 | except DistributionNotFound: 82 | # package is not installed 83 | pass 84 | 85 | # The language for content autogenerated by Sphinx. Refer to documentation 86 | # for a list of supported languages. 87 | # 88 | # This is also used if you do content translation via gettext catalogs. 89 | # Usually you set "language" from the command line for these cases. 90 | language = 'en_US' 91 | 92 | # List of patterns, relative to source directory, that match files and 93 | # directories to ignore when looking for source files. 94 | # This patterns also effect to html_static_path and html_extra_path 95 | exclude_patterns = ['_build', '**.ipynb_checkpoints'] 96 | 97 | # The name of the Pygments (syntax highlighting) style to use. 
98 | pygments_style = 'sphinx' 99 | 100 | # If true, `todo` and `todoList` produce output, else they produce nothing. 101 | todo_include_todos = True 102 | 103 | # -- Options for HTML output ---------------------------------------------- 104 | 105 | # The theme to use for HTML and HTML Help pages. See the documentation for 106 | # a list of builtin themes. 107 | html_theme = 'sphinx_book_theme' 108 | 109 | html_theme_options = { 110 | 'navigation_depth': 4, 111 | } 112 | 113 | # -- Options for HTMLHelp output ------------------------------------------ 114 | 115 | htmlhelp_basename = project+'doc' 116 | 117 | # -- Options for autodoc output ------------------------------------------ 118 | 119 | autodoc_typehints = 'description' 120 | autodoc_class_signature = 'separated' 121 | 122 | autodoc_mock_imports = [ 123 | 'cupy', 124 | 'cupyx', 125 | 'h5py', 126 | 'importlib_resources', 127 | 'matplotlib', 128 | 'matplotlib.pyplot', 129 | 'mpi4py', 130 | 'numpy', 131 | 'scipy', 132 | 'cv2', 133 | ] 134 | -------------------------------------------------------------------------------- /docs/source/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/source/examples.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Examples 3 | ======== 4 | 5 | This section contains examples for using various TomoPy functions and plugins 6 | from other packages. 7 | 8 | .. toctree:: 9 | :titlesonly: 10 | :hidden: 11 | 12 | examples/align 13 | examples/ptycho 14 | examples/scan 15 | examples/tomo 16 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../README.rst 3 | 4 | .. 
toctree:: 5 | :maxdepth: 3 6 | :hidden: 7 | 8 | api/index 9 | install 10 | examples 11 | contributing 12 | license 13 | -------------------------------------------------------------------------------- /docs/source/install.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../INSTALL.rst 2 | -------------------------------------------------------------------------------- /docs/source/license.rst: -------------------------------------------------------------------------------- 1 | ####### 2 | License 3 | ####### 4 | 5 | .. include :: ../../LICENSE 6 | -------------------------------------------------------------------------------- /profile/profile_admm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """Benchmark ptychotomography reconstruction. 5 | 6 | Profile tike.simulate and tike.admm on the function level by running the main 7 | function of this script. Line by line profile hotspots for the file 8 | tike/foo.py can be obtained by using pprofile. As below: 9 | 10 | ``` 11 | $ pprofile --statistic 0.001 --include tike/foo.py profile_admm.py 12 | ``` 13 | """ 14 | import click 15 | import logging 16 | import os 17 | import pickle 18 | from pyinstrument import Profiler 19 | # These environmental variables must be set before numpy is imported anywhere. 
20 | os.environ["MKL_NUM_THREADS"] = "1" 21 | os.environ["NUMEXPR_NUM_THREADS"] = "1" 22 | os.environ["OMP_NUM_THREADS"] = "1" 23 | os.environ["OPENBLAS_NUM_THREADS"] = "1" 24 | import numpy as np # noqa 25 | import tike # noqa 26 | 27 | 28 | logging.basicConfig(level=logging.INFO) 29 | logger = logging.getLogger(__name__) 30 | 31 | 32 | @click.command() 33 | @click.argument('data-file', type=click.Path(exists=True)) 34 | @click.argument('params-file', type=click.Path(exists=True)) 35 | @click.option('--recon-file', type=click.Path(file_okay=True, dir_okay=True, 36 | writable=True), 37 | default=None, 38 | help='Save reconstruction to this file.') 39 | @click.option('--profile', is_flag=True, 40 | help='Profile at function level using pyinstrument.',) 41 | @click.option('-A', '--admm-iters', default=1, type=click.INT, 42 | help='The number of ADMM interations.',) 43 | @click.option('-P', '--ptycho-iters', default=1, type=click.INT, 44 | help='The number of pytchography iterations.') 45 | @click.option('-T', '--tomo-iters', default=1, type=click.INT, 46 | help='The number of tomography iterations.') 47 | def admm_profile_workload( 48 | data_file, 49 | params_file, 50 | recon_file, 51 | profile, 52 | admm_iters, 53 | ptycho_iters, 54 | tomo_iters, 55 | ): 56 | """Run some admm work which may be profiled.""" 57 | comm = tike.MPICommunicator() 58 | # Load data 59 | data = None 60 | if comm.rank == 0: 61 | with open(data_file, 'rb') as file: 62 | data = pickle.load(file) 63 | data = comm.scatter(data) 64 | # Load acquisition parameters 65 | ( 66 | obj, voxelsize, 67 | probe, energy, 68 | theta, v, h, 69 | detector_shape, 70 | ) = comm.load(params_file) 71 | recon = np.zeros(obj.shape, dtype=np.complex64) 72 | 73 | if comm.rank == 0: 74 | logger.info(""" 75 | recon shape is {} 76 | voxelsize is {} 77 | data shape is {} 78 | theta shape is {} 79 | v shape is {} 80 | energy is {} 81 | """.format(recon.shape, voxelsize, np.asarray(data).shape, 82 | theta.shape, v.shape, 
energy)) 83 | 84 | pkwargs = { 85 | 'algorithm': 'grad', 86 | 'num_iter': ptycho_iters, 87 | } 88 | 89 | tkwargs = { 90 | 'algorithm': 'grad', 91 | 'num_iter': tomo_iters, 92 | 'ncore': 1, 93 | 'reg_par': -1, 94 | } 95 | 96 | if profile and comm.rank == 0: 97 | profiler = Profiler() 98 | profiler.start() 99 | 100 | recon = tike.admm(obj=recon, voxelsize=voxelsize, 101 | data=data, 102 | probe=probe, theta=theta, v=v, h=h, energy=energy, 103 | num_iter=admm_iters, 104 | rho=0.5, gamma=0.25, 105 | comm=comm, pkwargs=pkwargs, tkwargs=tkwargs, 106 | ) 107 | 108 | if profile and comm.rank == 0: 109 | profiler.stop() 110 | print(profiler.output_text(unicode=True, color=False)) 111 | 112 | # Save result to disk 113 | logger.info("Rank {} complete.".format(comm.rank)) 114 | if recon_file is not None: 115 | recon = comm.gather(recon, root=0, axis=0) 116 | if comm.rank == 0: 117 | logger.info("Saving the result.") 118 | os.makedirs(os.path.dirname(recon_file), exist_ok=True) 119 | with open(recon_file, 'wb') as file: 120 | pickle.dump(recon, file) 121 | else: 122 | if comm.rank == 0: 123 | logger.info("Not saving the result.") 124 | 125 | 126 | if __name__ == '__main__': 127 | admm_profile_workload() 128 | -------------------------------------------------------------------------------- /profile/profile_lamino.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """Benchmark ptychography reconstruction.""" 4 | 5 | import logging 6 | import lzma 7 | import os 8 | import pickle 9 | from pyinstrument import Profiler 10 | import unittest 11 | 12 | # These environmental variables must be set before numpy is imported anywhere. 
13 | os.environ["MKL_NUM_THREADS"] = "1" 14 | os.environ["NUMEXPR_NUM_THREADS"] = "1" 15 | os.environ["OMP_NUM_THREADS"] = "1" 16 | os.environ["OPENBLAS_NUM_THREADS"] = "1" 17 | 18 | import numpy as np # noqa 19 | import tike.lamino # noqa 20 | 21 | 22 | class BenchmarkPtycho(unittest.TestCase): 23 | """Run benchmarks for laminography reconstruction.""" 24 | 25 | def setUp(self): 26 | self.profiler = Profiler() 27 | dataset_file = '../tests/data/lamino_setup.pickle.lzma' 28 | with lzma.open(dataset_file, 'rb') as file: 29 | [ 30 | self.data, 31 | self.original, 32 | self.theta, 33 | self.tilt, 34 | ] = pickle.load(file) 35 | 36 | def template_algorithm(self, algorithm): 37 | result = { 38 | 'obj': np.zeros_like(self.original), 39 | } 40 | self.profiler.start() 41 | result = tike.lamino.reconstruct( 42 | **result, 43 | data=self.data, 44 | theta=self.theta, 45 | tilt=self.tilt, 46 | algorithm=algorithm, 47 | num_iter=10, 48 | ) 49 | self.profiler.stop() 50 | print('\n') 51 | print(self.profiler.output_text( 52 | unicode=True, 53 | color=True, 54 | )) 55 | 56 | def test_cgrad(self): 57 | """Use pyinstrument to benchmark the conjugate gradient algorithm.""" 58 | self.template_algorithm('cgrad') 59 | 60 | 61 | if __name__ == '__main__': 62 | unittest.main(verbosity=2) 63 | -------------------------------------------------------------------------------- /profile/profile_ptycho.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """Benchmark ptychography reconstruction.""" 4 | 5 | import logging 6 | import lzma 7 | import os 8 | import pickle 9 | import unittest 10 | 11 | import cupy as cp 12 | import numpy as np 13 | from pyinstrument import Profiler 14 | import tike.ptycho 15 | 16 | 17 | class BenchmarkPtycho(unittest.TestCase): 18 | """Run benchmarks for pychography reconstruction.""" 19 | 20 | def setUp(self): 21 | """Create a test dataset.""" 22 | self.profiler = Profiler() 23 
| dataset_file = '../tests/data/ptycho_setup.pickle.lzma' 24 | with lzma.open(dataset_file, 'rb') as file: 25 | [ 26 | self.data, 27 | self.scan, 28 | self.probe, 29 | self.original, 30 | ] = pickle.load(file) 31 | 32 | def start(self): 33 | self.profiler.start() 34 | cp.cuda.profiler.start() 35 | 36 | def stop(self): 37 | cp.cuda.profiler.stop() 38 | self.profiler.stop() 39 | print('\n') 40 | print(self.profiler.output_text( 41 | unicode=True, 42 | color=True, 43 | )) 44 | 45 | @unittest.skip('Demonstrate skipped tests.') 46 | def test_never(self): 47 | """Never run this test.""" 48 | pass 49 | 50 | def template_algorithm(self, algorithm): 51 | """Use pyinstrument to benchmark a ptycho algorithm on one core.""" 52 | logging.disable(logging.WARNING) 53 | result = { 54 | 'psi': np.ones_like(self.original), 55 | 'probe': self.probe, 56 | 'scan': self.scan, 57 | } 58 | # Do one iteration to complete JIT compilation 59 | result = tike.ptycho.reconstruct( 60 | **result, 61 | data=self.data, 62 | algorithm=algorithm, 63 | num_iter=1, 64 | rtol=-1, 65 | ) 66 | self.start() 67 | result = tike.ptycho.reconstruct( 68 | **result, 69 | data=self.data, 70 | algorithm=algorithm, 71 | num_iter=100, 72 | rtol=-1, 73 | ) 74 | self.stop() 75 | 76 | def test_combined(self): 77 | """Use pyinstrument to benchmark the combined algorithm.""" 78 | self.template_algorithm('cgrad') 79 | 80 | def test_divided(self): 81 | """Use pyinstrument to benchmark the divided algorithm.""" 82 | self.template_algorithm('lstsq_grad') 83 | 84 | 85 | if __name__ == '__main__': 86 | unittest.main(verbosity=2) 87 | -------------------------------------------------------------------------------- /profile/profile_tomo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """Benchmark tomography reconstruction.""" 5 | 6 | import os 7 | import logging 8 | import lzma 9 | import pickle 10 | from pyinstrument import 
Profiler 11 | import unittest 12 | # These environmental variables must be set before numpy is imported anywhere. 13 | os.environ["MKL_NUM_THREADS"] = "1" 14 | os.environ["NUMEXPR_NUM_THREADS"] = "1" 15 | os.environ["OMP_NUM_THREADS"] = "1" 16 | os.environ["OPENBLAS_NUM_THREADS"] = "1" 17 | import numpy as np # noqa 18 | import tike.tomo # noqa 19 | 20 | 21 | class BenchmarkTomo(unittest.TestCase): 22 | """Run benchmarks for tomography reconstruction.""" 23 | 24 | def setUp(self): 25 | """Create a test dataset.""" 26 | self.profiler = Profiler() 27 | dataset_file = '../tests/data/tomo_setup.pickle.lzma' 28 | with lzma.open(dataset_file, 'rb') as file: 29 | [ 30 | self.data, 31 | self.theta, 32 | self.original, 33 | ] = pickle.load(file) 34 | 35 | @unittest.skip('Demonstrate skipped tests.') 36 | def test_never(self): 37 | """Never run this test.""" 38 | pass 39 | 40 | def test_cgrad(self): 41 | """Use pyinstrument to benchmark tomo.grad on one core.""" 42 | logging.disable(logging.WARNING) 43 | result = { 44 | 'obj': np.zeros(self.original.shape, dtype=np.complex64) 45 | } 46 | self.profiler.start() 47 | for i in range(50): 48 | result = tike.tomo.reconstruct( 49 | **result, 50 | theta=self.theta, 51 | integrals=self.data, 52 | algorithm='cgrad', 53 | num_iter=1, 54 | ) 55 | self.profiler.stop() 56 | print('\n') 57 | print(self.profiler.output_text(unicode=True, color=True)) 58 | 59 | 60 | if __name__ == '__main__': 61 | unittest.main(verbosity=2) 62 | -------------------------------------------------------------------------------- /profile/requirements.txt: -------------------------------------------------------------------------------- 1 | pyinstrument 2 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = [ 4 | "setuptools_scm>=7", 5 | "setuptools>=61", 6 | 
"toml", 7 | "wheel", 8 | ] 9 | 10 | [project] 11 | urls = { docs = 'http://tike.readthedocs.org', source = 'http://github.com/tomography/tike.git'} 12 | authors = [ 13 | {name = "Doga Gursoy", email = "dgursoy@anl.gov"}, 14 | {name = "Daniel Ching", email = "dching@anl.gov"}, 15 | {name = "Xiaodong Yu"}, 16 | {name = "Viktor Nikitin"}, 17 | {name = "Ash Tripathi"}, 18 | ] 19 | classifiers = [ 20 | 'Intended Audience :: Developers', 21 | 'Intended Audience :: Science/Research', 22 | 'License :: OSI Approved :: BSD License', 23 | 'Natural Language :: English', 24 | 'Operating System :: OS Independent', 25 | 'Programming Language :: Python :: 3 :: Only', 26 | 'Topic :: Scientific/Engineering', 27 | ] 28 | dependencies = [ 29 | # https://github.com/cupy/cupy/issues/8184 30 | "cupy >=10.0, !=10.3.0, !=13.0.*", 31 | 'importlib_resources; python_version<"3.9"', 32 | "matplotlib ==3.*", 33 | "numpy >=1.17", 34 | "opencv-python >=3.4, <5.0", 35 | "scipy >=1.6.0", 36 | ] 37 | license = {text = "BSD 3-Clause License"} 38 | name = "tike" 39 | requires-python = "~=3.8" 40 | dynamic = ["version", "readme"] 41 | optional-dependencies = {mpi = [ 42 | "mpi4py ==3.*" 43 | ]} 44 | 45 | [tool.setuptools.dynamic] 46 | readme = {file = ["README.rst", "LICENSE"]} 47 | 48 | [tool.setuptools.packages.find] 49 | where = ["src"] 50 | include = ["tike*"] 51 | 52 | [tool.setuptools_scm] 53 | -------------------------------------------------------------------------------- /requirements-container.txt: -------------------------------------------------------------------------------- 1 | h5py 2 | hdf5plugin 3 | scikit-image 4 | scipy 5 | toml 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cupy>=10,!=10.3.0,!=13.0.* 2 | importlib_resources # backport for python<3.9 3 | matplotlib-base=3.* 4 | mpi4py>=3.* 5 | numpy>=1.17 6 | py-opencv>=3.4,<5.0 7 | python>=3.8 
class MPICommunicator(object):
    """Communicate between processes using MPI.

    Use this class to abstract away all of the MPI communication that needs
    to occur in order to switch between the tomography and ptychography
    problems.
    """

    def __init__(self):
        """Load the MPI params and get initial data."""
        super(MPICommunicator, self).__init__()
        self.comm = MPI.COMM_WORLD
        # rank is this process's index in [0, size); size is the total
        # number of MPI processes.
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size()
        logger.info("Node {:,d} is running.".format(self.rank))

    def scatter(self, *args):
        """Send and receive constant data that must be divided.

        Each argument is split into ``self.size`` chunks on rank 0 and one
        chunk is scattered to each rank. A single argument returns its chunk
        directly; multiple arguments return a list of chunks.
        """
        if len(args) == 1:
            arg = args[0]
            if self.rank == 0:
                chunks = np.array_split(arg, self.size)
            else:
                chunks = None
            return self.comm.scatter(chunks, root=0)
        out = list()
        for arg in args:
            if self.rank == 0:
                chunks = np.array_split(arg, self.size)
            else:
                chunks = None
            out.append(self.comm.scatter(chunks, root=0))
        return out

    def broadcast(self, *args):
        """Synchronize parameters that are the same for all processes.

        Rank 0's value of each argument is broadcast to every rank.
        """
        if len(args) == 1:
            return self.comm.bcast(args[0], root=0)
        out = list()
        for arg in args:
            out.append(self.comm.bcast(arg, root=0))
        return out

    def get_ptycho_slice(self, tomo_slice):
        """Switch to slicing for the ptychography problem."""
        # Break the tomo data along the theta axis
        t_chunks = np.array_split(tomo_slice, self.size, axis=0)  # Theta, V, H
        # Each rank takes a turn scattering its tomo v slice to the others
        p_chunks = list()
        for i in range(self.size):
            p_chunks.append(self.comm.scatter(t_chunks, root=i))
        # Recombine along the vertical axis so each rank now has a theta slice
        return np.concatenate(p_chunks, axis=1)  # Theta, V, H

    def get_tomo_slice(self, ptych_slice):
        """Switch to slicing for the tomography problem."""
        # Break the ptych data along the vertical axis
        p_chunks = np.array_split(ptych_slice, self.size, axis=1)
        # Each rank takes a turn scattering its ptych theta slice to the others
        t_chunks = list()
        for i in range(self.size):
            t_chunks.append(self.comm.scatter(p_chunks, root=i))
        # Recombine along the theta axis so each rank now has a vertical slice
        return np.concatenate(t_chunks, axis=0)  # Theta, V, H

    def gather(self, arg, root=0, axis=0):
        """Gather arg to one node.

        Returns the concatenated array on ``root``; every other rank
        returns None.
        """
        arg = self.comm.gather(arg, root=root)
        if self.rank == root:
            return np.concatenate(arg, axis=axis)
        return None

    def allgather(self, arg, axis=0):
        """All nodes gather arg.

        NOTE(review): unlike gather(), this returns the raw list from MPI
        allgather and the ``axis`` parameter is accepted but ignored (no
        concatenation) -- confirm callers expect a list here.
        """
        return self.comm.allgather(arg)

    def load(self, filename):
        """Load all of the variables from a pickle.

        Rank 0 reads the file; scalars/shared arrays are broadcast while
        obj, v, h are scattered across ranks.
        """
        # Initally set all variables to None
        (
            obj, voxelsize,
            probe, energy,
            theta, v, h,
            detector_shape,
        ) = [None] * 8  # yapf: disable
        # Load the data on one rank
        if self.rank == 0:
            with open(filename, 'rb') as file:
                (
                    obj, voxelsize,
                    probe, energy,
                    theta, v, h,
                    detector_shape,
                ) = pickle.load(file)  # yapf: disable
        # Distribute the variables appropriately to each rank
        (
            voxelsize,
            probe, energy,
            theta,
            detector_shape,
        ) = self.broadcast(
            voxelsize,
            probe, energy,
            theta,
            detector_shape,
        )  # yapf: disable
        obj, v, h, = self.scatter(obj, v, h)
        return (
            obj, voxelsize,
            probe, energy,
            theta, v, h,
            detector_shape,
        )  # yapf: disable
Specific implementations of this class 11 | can either inherit from this class or just provide the same interface. 12 | 13 | Solver implementations should inherit from TomoBackend which is an alias 14 | for whichever TomoCore implementation is selected at import time. 15 | 16 | Attributes 17 | ---------- 18 | ntheta : int 19 | The number of projections. 20 | n, nz : int 21 | The pixel width and height of the projection. 22 | 23 | Parameters 24 | ---------- 25 | obj : (nz, n, n) complex64 26 | The complex object to be transformed or recovered. 27 | tomo : (ntheta, nz, n) complex64 28 | The radon transform of `obj`. 29 | angles : (ntheta, ) float32 30 | The radian angles at which the radon transform is sampled. 31 | centers : (nz, ) float32 32 | The center of rotation in `obj` pixels for each slice along z. 33 | 34 | """ 35 | 36 | array_module = None 37 | asnumpy = None 38 | 39 | def __init__(self, angles, ntheta, nz, n, centers): 40 | """Please see help(TomoCore) for more info.""" 41 | 42 | def __enter__(self): 43 | """Return self at start of a with-block.""" 44 | return self 45 | 46 | def __exit__(self, type, value, traceback): 47 | """Free GPU memory due at interruptions or with-block exit.""" 48 | pass 49 | 50 | def run(self, tomo, obj, **kwargs): 51 | """Implement a specific tomography solving algorithm. 52 | 53 | See help(TomoCore) for more information. 54 | """ 55 | raise NotImplementedError("Cannot run a base class.") 56 | 57 | def fwd(self, obj, **kwargs): 58 | """Perform the forward Radon transform (R). 59 | 60 | See help(TomoCore) for more information. 61 | """ 62 | raise NotImplementedError("Cannot run a base class.") 63 | 64 | def adj(self, tomo, **kwargs): 65 | """Perform the adjoint Radon transform (R*). 66 | 67 | See help(TomoCore) for more information. 
68 | """ 69 | raise NotImplementedError("Cannot run a base class.") 70 | -------------------------------------------------------------------------------- /src/broken/reg.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def run(xp, u, mu, tau, alpha): 4 | """Provide some kind of regularization.""" 5 | z = fwd(xp, u) + mu / tau 6 | # Soft-thresholding 7 | # za = xp.sqrt(xp.sum(xp.abs(z), axis=0)) 8 | za = xp.sqrt(xp.real(xp.sum(z*xp.conj(z), 0))) 9 | zeros = (za <= alpha / tau) 10 | z[:, zeros] = 0 11 | z[:, ~zeros] -= z[:, ~zeros] * alpha / (tau * za[~zeros]) 12 | return z 13 | 14 | def fwd(xp, u): 15 | """Forward operator for regularization (J).""" 16 | res = xp.zeros((3, *u.shape), dtype=u.dtype, order='C') 17 | res[0, :, :, :-1] = u[:, :, 1:] - u[:, :, :-1] 18 | res[1, :, :-1, :] = u[:, 1:, :] - u[:, :-1, :] 19 | res[2, :-1, :, :] = u[1:, :, :] - u[:-1, :, :] 20 | res *= 2 / np.sqrt(3) # normalization 21 | return res 22 | 23 | def adj(xp, gr): 24 | """Adjoint operator for regularization (J^*).""" 25 | res = xp.zeros(gr.shape[1:], gr.dtype, order='C') 26 | res[:, :, 1:] = gr[0, :, :, 1:] - gr[0, :, :, :-1] 27 | res[:, :, 0] = gr[0, :, :, 0] 28 | res[:, 1:, :] += gr[1, :, 1:, :] - gr[1, :, :-1, :] 29 | res[:, 0, :] += gr[1, :, 0, :] 30 | res[1:, :, :] += gr[2, 1:, :, :] - gr[2, :-1, :, :] 31 | res[0, :, :] += gr[2, 0, :, :] 32 | res *= -2 / np.sqrt(3) # normalization 33 | return res 34 | -------------------------------------------------------------------------------- /src/broken/tomo/__init__.py: -------------------------------------------------------------------------------- 1 | """Provides tomography solvers. 2 | 3 | The reference implementation uses NumPy's FFT library. Select a non-default 4 | backend by setting the TIKE_TOMO_BACKEND environment variable. 5 | 6 | Coordinate Systems 7 | ------------------ 8 | 9 | `theta, v, h`. 
`v, h` are the horizontal vertical directions perpendicular 10 | to the probe direction where positive directions are to the right and up. 11 | `theta` is the rotation angle around the vertical reconstruction 12 | space axis, `z`. `z` is parallel to `v`, and uses the right hand rule to 13 | determine reconstruction space coordinates `z, x, y`. `theta` is measured 14 | from the `x` axis, so when `theta = 0`, `h` is parallel to `y`. 15 | 16 | Functions 17 | --------- 18 | 19 | Each public function in this module should have the following interface: 20 | 21 | Parameters 22 | ---------- 23 | obj : (Z, X, Y, P) :py:class:`numpy.array` float32 24 | An array of material properties. The first three dimensions `Z, X, Y` 25 | are spatial dimensions. The fourth dimension, `P`, holds properties at 26 | each grid position: refractive indices, attenuation coefficents, etc. 27 | integrals : (M, V, H, P) :py:class:`numpy.array` float32 28 | Integrals across the `obj` for each of the `probe` rays and 29 | P parameters. 30 | theta, v, h : (M, ) :py:class:`numpy.array` float32 31 | The min corner (theta, v, h) of the `probe` for each measurement. 32 | kwargs 33 | Keyword arguments specific to this function. `**kwargs` should always be 34 | included so that extra parameters are ignored instead of raising an error. 35 | 36 | """ 37 | import os 38 | 39 | # Search available entry points for requested backend. Must set the 40 | # TomoBackend variable BEFORE importing the rest of the module. 41 | if "TIKE_TOMO_BACKEND" in os.environ: 42 | import pkg_resources 43 | _backend_options = {} 44 | for _entry_point in pkg_resources.iter_entry_points('tike.TomoBackend'): 45 | _backend_options[_entry_point.name] = _entry_point.load() 46 | _requested_backend = os.environ["TIKE_TOMO_BACKEND"] 47 | if _requested_backend in _backend_options: 48 | TomoBackend = _backend_options[_requested_backend] 49 | else: 50 | raise ImportError( 51 | "Cannot set TomoBackend to '{}'. 
" 52 | "Available options are: {}".format(_requested_backend, 53 | _backend_options) 54 | ) 55 | else: 56 | from tike.operators import Tomo as TomoBackend 57 | 58 | from tike.tomo.tomo import * # noqa 59 | from tike.tomo.solvers import * # noqa 60 | -------------------------------------------------------------------------------- /src/broken/tomo/solvers.py: -------------------------------------------------------------------------------- 1 | """Provides Solver implementations for a variety of algorithms.""" 2 | 3 | from tike.opt import conjugate_gradient 4 | import tike.reg as tv 5 | from tike.tomo import TomoBackend 6 | 7 | __all__ = [ 8 | "available_solvers", 9 | "ConjugateGradientTomoSolver", 10 | ] 11 | 12 | 13 | class ConjugateGradientTomoSolver(TomoBackend): 14 | """Solve the ptychography problem using gradient descent.""" 15 | 16 | def run(self, tomo, obj, theta, num_iter, 17 | rho=1.0, tau=0.0, reg=0j, K=1 + 0j, **kwargs 18 | ): # yapf: disable 19 | """Use conjugate gradient to estimate `obj`. 20 | 21 | Parameters 22 | ---------- 23 | tomo: array-like float32 24 | Line integrals through the object. 25 | obj : array-like float32 26 | The object to be recovered. 27 | num_iter : int 28 | Number of steps to take. 
29 | rho, tau : float32 30 | Weights for data and variation components of the cost function 31 | reg : complex64 32 | The regularizer for total variation 33 | 34 | """ 35 | xp = self.array_module 36 | reg = xp.asarray(reg, dtype='complex64') 37 | K = xp.asarray(K, dtype='complex64') 38 | K_conj = xp.conj(K, dtype='complex64') 39 | 40 | def cost_function(obj): 41 | model = K * self.fwd(obj=obj, theta=theta) 42 | return ( 43 | + rho * xp.square(xp.linalg.norm(model - tomo)) 44 | + tau * xp.square(xp.linalg.norm(tv.fwd(xp, obj) - reg)) 45 | ) 46 | 47 | def grad(obj): 48 | model = K * self.fwd(obj, theta=theta) 49 | return ( 50 | + rho * self.adj(K_conj * (model - tomo), theta=theta) 51 | + tau * tv.adj(xp, tv.fwd(xp, obj) - reg) 52 | ) 53 | 54 | obj = conjugate_gradient( 55 | self.array_module, 56 | x=obj, 57 | cost_function=cost_function, 58 | grad=grad, 59 | num_iter=num_iter, 60 | ) 61 | 62 | return { 63 | 'obj': obj 64 | } 65 | 66 | # TODO: Add new algorithms here 67 | available_solvers = { 68 | "cgrad": ConjugateGradientTomoSolver, 69 | } 70 | -------------------------------------------------------------------------------- /src/tike/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # ######################################################################### 5 | # Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. # 6 | # # 7 | # Copyright 2018. UChicago Argonne, LLC. This software was produced # 8 | # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # 9 | # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # 10 | # U.S. Department of Energy. The U.S. Government has rights to use, # 11 | # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # 12 | # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # 13 | # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. 
If software is # 14 | # modified to produce derivative works, such modified software should # 15 | # be clearly marked, so as not to confuse it with the version available # 16 | # from ANL. # 17 | # # 18 | # Additionally, redistribution and use in source and binary forms, with # 19 | # or without modification, are permitted provided that the following # 20 | # conditions are met: # 21 | # # 22 | # * Redistributions of source code must retain the above copyright # 23 | # notice, this list of conditions and the following disclaimer. # 24 | # # 25 | # * Redistributions in binary form must reproduce the above copyright # 26 | # notice, this list of conditions and the following disclaimer in # 27 | # the documentation and/or other materials provided with the # 28 | # distribution. # 29 | # # 30 | # * Neither the name of UChicago Argonne, LLC, Argonne National # 31 | # Laboratory, ANL, the U.S. Government, nor the names of its # 32 | # contributors may be used to endorse or promote products derived # 33 | # from this software without specific prior written permission. # 34 | # # 35 | # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # 36 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # 37 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # 38 | # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # 39 | # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # 40 | # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # 41 | # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # 42 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # 43 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # 44 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # 45 | # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # 46 | # POSSIBILITY OF SUCH DAMAGE. 
"""Tike is a toolbox for ptycho-tomographic reconstruction of 3D objects.

The aim of Tike is to provide fast, accurate, and modular implementations in an
easy to maintain package.


*******
License
*******
The software is licensed under the BSD-3 license.

"""
try:
    from importlib.metadata import PackageNotFoundError, version
except ImportError:
    # importlib.metadata entered the stdlib in Python 3.8; older versions
    # use the importlib_metadata backport package instead.
    from importlib_metadata import PackageNotFoundError, version
import logging

# Library code should not configure logging; a NullHandler silences the
# "no handlers could be found" warning for consumers of this package.
logging.getLogger(__name__).addHandler(logging.NullHandler())

try:
    __version__ = version('tike')
except PackageNotFoundError:
    # Running from an uninstalled source tree; leave __version__ unset.
    pass
def _as_device_arrays(operator, kwargs):
    """Convert array-like keyword values to operator arrays, in place.

    Tuples and scalars are passed through untouched.
    """
    for key, value in kwargs.items():
        if not isinstance(value, tuple) and np.ndim(value) > 0:
            kwargs[key] = operator.asarray(value)


def simulate(
    original,
    **kwargs
):  # yapf: disable
    """Return original shifted by shift."""
    with Alignment() as operator:
        _as_device_arrays(operator, kwargs)
        unaligned = operator.fwd(
            operator.asarray(original, dtype='complex64'),
            **kwargs,
        )
        assert unaligned.dtype == 'complex64', unaligned.dtype
        return operator.asnumpy(unaligned)


def invert(
    original,
    **kwargs
):  # yapf: disable
    """Return original shifted by shift."""
    with Alignment() as operator:
        _as_device_arrays(operator, kwargs)
        unaligned = operator.inv(
            operator.asarray(original, dtype='complex64'),
            **kwargs,
        )
        assert unaligned.dtype == 'complex64', unaligned.dtype
        return operator.asnumpy(unaligned)
68 | 69 | """ 70 | if algorithm in solvers.__all__: 71 | with Alignment() as operator: 72 | for key, value in kwargs.items(): 73 | if not isinstance(value, tuple) and np.ndim(value) > 0: 74 | kwargs[key] = operator.asarray(value) 75 | logger.info("{} on {:,d} - {:,d} by {:,d} images for {:,d} " 76 | "iterations.".format(algorithm, *unaligned.shape, 77 | num_iter)) 78 | result = getattr(solvers, algorithm)( 79 | operator, 80 | original=operator.asarray(original, dtype='complex64'), 81 | unaligned=operator.asarray(unaligned, dtype='complex64'), 82 | num_iter=num_iter, 83 | **kwargs, 84 | ) 85 | return {k: operator.asnumpy(v) for k, v in result.items()} 86 | else: 87 | raise ValueError( 88 | "The '{}' algorithm is not an available.".format(algorithm)) 89 | -------------------------------------------------------------------------------- /src/tike/align/solvers/__init__.py: -------------------------------------------------------------------------------- 1 | """Contains different solver implementations.""" 2 | 3 | from .cross_correlation import cross_correlation 4 | from .farneback import farneback 5 | 6 | __all__ = [ 7 | "cross_correlation", 8 | "farneback", 9 | ] 10 | -------------------------------------------------------------------------------- /src/tike/align/solvers/farneback.py: -------------------------------------------------------------------------------- 1 | """Implements a 2D alignmnent algorithm by Gunnar Farneback.""" 2 | 3 | import numpy as np 4 | from cv2 import calcOpticalFlowFarneback 5 | 6 | 7 | def _rescale_8bit(a, b, hi=None, lo=None): 8 | """Return a, b rescaled into the same 8-bit range. 9 | 10 | The images are rescaled into the range [lo, hi] if provided; otherwise, the 11 | range is decided by clipping the histogram of all bins that are less than 12 | 0.5 percent of the fullest bin. 
13 | 14 | """ 15 | 16 | if hi is None or lo is None: 17 | h, e = np.histogram(b, 1000) 18 | stend = np.where(h > np.max(h) * 0.005) 19 | st = stend[0][0] 20 | end = stend[0][-1] 21 | lo = e[st] 22 | hi = e[end + 1] 23 | 24 | # Force all values into range [0, 255] 25 | a = (255 * (a - lo) / (hi - lo)) 26 | b = (255 * (b - lo) / (hi - lo)) 27 | a[a < 0] = 0 28 | a[a > 255] = 255 29 | b[b < 0] = 0 30 | b[b > 255] = 255 31 | assert np.all(a >= 0), np.all(b >= 0) 32 | assert np.all(a <= 255), np.all(b <= 255) 33 | return a, b 34 | 35 | 36 | def farneback( 37 | op, 38 | original, 39 | unaligned, 40 | pyr_scale=0.5, 41 | levels=5, 42 | winsize=19, 43 | num_iter=16, 44 | poly_n=5, 45 | poly_sigma=1.1, 46 | flow=None, 47 | hi=None, 48 | lo=None, 49 | **kwargs, 50 | ): 51 | """Find the flow from unaligned to original using Farneback's algorithm 52 | 53 | For parameter descriptions see 54 | https://docs.opencv.org/4.3.0/dc/d6b/group__video__track.html 55 | 56 | Parameters 57 | ---------- 58 | original, unaligned (L, M, N) 59 | The images to be aligned. 60 | flow : (L, M, N, 2) float32 61 | The inital guess for the displacement field. 62 | 63 | References 64 | ---------- 65 | Farneback, Gunnar "Two-Frame Motion Estimation Based on Polynomial 66 | Expansion" 2003. 67 | """ 68 | shape = original.shape 69 | assert original.dtype == 'float32', original.dtype 70 | assert unaligned.dtype == 'float32', unaligned.dtype 71 | 72 | if flow is None: 73 | flow = np.zeros((*shape, 2), dtype='float32') 74 | else: 75 | flow = flow[..., ::-1].copy() 76 | 77 | # NOTE: Passing a reshaped view as any of the parameters breaks OpenCV's 78 | # Farneback implementation. 
79 | for i in range(len(original)): 80 | flow[i] = calcOpticalFlowFarneback( 81 | *_rescale_8bit( 82 | original[i], 83 | unaligned[i], 84 | hi=hi[i] if hi is not None else None, 85 | lo=lo[i] if lo is not None else None, 86 | ), 87 | flow=flow[i], 88 | pyr_scale=pyr_scale, 89 | levels=levels, 90 | winsize=winsize, 91 | iterations=num_iter, 92 | poly_n=poly_n, 93 | poly_sigma=poly_sigma, 94 | flags=4, 95 | ) 96 | return {'flow': flow[..., ::-1], 'cost': -1} 97 | -------------------------------------------------------------------------------- /src/tike/communicators/__init__.py: -------------------------------------------------------------------------------- 1 | """Module for communicators using threadpool and MPI. 2 | 3 | This module implements both the p2p and collective communications 4 | among multiple GPUs and multiple nodes. 5 | 6 | """ 7 | 8 | from .comm import * 9 | from .mpi import * 10 | from .pool import * 11 | from .stream import * 12 | -------------------------------------------------------------------------------- /src/tike/constants.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # ######################################################################### 5 | # Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. # 6 | # # 7 | # Copyright 2018. UChicago Argonne, LLC. This software was produced # 8 | # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # 9 | # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # 10 | # U.S. Department of Energy. The U.S. Government has rights to use, # 11 | # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # 12 | # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # 13 | # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. 
If software is # 14 | # modified to produce derivative works, such modified software should # 15 | # be clearly marked, so as not to confuse it with the version available # 16 | # from ANL. # 17 | # # 18 | # Additionally, redistribution and use in source and binary forms, with # 19 | # or without modification, are permitted provided that the following # 20 | # conditions are met: # 21 | # # 22 | # * Redistributions of source code must retain the above copyright # 23 | # notice, this list of conditions and the following disclaimer. # 24 | # # 25 | # * Redistributions in binary form must reproduce the above copyright # 26 | # notice, this list of conditions and the following disclaimer in # 27 | # the documentation and/or other materials provided with the # 28 | # distribution. # 29 | # # 30 | # * Neither the name of UChicago Argonne, LLC, Argonne National # 31 | # Laboratory, ANL, the U.S. Government, nor the names of its # 32 | # contributors may be used to endorse or promote products derived # 33 | # from this software without specific prior written permission. # 34 | # # 35 | # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # 36 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # 37 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # 38 | # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # 39 | # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # 40 | # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # 41 | # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # 42 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # 43 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # 44 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # 45 | # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # 46 | # POSSIBILITY OF SUCH DAMAGE. 
# 47 | # ######################################################################### 48 | """Define universal constants and physical relation functions.""" 49 | 50 | __author__ = "Doga Gursoy, Daniel Ching" 51 | __copyright__ = "Copyright (c) 2018, UChicago Argonne, LLC." 52 | __docformat__ = "restructuredtext en" 53 | __all__ = [ 54 | "PLANCK_CONSTANT", 55 | "SPEED_OF_LIGHT", 56 | "wavelength", 57 | "wavenumber", 58 | "complex_amplitude", 59 | "complex_intensity", 60 | "complex_phase", 61 | "sum_square_norm", 62 | ] 63 | 64 | import numpy as np 65 | 66 | PLANCK_CONSTANT = 6.58211928e-19 # [keV*s] 67 | SPEED_OF_LIGHT = 299792458e+2 # [cm/s] 68 | 69 | 70 | def wavelength(energy): 71 | """Return the wavelength [cm] for a given energy [keV].""" 72 | return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy 73 | 74 | 75 | def wavenumber(energy): 76 | """Return the wavenumber [1/cm] given energy [keV].""" 77 | return energy / PLANCK_CONSTANT / SPEED_OF_LIGHT 78 | 79 | 80 | def complex_amplitude(probe_grid): 81 | """Amplitude of the complex probe wave.""" 82 | return np.abs(probe_grid) 83 | 84 | 85 | def complex_intensity(probe_grid): 86 | """Intensity of the complex wave.""" 87 | return np.square(np.abs(probe_grid)) 88 | 89 | 90 | def complex_phase(probe_grid): 91 | """Phase of the complex probe wave.""" 92 | return np.angle(probe_grid) 93 | 94 | 95 | def sum_square_norm(x, N=1): 96 | """Return x normalized such that the sum of squares of x equals N.""" 97 | x1 = np.sqrt(np.square(x) / np.sum(np.square(x)) * N) 98 | np.testing.assert_almost_equal(np.sum(np.square(np.abs(x1))), N, decimal=3) 99 | return x1 100 | -------------------------------------------------------------------------------- /src/tike/lamino/__init__.py: -------------------------------------------------------------------------------- 1 | """Provide laminography solvers and tooling.""" 2 | from .lamino import * 3 | -------------------------------------------------------------------------------- 
/src/tike/lamino/solvers/__init__.py: -------------------------------------------------------------------------------- 1 | """Contains different solver implementations.""" 2 | 3 | from .cgrad import cgrad 4 | from .bucket import bucket 5 | 6 | __all__ = [ 7 | "cgrad", 8 | "bucket", 9 | ] 10 | -------------------------------------------------------------------------------- /src/tike/lamino/solvers/bucket.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import tike.linalg 4 | from tike.opt import conjugate_gradient 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def _estimate_step_length(obj, fwd_data, theta, grid, op, comm, s): 10 | """Use norm of forward adjoint operations to estimate step length. 11 | 12 | Scaling the adjoint operation by |F*Fm| / |m| puts the step length in the 13 | proper order of magnitude. 14 | 15 | """ 16 | logger.debug('Estimate step length from forward adjoint operations.') 17 | 18 | def reduce_norm(data, workers): 19 | 20 | def f(data): 21 | return tike.linalg.norm(data)**2 22 | 23 | sqr = comm.pool.map(f, data, workers=workers) 24 | sqr_sum = comm.Allreduce_reduce_cpu(sqr).item() 25 | return sqr_sum**0.5 26 | 27 | outnback = comm.pool.map( 28 | op.adj, 29 | fwd_data, 30 | theta, 31 | grid, 32 | overwrite=False, 33 | ) 34 | comm.pool.reduce_gpu(outnback, stride=s) 35 | workers = comm.pool.workers[:s] 36 | objn = reduce_norm(obj, workers) 37 | # Multiply by 2 to because we prefer over-estimating the step 38 | return 2 * reduce_norm(outnback, workers) / objn if objn != 0.0 else 1.0 39 | 40 | 41 | def bucket( 42 | op, 43 | comm, 44 | data, theta, obj, grid, 45 | obj_split=1, 46 | cg_iter=4, 47 | step_length=1, 48 | **kwargs 49 | ): # yapf: disable 50 | """Solve the Laminogarphy problem using the conjugate gradients method.""" 51 | 52 | def fwd_op(u): 53 | fwd_data = comm.pool.map(op.fwd, u, theta, grid) 54 | return comm.Allreduce(fwd_data, obj_split) 55 | 56 | fwd_data = 
def bucket(
    op,
    comm,
    data, theta, obj, grid,
    obj_split=1,
    cg_iter=4,
    step_length=1,
    **kwargs
):  # yapf: disable
    """Solve the Laminography problem using the conjugate gradients method.

    Parameters
    ----------
    op, comm :
        The bucket Lamino operator and the multi-device communicator.
    data, theta, obj, grid :
        Per-device lists of measurements, angles, object, and voxel grid.
    obj_split : int
        Stride used to split work across the device pool.
    cg_iter : int
        Number of conjugate gradient iterations per call.
    step_length : float
        Initial step length; the default value of 1 triggers automatic
        estimation from forward/adjoint norms.

    Returns
    -------
    dict
        The updated 'obj', the final 'cost', and the 'step_length' used.
    """

    def fwd_op(u):
        # Forward project on every device, then sum the partial results.
        fwd_data = comm.pool.map(op.fwd, u, theta, grid)
        return comm.Allreduce(fwd_data, obj_split)

    fwd_data = fwd_op(obj)
    # Only estimate a step length when the caller kept the default; the
    # previous no-op ``else: step_length = step_length`` branch was removed.
    if step_length == 1:
        step_length = _estimate_step_length(
            obj,
            fwd_data,
            theta,
            grid,
            op=op,
            comm=comm,
            s=obj_split,
        )

    obj, cost = update_obj(
        op,
        comm,
        data,
        theta,
        obj,
        grid,
        obj_split,
        fwd_op=fwd_op,
        num_iter=cg_iter,
        step_length=step_length,
    )

    return {'obj': obj, 'cost': cost, 'step_length': step_length}
def update_obj(
    op,
    comm,
    data,
    theta,
    obj,
    grid,
    obj_split,
    fwd_op,
    num_iter=1,
    step_length=1,
):
    """Solve the object recovery problem with conjugate gradients.

    The callbacks below close over ``comm`` and ``obj_split`` and are handed
    to :func:`tike.opt.conjugate_gradient`, which drives the iteration.
    """

    def cost_function(obj):
        # Evaluate the data-fidelity cost on a strided subset of workers and
        # reduce the partial costs to a single scalar on the host.
        fwd_data = fwd_op(obj)
        workers = comm.pool.workers[::obj_split]
        cost_out = comm.pool.map(
            op.cost,
            data[::obj_split],
            fwd_data[::obj_split],
            workers=workers,
        )
        return comm.Allreduce_reduce_cpu(cost_out).item()

    def grad(obj):
        # Per-device gradients are summed on-device with the given stride.
        fwd_data = fwd_op(obj)
        grad_list = comm.pool.map(op.grad, data, theta, fwd_data, grid)
        return comm.pool.reduce_gpu(grad_list, stride=obj_split)

    def direction_dy(xp, grad1, grad0=None, dir_=None):
        """Return the Dai-Yuan search direction."""

        def init(grad1):
            # First iteration: steepest descent direction.
            return -grad1

        def f(grad1):
            return xp.linalg.norm(grad1.ravel())**2

        def d(grad0, grad1, dir_, norm_):
            # Dai-Yuan update; 1e-32 guards against division by zero.
            return (
                - grad1
                + dir_ * norm_
                / (xp.sum(dir_.conj() * (grad1 - grad0)) + 1e-32)
            )  # yapf: disable

        workers = comm.pool.workers[:obj_split]

        if dir_ is None:
            return comm.pool.map(init, grad1, workers=workers)

        n = comm.pool.map(f, grad1, workers=workers)
        norm_ = comm.Allreduce_reduce_cpu(n).item()
        return comm.pool.map(
            d,
            grad0,
            grad1,
            dir_,
            norm_=norm_,
            workers=workers,
        )

    def dir_multi(dir):
        """Scatter dir to all GPUs"""
        return comm.pool.bcast(dir, obj_split)

    def update_multi(x, gamma, dir):
        # Apply the step x <- x + gamma * dir independently on each device.

        def f(x, dir):
            return x + gamma * dir

        return comm.pool.map(f, x, dir)

    obj, cost = conjugate_gradient(
        op.xp,
        x=obj,
        cost_function=cost_function,
        grad=grad,
        direction_dy=direction_dy,
        dir_multi=dir_multi,
        update_multi=update_multi,
        num_iter=num_iter,
        step_length=step_length,
    )

    logger.info('%10s cost is %+12.5e', 'object', cost)
    return obj, cost
def cgrad(
    op,
    comm,
    data, theta, obj,
    cg_iter=4,
    step_length=1,
    **kwargs
):  # yapf: disable
    """Solve the Laminography problem using the conjugate gradients method.

    Parameters
    ----------
    op, comm :
        The Lamino operator and the multi-device communicator.
    data, theta, obj :
        Per-device lists of measurements, angles, and the current object.
    cg_iter : int
        Number of conjugate gradient iterations per call.
    step_length : float
        Initial step length; the default value of 1 triggers automatic
        estimation from forward/adjoint norms.

    Returns
    -------
    dict
        The updated 'obj', the final 'cost', and the 'step_length' used.
    """
    # Only estimate a step length when the caller kept the default; average
    # the independent per-device estimates.
    if step_length == 1:
        step_length = comm.pool.reduce_cpu(
            comm.pool.map(
                _estimate_step_length,
                obj,
                theta,
                op=op,
            )) / comm.pool.num_workers

    obj, cost = update_obj(
        op,
        comm,
        data,
        theta,
        obj,
        num_iter=cg_iter,
        step_length=step_length,
    )

    return {'obj': obj, 'cost': cost, 'step_length': step_length}
def mnorm(x, axis=None, keepdims=False):
    """Return the 2-norm of x computed with mean instead of sum (RMS magnitude)."""
    power = np.mean((x * x.conj()).real, axis=axis, keepdims=keepdims)
    return np.sqrt(power)


def norm(x, axis=None, keepdims=False):
    """Return the vector 2-norm of x along the given axis."""
    power = np.sum((x * x.conj()).real, axis=axis, keepdims=keepdims)
    return np.sqrt(power)


def inner(x, y, axis=None, keepdims=False):
    """Return the complex inner product of x and y; operand order matters."""
    return (x * y.conj()).sum(axis=axis, keepdims=keepdims)


def projection(a, b, axis=None):
    """Return the complex vector projection of a onto b along the given axis."""
    scale = inner(a, b, axis=axis, keepdims=True)
    basis = b / inner(b, b, axis=axis, keepdims=True)
    return scale * basis
def lstsq(a, b, weights=None):
    """Return the least-squares solution for a @ x = b.

    This implementation, unlike cp.linalg.lstsq, allows a stack of matrices to
    be processed simultaneously. The input sizes of the matrices are as
    follows:
        a (..., M, N)
        b (..., M, K)
        x (..., N, K)

    Optionally include weights (..., M) for weighted-least-squares if the
    errors are uncorrelated.

    ...seealso:: https://github.com/numpy/numpy/issues/8720
                 https://github.com/cupy/cupy/issues/3062
    """
    # TODO: Using 'out' parameter of cp.matmul() may reduce memory footprint
    assert a.shape[:-1] == b.shape[:-1], (f"Leading dims of a {a.shape} "
                                          f"and b {b.shape} must be same!")
    if weights is not None:
        assert weights.shape == a.shape[:-1]
        # Scaling the rows by sqrt(weights) makes the normal equations below
        # the weighted-least-squares normal equations.
        w = np.sqrt(weights[..., None])
        a = a * w
        b = b * w
    # Conjugate transpose of a along the last two dimensions (hermitian).
    a_h = a.conj().swapaxes(-1, -2)
    # Solve the normal equations directly; np.linalg.solve is faster and more
    # numerically stable than forming the explicit inverse of (a_h @ a).
    return np.linalg.solve(a_h @ a, a_h @ b)
def orthogonalize_gs(
    x: npt.NDArray,
    axis: typing.Union[int, typing.Tuple[int, ...]] = -1,
    N: typing.Union[int, None] = None,
):
    """Gram-Schmidt orthogonalization for complex arrays.

    Parameters
    ----------
    x: (..., D) array
        containing dimensions to be orthogonalized.
    axis:
        The axis/axes to be orthogonalized. By default only the last axis is
        orthogonalized. If axis is a tuple, then the number of orthogonal
        vectors is the length of the last dimension not included in axis. The
        other dimensions are broadcast.
    N:
        The axis along which to orthogonalize. Other dimensions are broadcast.

    """
    # Normalize axis into a tuple of non-negative dimension indices.
    if isinstance(axis, typing.Iterable):
        axis = tuple(a % x.ndim for a in axis)
    else:
        axis = (axis % x.ndim,)
    # N is the last dimension not included in axis; we iterate over N vectors
    # in the Gram-Schmidt algorithm. All other dimensions are broadcast.
    if N is None:
        N = x.ndim - 1
        while N in axis:
            N -= 1
    N = N % x.ndim
    if N in axis:
        raise ValueError("Cannot orthogonalize a single vector.")
    # Move axis N to the front so we can index vectors along the first axis.
    vectors = np.moveaxis(x, N, 0)
    u = vectors.copy()
    for i in range(1, len(vectors)):
        prev = u[i - 1:i]
        # Subtract from every remaining vector its projection onto prev
        # (classical Gram-Schmidt applied to the original vectors).
        overlap = (vectors[i:] * prev.conj()).sum(axis=axis, keepdims=True)
        scale = (prev * prev.conj()).sum(axis=axis, keepdims=True)
        u[i:] -= overlap * (prev / scale)
    return np.moveaxis(u, 0, N)
2 | 3 | All of the solvers, rely on operators including forward and adjoint operators. 4 | In tike, forward and adjoint operators are paired as fwd and adj methods of an 5 | Operator. 6 | 7 | In this way, multiple solvers (e.g. ePIE, gradient descent, SIRT) implemented 8 | in Python can share the same core operators and can be upgraded to better 9 | operators in the future. 10 | 11 | All operator methods accept the array type that matches the output of their 12 | asarray() method. 13 | """ 14 | 15 | from .cupy import * 16 | -------------------------------------------------------------------------------- /src/tike/operators/cupy/__init__.py: -------------------------------------------------------------------------------- 1 | """Module for operators utilizing the CuPy library. 2 | 3 | This module implements the forward and adjoint operators using CuPy. This 4 | removes the need for interface layers like pybind11 or SWIG because kernel 5 | launches and memory management may by accessed from Python. 6 | """ 7 | 8 | from .alignment import * 9 | from .bucket import * 10 | from .cache import * 11 | from .convolution import * 12 | from .flow import * 13 | from .lamino import * 14 | from .operator import * 15 | from .objective import * 16 | from .pad import * 17 | from .patch import * 18 | from .propagation import * 19 | from .fresnelspectprop import * 20 | from .multislice import * 21 | from .ptycho import * 22 | from .rotate import * 23 | from .shift import * 24 | -------------------------------------------------------------------------------- /src/tike/operators/cupy/alignment.py: -------------------------------------------------------------------------------- 1 | """Defines an alignment operator.""" 2 | 3 | __author__ = "Daniel Ching" 4 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 
class Alignment(Operator):
    """An alignment operator composed of pad, shift, flow, and rotate operations.

    The forward operator applies pad, then shift, then flow, then rotate; the
    adjoint applies the adjoints in reverse order.

    Please see the help for the Pad, Shift, Flow, and Rotate operations for
    description of arguments.
    """

    def __init__(self):
        """Please see help(Alignment) for more info."""
        # Composed operators; their __enter__/__exit__ are forwarded below.
        self.flow = Flow()
        self.pad = Pad()
        self.rotate = Rotate()
        self.shift = Shift()

    def __enter__(self):
        # Enter every composed operator so their resources are allocated.
        self.flow.__enter__()
        self.pad.__enter__()
        self.rotate.__enter__()
        self.shift.__enter__()
        return self

    def __exit__(self, type, value, traceback):
        # Exit every composed operator so their resources are released.
        self.flow.__exit__(type, value, traceback)
        self.pad.__exit__(type, value, traceback)
        self.rotate.__exit__(type, value, traceback)
        self.shift.__exit__(type, value, traceback)

    def fwd(
        self,
        unpadded,
        shift,
        flow,
        padded_shape,
        angle,
        unpadded_shape=None,
        cval=0.0,
    ):
        """Apply pad -> shift -> flow -> rotate to the unpadded images."""
        return self.rotate.fwd(
            unrotated=self.flow.fwd(
                f=self.shift.fwd(
                    a=self.pad.fwd(
                        unpadded=unpadded,
                        padded_shape=padded_shape,
                        cval=cval,
                    ),
                    shift=shift,
                    cval=cval,
                ),
                flow=flow,
                cval=cval,
            ),
            angle=angle,
            cval=cval,
        )

    def adj(
        self,
        rotated,
        flow,
        shift,
        unpadded_shape,
        angle,
        padded_shape=None,
        cval=0.0,
    ):
        """Apply the adjoints in reverse order: rotate -> flow -> shift -> pad."""
        return self.pad.adj(
            padded=self.shift.adj(
                a=self.flow.adj(
                    g=self.rotate.adj(
                        rotated=rotated,
                        angle=angle,
                        cval=cval,
                    ),
                    flow=flow,
                    cval=cval,
                ),
                shift=shift,
                cval=cval,
            ),
            unpadded_shape=unpadded_shape,
            cval=cval,
        )

    def inv(
        self,
        rotated,
        flow,
        shift,
        unpadded_shape,
        angle,
        padded_shape=None,
        cval=0.0,
    ):
        """Invert fwd by applying negated rotate/flow, then shift.adj and pad.adj.

        # NOTE(review): negating the flow field only approximates the inverse
        # warp -- confirm this is the intended behavior for Flow.
        """
        return self.pad.adj(
            padded=self.shift.adj(
                a=self.flow.fwd(
                    f=self.rotate.fwd(
                        unrotated=rotated,
                        angle=angle if angle is None else -angle,
                        cval=cval,
                    ),
                    flow=flow if flow is None else -flow,
                    cval=cval,
                ),
                shift=shift,
                cval=cval,
            ),
            unpadded_shape=unpadded_shape,
            cval=cval,
        )
class CachedFFT():
    """Provides a multi-plan per-device cache for CuPy FFT.

    A class which inherits from this class gains the _fft2, _fftn, and _ifft2
    methods which provide automatic plan caching for the CuPy FFTs.

    This plan cache differs from the cache included in CuPy>=8 because it is
    NOT per-thread. This allows us to use threadpool.map() and allows us to
    destroy the cache manually.
    """

    def __enter__(self):
        # The cache lives only for the duration of the with-block.
        self.plan_cache = {}
        return self

    def __exit__(self, type, value, traceback):
        # Explicitly clear and delete so plans are freed deterministically.
        self.plan_cache.clear()
        del self.plan_cache

    def _get_fft_plan(
        self,
        a: npt.NDArray,
        axes: typing.Tuple[int, ...] = (),
        **kwargs,
    ) -> typing.Union[cupy.cuda.cufft.Plan1d, cupy.cuda.cufft.PlanNd]:
        """Cache multiple FFT plans at the same time."""
        # An empty axes tuple means transform over all axes.
        axes = tuple(range(a.ndim)) if axes == () else axes
        # Key on shape, axes, dtype, AND device so each GPU gets its own plan.
        key = (*a.shape, *axes, a.dtype, cupy.cuda.runtime.getDevice())
        if key in self.plan_cache:
            plan = self.plan_cache[key]
        else:
            plan = get_fft_plan(a, axes=axes)
            self.plan_cache[key] = plan
        return plan

    def _fft2(
        self,
        a: npt.NDArray,
        *args,
        axes: typing.Tuple[int, int] = (-2, -1),
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """2D FFT over the last two axes using a cached plan."""
        return self._fftn(a, *args, axes=axes, **kwargs)

    def _ifft2(
        self,
        a: npt.NDArray,
        *args,
        axes: typing.Tuple[int, int] = (-2, -1),
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """2D inverse FFT over the last two axes using a cached plan."""
        return self._ifftn(a, *args, axes=axes, **kwargs)

    def _ifftn(
        self,
        a: npt.NDArray,
        *args,
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """N-D inverse FFT; the cached plan is activated as a context manager."""
        with self._get_fft_plan(a, **kwargs):
            return ifftn(a, *args, **kwargs)

    def _fftn(
        self,
        a: npt.NDArray,
        *args,
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """N-D FFT; the cached plan is activated as a context manager."""
        with self._get_fft_plan(a, **kwargs):
            return fftn(a, *args, **kwargs)
// Each thread gets one frequency.
// grid shape (-(-N // max_threads), N, R)
// block shape (min(N, max_threads), 0, 0)
//
// NOTE: The template parameter list was restored; the bare `template`
// keyword with no parameters does not compile, and both parameter names
// (frequencyType, rotationType) appear in the kernel signature below.
template <typename frequencyType, typename rotationType>
__global__ void
make_grids(frequencyType* frequency, const rotationType* rotation, int R, int N,
           float tilt) {
  frequencyType ctilt = cosf(tilt);
  frequencyType stilt = sinf(tilt);

  for (int p = blockIdx.z; p < R; p += gridDim.z) {
    frequencyType ctheta = cosf(rotation[p]);
    frequencyType stheta = sinf(rotation[p]);
    // NOTE: Use pointer arithmetic to avoid indexing overflows without using
    // size_t.
    frequencyType* plane = 3 * N * N * p + frequency;

    for (int y = blockIdx.y; y < N; y += gridDim.y) {
      frequencyType kv = (frequencyType)(y - N / 2) / N;
      frequencyType* height = 3 * N * y + plane;

      // clang-format off
      for (
        int x = threadIdx.x + blockDim.x * blockIdx.x;
        x < N;
        x += blockDim.x * gridDim.x
      ) {
        // clang-format on
        frequencyType ku = (frequencyType)(x - N / 2) / N;
        frequencyType* f = 3 * x + height;

        f[0] = +kv * stilt;
        f[1] = -ku * stheta + kv * ctheta * ctilt;
        f[2] = +ku * ctheta + kv * stheta * ctilt;
      }
    }
  }
}
def gaussian_grad(data, farplane, intensity) -> cp.ndarray:
    """Return the gradient of the Gaussian model objective function.

    Parameters
    ----------
    data : (N, M, M)
        The measured diffraction data
    intensity : (N, M, M)
        The modeled intensity
    farplane : (N, K, L, M, M)
        The modeled farplane waves; the last two dimensions match data.

    Returns
    -------
    The gradient with the same shape as farplane.
    """
    # The (N, M, M) ratio is broadcast over the K, L dimensions of farplane;
    # the 1e-9 term guards against division by zero where intensity vanishes.
    return farplane * (1 - cp.sqrt(data) /
                       (cp.sqrt(intensity) + 1e-9))[..., cp.newaxis,
                                                    cp.newaxis, :, :]
92 | 93 | Parameters 94 | ---------- 95 | data : (N, M, M) 96 | The measured diffraction data 97 | intensity : (N, M, M) 98 | The modeled intensity 99 | farplane : (N, K, L, M, M) 100 | """ 101 | return farplane * (1 - data / 102 | (intensity + 1e-9))[..., cp.newaxis, cp.newaxis, :, :] 103 | 104 | 105 | def poisson_each_pattern(data, intensity) -> cp.ndarray: 106 | """The Poisson model objective function per diffraction pattern. 107 | 108 | Parameters 109 | ---------- 110 | data : (N, M, M) 111 | The measured diffraction data 112 | intensity : (N, M, M) 113 | The modeled intensity 114 | 115 | Returns 116 | ------- 117 | costs : (N, ) 118 | The objective function for each pattern. 119 | """ 120 | return cp.mean( 121 | _poisson_fuse(data, intensity), 122 | axis=(-2, -1), 123 | keepdims=False, 124 | ) 125 | 126 | 127 | def _mad(x, **kwargs): 128 | """Return the mean absolute deviation around the median.""" 129 | return cp.mean(cp.abs(x - cp.median(x, **kwargs)), **kwargs) 130 | 131 | 132 | def _gaussian_penalty_grad(x, x0, variance=1.0): 133 | delta = x - x0 134 | k = -2.0 / variance 135 | return k * delta * cp.exp(0.5 * k * delta * delta) 136 | 137 | 138 | def _l2_penalty_grad(x, x0, variance=1.0): 139 | delta = x - x0 140 | return 2 * delta / variance 141 | -------------------------------------------------------------------------------- /src/tike/operators/cupy/operator.py: -------------------------------------------------------------------------------- 1 | __author__ = "Daniel Ching, Viktor Nikitin" 2 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 3 | 4 | from abc import ABC 5 | import logging 6 | 7 | import cupy 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class Operator(ABC): 13 | """A base class for Operators. 14 | 15 | An Operator is a context manager which provides the basic functions 16 | (forward and adjoint) required solve an inverse problem. 
class Operator(ABC):
    """A base class for Operators.

    An Operator is a context manager which provides the basic functions
    (forward and adjoint) required to solve an inverse problem.

    Operators may be composed into other operators and inherited from to
    provide additional implementations to the ones provided in this library.

    """
    xp = cupy
    """The module of the array type used by this operator i.e. NumPy, Cupy."""

    @classmethod
    def asarray(cls, *args, device=None, **kwargs):
        """Convert the input to an array on the given CUDA device."""
        logger.debug(f"asarray to device {device}")
        with cupy.cuda.Device(device):
            return cupy.asarray(*args, **kwargs)

    @classmethod
    def asnumpy(cls, *args, **kwargs):
        """Copy an array from the current device to the host as NumPy."""
        return cupy.asnumpy(*args, **kwargs)

    def __enter__(self):
        """Return self at start of a with-block."""
        # Call the __enter__ methods for any composed operators.
        # Allocate special memory objects.
        return self

    def __exit__(self, type, value, traceback):
        """Gracefully handle interruptions or with-block exit.

        Tasks to be handled by this function include freeing memory or closing
        files.
        """
        # Call the __exit__ methods of any composed classes.
        # Deallocate special memory objects.
        pass

    def fwd(self, **kwargs):
        """Perform the forward operator."""
        raise NotImplementedError("The forward operator was not implemented!")

    def adj(self, **kwargs):
        """Perform the adjoint operator."""
        raise NotImplementedError("The adjoint operator was not implemented!")
    def fwd(self, unpadded, corner=None, padded_shape=None, cval=0.0, **kwargs):
        """Pad the unpadded images with cval.

        Parameters
        ----------
        unpadded : (N, H, W)
            The stack of images to be padded.
        corner : (N, 2)
            The min corner of the images in the padded array. By default the
            images are centered in the padded array.
        padded_shape : 3-tuple
            The desired shape after padding. First element should be N.
            By default, no padding is applied.
        cval : complex64
            The value to use for padding.
        """
        if padded_shape is None:
            padded_shape = unpadded.shape
        if corner is None:
            # Center each image: same symmetric corner repeated N times.
            corner = self.xp.tile(
                (((padded_shape[-2] - unpadded.shape[-2]) // 2,
                  (padded_shape[-1] - unpadded.shape[-1]) // 2)),
                (padded_shape[0], 1),
            )

        padded = self.xp.empty(shape=padded_shape, dtype=unpadded.dtype)
        padded[:] = cval
        for i in range(padded.shape[0]):
            # Destination window for image i; must lie inside the padded array.
            lo0, hi0 = corner[i, 0], corner[i, 0] + unpadded.shape[-2]
            lo1, hi1 = corner[i, 1], corner[i, 1] + unpadded.shape[-1]
            assert lo0 >= 0 and lo1 >= 0
            assert hi0 <= padded.shape[-2] and hi1 <= padded.shape[-1]
            padded[i][lo0:hi0, lo1:hi1] = unpadded[i]
        return padded
64 | """ 65 | if unpadded_shape is None: 66 | unpadded_shape = padded.shape 67 | if corner is None: 68 | corner = self.xp.tile( 69 | (((padded.shape[-2] - unpadded_shape[-2]) // 2, 70 | (padded.shape[-1] - unpadded_shape[-1]) // 2)), 71 | (padded.shape[0], 1), 72 | ) 73 | 74 | unpadded = self.xp.empty(shape=unpadded_shape, dtype=padded.dtype) 75 | for i in range(padded.shape[0]): 76 | lo0, hi0 = corner[i, 0], corner[i, 0] + unpadded.shape[-2] 77 | lo1, hi1 = corner[i, 1], corner[i, 1] + unpadded.shape[-1] 78 | assert lo0 >= 0 and lo1 >= 0 79 | assert hi0 <= padded.shape[-2] and hi1 <= padded.shape[-1] 80 | unpadded[i] = padded[i][lo0:hi0, lo1:hi1] 81 | return unpadded 82 | 83 | inv = adj 84 | -------------------------------------------------------------------------------- /src/tike/operators/cupy/propagation.py: -------------------------------------------------------------------------------- 1 | """Defines a free-space propagation operator based on the CuPy FFT module.""" 2 | 3 | __author__ = "Daniel Ching, Viktor Nikitin" 4 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 5 | 6 | import numpy.typing as npt 7 | import numpy as np 8 | 9 | from .cache import CachedFFT 10 | from .operator import Operator 11 | 12 | 13 | class Propagation(CachedFFT, Operator): 14 | """A Fourier-based free-space propagation using CuPy. 15 | 16 | Take an (..., N, N) array and apply the Fourier transform to the last two 17 | dimensions. 18 | 19 | Attributes 20 | ---------- 21 | detector_shape : int 22 | The pixel width and height of the nearplane and farplane waves. 23 | 24 | Parameters 25 | ---------- 26 | nearplane: (..., detector_shape, detector_shape) complex64 27 | The wavefronts after exiting the object. 28 | farplane: (..., detector_shape, detector_shape) complex64 29 | The wavefronts hitting the detector respectively. Shape for cost 30 | functions and gradients is (nscan, 1, 1, detector_shape, 31 | detector_shape). 32 | 33 | 34 | .. 
    def __init__(self, detector_shape: int, norm: str = "ortho", **kwargs):
        # detector_shape: pixel width/height of the (square) wavefronts.
        # norm: FFT normalization mode passed to the CuPy FFT routines.
        self.detector_shape = detector_shape
        self.norm = norm

    def fwd(
        self,
        nearplane: npt.NDArray[np.csingle],
        overwrite: bool = False,
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """Forward Fourier-based free-space propagation operator."""
        self._check_shape(nearplane)
        shape = nearplane.shape
        # Flatten leading dimensions, FFT the last two, restore the shape.
        return self._fft2(
            nearplane.reshape(-1, self.detector_shape, self.detector_shape),
            norm=self.norm,
            axes=(-2, -1),
            overwrite_x=overwrite,
        ).reshape(shape)

    def adj(
        self,
        farplane: npt.NDArray[np.csingle],
        overwrite: bool = False,
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """Adjoint Fourier-based free-space propagation operator."""
        self._check_shape(farplane)
        shape = farplane.shape
        # Flatten leading dimensions, inverse FFT the last two, restore shape.
        return self._ifft2(
            farplane.reshape(-1, self.detector_shape, self.detector_shape),
            norm=self.norm,
            axes=(-2, -1),
            overwrite_x=overwrite,
        ).reshape(shape)

    def _check_shape(self, x: npt.NDArray) -> None:
        """Raise ValueError unless the last two dims match detector_shape."""
        assert type(x) is self.xp.ndarray, type(x)
        shape = (-1, self.detector_shape, self.detector_shape)
        # Skipped entirely when Python runs with -O (__debug__ is False).
        if __debug__ and x.shape[-2:] != shape[-2:]:
            raise ValueError(f"waves must have shape {shape} not {x.shape}.")
    def fwd(
        self,
        nearplane: npt.NDArray[np.csingle],
        overwrite: bool = False,
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """Zero-distance forward propagation; returns the input unchanged."""
        return nearplane

    def adj(
        self,
        farplane: npt.NDArray[np.csingle],
        overwrite: bool = False,
        **kwargs,
    ) -> npt.NDArray[np.csingle]:
        """Zero-distance adjoint propagation; returns the input unchanged."""
        return farplane
22 | """ 23 | 24 | def _make_grid(self, unrotated, angle): 25 | """Return the points on the rotated grid.""" 26 | cos, sin = np.cos(angle), np.sin(angle) 27 | shifti = (unrotated.shape[-2] - 1) / 2.0 28 | shiftj = (unrotated.shape[-1] - 1) / 2.0 29 | 30 | i, j = self.xp.mgrid[0:unrotated.shape[-2], 31 | 0:unrotated.shape[-1]].astype( 32 | tike.precision.floating) 33 | 34 | i -= shifti 35 | j -= shiftj 36 | 37 | i1 = (+cos * i + sin * j) + shifti 38 | j1 = (-sin * i + cos * j) + shiftj 39 | 40 | return self.xp.stack([i1.ravel(), j1.ravel()], axis=-1) 41 | 42 | def fwd(self, unrotated, angle, cval=0.0): 43 | if angle is None: 44 | return unrotated 45 | f = unrotated 46 | g = self.xp.zeros_like(f) 47 | 48 | # Compute rotated coordinates 49 | coords = self._make_grid(f, angle) 50 | 51 | # Reshape into stack of 2D images 52 | shape = f.shape 53 | h, w = shape[-2:] 54 | f = f.reshape(-1, h, w) 55 | g = g.reshape(-1, h * w) 56 | 57 | for i in range(len(f)): 58 | _remap_lanczos(f[i], coords, 2, g[i], fwd=True, cval=cval) 59 | 60 | return g.reshape(shape) 61 | 62 | def adj(self, rotated, angle, cval=0.0): 63 | if angle is None: 64 | return rotated 65 | g = rotated 66 | f = self.xp.zeros_like(g) 67 | 68 | # Compute rotated coordinates 69 | coords = self._make_grid(f, angle) 70 | 71 | # Reshape into stack of 2D images 72 | shape = f.shape 73 | h, w = shape[-2:] 74 | f = f.reshape(-1, h, w) 75 | g = g.reshape(-1, h * w) 76 | 77 | for i in range(len(f)): 78 | _remap_lanczos(f[i], coords, 2, g[i], fwd=False, cval=cval) 79 | 80 | return f.reshape(shape) 81 | 82 | def inv(self, rotated, angle, cval=0.0): 83 | return self.fwd( 84 | rotated, 85 | angle if angle is None else -angle, 86 | cval, 87 | ) 88 | -------------------------------------------------------------------------------- /src/tike/operators/cupy/shift.py: -------------------------------------------------------------------------------- 1 | __author__ = "Daniel Ching, Viktor Nikitin" 2 | __copyright__ = "Copyright (c) 
2020, UChicago Argonne, LLC." 3 | 4 | from .cache import CachedFFT 5 | from .operator import Operator 6 | 7 | 8 | class Shift(CachedFFT, Operator): 9 | """Shift last two dimensions of an array using Fourier method.""" 10 | 11 | def fwd(self, a, shift, overwrite=False, cval=None): 12 | """Apply shifts along last two dimensions of a. 13 | 14 | Parameters 15 | ---------- 16 | array (..., H, W) float32 17 | The array to be shifted. 18 | shift (..., 2) float32 19 | The the shifts to be applied along the last two axes. 20 | 21 | """ 22 | if shift is None: 23 | return a 24 | shape = a.shape 25 | padded = a.reshape(*shape) 26 | padded = self._fft2( 27 | padded, 28 | axes=(-2, -1), 29 | overwrite_x=overwrite, 30 | ) 31 | x, y = self.xp.meshgrid( 32 | self.xp.fft.fftfreq(padded.shape[-1]).astype(shift.dtype), 33 | self.xp.fft.fftfreq(padded.shape[-2]).astype(shift.dtype), 34 | ) 35 | padded *= self.xp.exp( 36 | -2j 37 | * self.xp.pi 38 | * (x * shift[..., 1, None, None] + y * shift[..., 0, None, None]) 39 | ) 40 | padded = self._ifft2(padded, axes=(-2, -1), overwrite_x=True) 41 | return padded.reshape(*shape) 42 | 43 | def adj(self, a, shift, overwrite=False, cval=None): 44 | if shift is None: 45 | return a 46 | return self.fwd(a, -shift, overwrite=overwrite, cval=cval) 47 | 48 | inv = adj 49 | -------------------------------------------------------------------------------- /src/tike/precision.py: -------------------------------------------------------------------------------- 1 | """This module defines constants for the default data types.""" 2 | import numpy as np 3 | 4 | integer = np.intc 5 | """The default integer type""" 6 | 7 | floating = np.single 8 | """The default floating type""" 9 | 10 | cfloating = np.csingle 11 | """The default complex floating type""" 12 | -------------------------------------------------------------------------------- /src/tike/ptycho/__init__.py: -------------------------------------------------------------------------------- 1 | """Functions 
for ptychography.""" 2 | from .fresnel import * 3 | from .object import * 4 | from .position import * 5 | from .probe import * 6 | from .exitwave import * 7 | from .ptycho import * 8 | from .solvers import * 9 | 10 | # NOTE: The docstring below holds reference docstring that can be used to fill 11 | # in documentation of new functions. 12 | """ 13 | Parameters 14 | ---------- 15 | data : (..., FRAME, WIDE, HIGH) float32 16 | The intensity (square of the absolute value) of the propagated wavefront; 17 | i.e. what the detector records. 18 | comm : :py:class:`tike.communicators.Comm` 19 | An object which manages communications between both GPUs and nodes. 20 | eigen_probe : (..., 1, EIGEN, SHARED, WIDE, HIGH) complex64 21 | The eigen probes for all positions. 22 | eigen_weights : (..., POSI, EIGEN, SHARED) float32 23 | The relative intensity of the eigen probes at each position. 24 | op : :py:class:`tike.operators.Ptycho` 25 | A ptychography operator. Provides forward and adjoint operations. 26 | psi : (..., WIDE, HIGH) complex64 27 | The wavefront modulation coefficients of the object. 28 | probe : (..., 1, 1, SHARED, WIDE, HIGH) complex64 29 | The shared complex illumination function amongst all positions. 30 | scan : (..., POSI, 2) float32 31 | Coordinates of the minimum corner of the probe grid for each 32 | measurement in the coordinate system of psi. Coordinate order consistent 33 | with WIDE, HIGH order. 34 | 35 | """ 36 | -------------------------------------------------------------------------------- /src/tike/ptycho/learn.py: -------------------------------------------------------------------------------- 1 | """Implements functions for ptychographic deep learning.""" 2 | 3 | __author__ = "Daniel Ching" 4 | __copyright__ = "Copyright (c) 2021, UChicago Argonne, LLC." 
5 | 6 | from tike.operators import Patch 7 | from .position import check_allowed_positions 8 | 9 | 10 | def extract_patches(psi, scan, patch_width): 11 | """Extract patches from the object function. 12 | 13 | Parameters 14 | ---------- 15 | scan : (..., POSI, 2) float32 16 | Coordinates of the minimum corner of the patch grid for each 17 | extracted patch. 18 | psi : (..., WIDE, HIGH) complex64 19 | The complex wavefront modulation of the object. 20 | patch_width : int 21 | The desired width of the square patches to be extraced. 22 | 23 | Returns 24 | ------- 25 | patches : (..., POSI, patch_width, patch_width) complex64 numpy-array 26 | Patches of psi extracted at the given scan positions. 27 | 28 | """ 29 | check_allowed_positions(scan, psi, (patch_width, patch_width)) 30 | with Patch() as operator: 31 | psi = operator.asarray(psi) 32 | scan = operator.asarray(scan) 33 | patches = operator.fwd( 34 | images=psi, 35 | positions=scan, 36 | patch_width=patch_width, 37 | ) 38 | patches = operator.asnumpy(patches) 39 | return patches 40 | -------------------------------------------------------------------------------- /src/tike/ptycho/solvers/__init__.py: -------------------------------------------------------------------------------- 1 | """Contains different solver implementations.""" 2 | 3 | from .lstsq import * 4 | from .rpie import * 5 | from .options import * 6 | from ._preconditioner import * 7 | 8 | __all__ = [ 9 | 'crop_fourier_space', 10 | 'lstsq_grad', 11 | 'LstsqOptions', 12 | 'PtychoParameters', 13 | 'rpie', 14 | 'RpieOptions', 15 | 'update_preconditioners', 16 | ] 17 | -------------------------------------------------------------------------------- /src/tike/random.py: -------------------------------------------------------------------------------- 1 | """Provides random number generators for complex data types.""" 2 | 3 | import logging 4 | 5 | import cupy as cp 6 | import numpy as np 7 | 8 | import tike.precision 9 | 10 | randomizer_np = 
np.random.default_rng() 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | def numpy_complex(*shape): 16 | """Return a complex random array in the range [-0.5, 0.5).""" 17 | return ( 18 | randomizer_np.random(size=(*shape, 2), dtype=tike.precision.floating) - 19 | 0.5).view(tike.precision.cfloating)[..., 0] 20 | 21 | 22 | def cupy_complex(*shape): 23 | """Return a complex random array in the range [-0.5, 0.5).""" 24 | return ( 25 | cp.random.random(size=(*shape, 2), dtype=tike.precision.floating) - 26 | 0.5).view(tike.precision.cfloating)[..., 0] 27 | 28 | 29 | def cluster_wobbly_center(*args, **kwargs): 30 | """Deprecated alias for :py:func:`tike.cluster.wobbly_center`.""" 31 | import warnings 32 | warnings.warn( 33 | 'tike.random.cluster_wobbly_center is depreacted. ' 34 | 'Use tike.cluster.wobbly_center instead.', 35 | DeprecationWarning, 36 | ) 37 | import tike.cluster 38 | return tike.cluster.wobbly_center(*args, **kwargs) 39 | 40 | 41 | def cluster_compact(*args, **kwargs): 42 | """Deprecated alias for :py:func:`tike.cluster.compact`.""" 43 | import warnings 44 | warnings.warn( 45 | 'tike.random.cluster_compact is depreacted. 
' 46 | 'Use tike.cluster.compact instead.', 47 | DeprecationWarning, 48 | ) 49 | import tike.cluster 50 | return tike.cluster.compact(*args, **kwargs) 51 | -------------------------------------------------------------------------------- /tests/.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | 3 | [run] 4 | source = 5 | tike/ 6 | omit = 7 | tike/sharedlibs/* 8 | 9 | [report] 10 | # Regexes for lines to exclude from consideration 11 | exclude_lines = 12 | # Have to re-enable the standard pragma 13 | pragma: no cover 14 | 15 | # Don't complain about missing debug-only code: 16 | def __repr__ 17 | if self\.debug 18 | 19 | # Don't complain if tests don't hit defensive assertion code: 20 | raise AssertionError 21 | raise NotImplementedError 22 | 23 | # Don't complain if non-runnable code isn't run: 24 | if 0: 25 | if False: 26 | if __name__ == .__main__.: 27 | 28 | ignore_errors = True 29 | -------------------------------------------------------------------------------- /tests/communicators/test_comm.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import cupy as cp 4 | import numpy as np 5 | 6 | import tike.communicators 7 | 8 | try: 9 | from mpi4py import MPI 10 | _mpi_size = MPI.COMM_WORLD.Get_size() 11 | _mpi_rank = MPI.COMM_WORLD.Get_rank() 12 | except ImportError: 13 | _mpi_size = 1 14 | _mpi_rank = 0 15 | 16 | _gpu_count = cp.cuda.runtime.getDeviceCount() 17 | 18 | class TestComm(unittest.TestCase): 19 | 20 | def setUp(self, workers=max(1, _gpu_count // _mpi_size)): 21 | cp.cuda.device.Device((workers * _mpi_rank) % _gpu_count).use() 22 | self.comm = tike.communicators.Comm( 23 | tuple(i + (workers * _mpi_rank) % _gpu_count for i in range(workers))) 24 | self.xp = self.comm.pool.xp 25 | 26 | def test_Allreduce_reduce_gpu(self): 27 | a = cp.array(1) 28 | a_list = self.comm.pool.bcast([a]) 29 | truth = 
[cp.array(self.comm.pool.num_workers * self.comm.mpi.size)] 30 | result = self.comm.Allreduce_reduce_gpu(a_list) 31 | # print() 32 | # print(truth, type(truth)) 33 | # print(result, type(result)) 34 | cp.testing.assert_array_equal(result[0], truth[0]) 35 | 36 | def test_Allreduce_reduce_cpu(self): 37 | a = np.array(1) 38 | a_list = self.comm.pool.bcast([a]) 39 | truth = a * np.array(self.comm.pool.num_workers * self.comm.mpi.size) 40 | result = self.comm.Allreduce_reduce_cpu(a_list) 41 | # print() 42 | # print(truth, type(truth)) 43 | # print(result, type(result)) 44 | np.testing.assert_array_equal(result, truth) 45 | 46 | def test_Allreduce_reduce_mean(self): 47 | a = cp.array(1.0) 48 | a_list = self.comm.pool.bcast([a]) 49 | truth = cp.array(1.0) 50 | result = self.comm.Allreduce_mean(a_list, axis=None) 51 | # print() 52 | # print(truth, type(truth)) 53 | # print(result, type(result)) 54 | cp.testing.assert_array_equal(result, truth) 55 | 56 | def test_Allreduce_allreduce(self): 57 | a = self.xp.arange(10).reshape(2, 5) 58 | a_list = self.comm.pool.bcast([a]) 59 | result = self.comm.Allreduce(a_list) 60 | 61 | def check_correct(result): 62 | self.xp.testing.assert_array_equal( 63 | result, 64 | self.xp.arange(10).reshape(2, 5) * self.comm.pool.num_workers * 65 | self.comm.mpi.size, 66 | ) 67 | print(result) 68 | 69 | self.comm.pool.map(check_correct, result) 70 | 71 | 72 | if __name__ == "__main__": 73 | unittest.main() 74 | -------------------------------------------------------------------------------- /tests/communicators/test_communicator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | # from tike.communicator import MPICommunicator 3 | import unittest 4 | 5 | 6 | @unittest.skip(reason="The communicator module is broken/disabled.") 7 | class TestMPICommunicator(unittest.TestCase): 8 | """Test the functions of the MPICommunicator class.""" 9 | 10 | def test_slicing(self): 11 | """Check correctness 
of pytcho and tomo data slicing.""" 12 | comm = MPICommunicator() 13 | # Data comes from forward project with dimensions (theta, v, h) 14 | shape = [11, 3, 7] 15 | tomo_data = np.arange(0, np.prod(shape), dtype=np.int32) 16 | tomo_data = tomo_data.reshape(shape) 17 | # Pytcho data should be periodic along v 18 | ptycho_data = comm.get_ptycho_slice(tomo_data) 19 | # print("{} ptycho_data:\n{}".format(comm.rank, ptycho_data)) 20 | lo = comm.rank * 3 21 | np.testing.assert_array_equal(ptycho_data[:, 0:3, :], 22 | ptycho_data[:, lo:lo + 3, :]) 23 | # Assert the reverse transform works 24 | tomo_data1 = comm.get_tomo_slice(ptycho_data) 25 | np.testing.assert_array_equal(tomo_data, tomo_data1) 26 | 27 | def test_gather(self): 28 | """Check correctness of data gathering.""" 29 | comm = MPICommunicator() 30 | data = np.ones([3, 1], dtype=np.int32) * comm.rank 31 | data = comm.gather(data, root=0, axis=1) 32 | if comm.rank == 0: 33 | # print(data) 34 | truth = np.tile(np.arange(comm.size), [3, 1]) 35 | np.testing.assert_array_equal(data, truth) 36 | else: 37 | assert data is None 38 | 39 | 40 | if __name__ == '__main__': 41 | unittest.main() 42 | -------------------------------------------------------------------------------- /tests/communicators/test_mpi.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | import cupy as cp 5 | 6 | from tike.communicators import MPIComm, combined_shape 7 | 8 | _gpu_count = cp.cuda.runtime.getDeviceCount() 9 | 10 | class TestMPIComm(unittest.TestCase): 11 | 12 | def setUp(self): 13 | self.mpi = MPIComm() 14 | self.xp = np 15 | # NOTE: MPI GPU awareness requires the following environment variable 16 | # to be set: `OMPI_MCA_opal_cuda_support=true` conda-forge openmpi is 17 | # compiled with GPU awareness. 
18 | 19 | def test_p2p(self): 20 | pass 21 | 22 | def test_bcast(self, root=0): 23 | with cp.cuda.Device(self.mpi.rank % _gpu_count): 24 | x = self.xp.ones(5) if self.mpi.rank == root else self.xp.zeros(5) 25 | truth = self.xp.ones(5) 26 | result = self.mpi.Bcast(x, root=root) 27 | self.xp.testing.assert_array_equal(result, truth) 28 | 29 | def test_gather(self, root=0): 30 | # For testing assign each rank 1 GPU of the same index 31 | with cp.cuda.Device(self.mpi.rank % _gpu_count): 32 | x = self.xp.ones(5) * self.mpi.rank 33 | truth = self.xp.ones( 34 | (self.mpi.size, 5)) * self.xp.arange(self.mpi.size)[..., None] 35 | result = self.mpi.Gather(x, root=root, axis=None) 36 | if self.mpi.rank == root: 37 | self.xp.testing.assert_array_equal(result, truth) 38 | else: 39 | assert result is None 40 | 41 | def test_gather_mismatched_shapes(self, root=0): 42 | # For testing assign each rank 1 GPU of the same index 43 | with cp.cuda.Device(self.mpi.rank % _gpu_count): 44 | x = self.xp.ones((2, self.mpi.rank + 1, 1, 3)) * (self.mpi.rank + 1) 45 | truth = self.xp.ones(( 46 | 2, 47 | sum(i for i in range(1, self.mpi.size + 1)), 48 | 1, 49 | 3, 50 | )) * self.xp.array( 51 | np.concatenate([[i] * i for i in range(1, self.mpi.size + 1) 52 | ]))[..., None, None] 53 | result = self.mpi.Gather(x, root=root, axis=1) 54 | if self.mpi.rank == root: 55 | print() 56 | print(result) 57 | print() 58 | print(truth) 59 | self.xp.testing.assert_array_equal(result, truth) 60 | else: 61 | assert result is None 62 | 63 | def test_scatter(self): 64 | pass 65 | 66 | def test_allreduce(self): 67 | # For testing assign each rank 1 GPU of the same index 68 | with cp.cuda.Device(self.mpi.rank % _gpu_count): 69 | x = self.xp.ones(5) 70 | truth = self.xp.ones(5) * self.mpi.size 71 | result = self.mpi.Allreduce(x) 72 | self.xp.testing.assert_array_equal(result, truth) 73 | 74 | def test_allgather(self): 75 | # For testing assign each rank 1 GPU of the same index 76 | with cp.cuda.Device(self.mpi.rank % 
def test_stream_reduce_prototype():
    """Pure-numpy reference: apply f elementwise, then sum-reduce each output."""

    def f(a, b, c):
        return a, b * c, b + c

    inputs = [
        np.array([0, 1, 0, 0]),
        np.array([1, 1, 3, 1]),
        np.array([2, 2, 7, 2]),
    ]

    # sum(a), sum(b * c), sum(b + c) over the four elements.
    expected = [1, 27, 19]

    per_element = [f(*row) for row in zip(*inputs)]
    reduced = [np.sum(column, axis=0) for column in zip(*per_element)]

    np.testing.assert_array_equal(expected, reduced)
100 | x0 = cupyx.empty_pinned(shape=(4,), dtype=dtype) 101 | x0[:] = [0, 1, 2, 0.0] 102 | x1 = cupyx.empty_pinned(shape=(4,), dtype=dtype) 103 | x1[:] = [1, 1, 3, 1.0] 104 | x2 = 0.0 105 | ind_args = (x0, x1) 106 | mod_args = (x2,) 107 | 108 | truth = cp.array(0 * 1 + 1 * 1 + 2 * 3 + 0 * 1.0), 109 | 110 | result = tike.communicators.stream.stream_and_modify( 111 | f, 112 | ind_args, 113 | mod_args, 114 | streams=[cp.cuda.Stream() for _ in range(num_streams)], 115 | chunk_size=2, 116 | ) 117 | 118 | for t, r in zip(truth, result): 119 | print(t, type(t)) 120 | print(r, type(t)) 121 | cp.testing.assert_array_equal(t, r) 122 | 123 | 124 | def test_stream_modify2(dtype=np.double, num_streams=2): 125 | 126 | x0 = cupyx.empty_pinned(shape=(4,), dtype=dtype) 127 | x0[:] = [0, 1, 2, 0.0] 128 | x1 = cupyx.empty_pinned(shape=(4,), dtype=dtype) 129 | x1[:] = [1, 1, 3, 1.0] 130 | x2 = cp.array(0.0) 131 | 132 | def f( 133 | ind_args: typing.List[cp.ndarray], 134 | lo: int, 135 | hi: int, 136 | ) -> None: 137 | nonlocal x2 138 | (a, b) = ind_args 139 | x2[...] 
= cp.sum(a * b) + x2 140 | 141 | tike.communicators.stream.stream_and_modify2( 142 | f, 143 | ind_args=[x0, x1], 144 | streams=[cp.cuda.Stream() for _ in range(num_streams)], 145 | chunk_size=2, 146 | ) 147 | 148 | truth = cp.array(0 * 1 + 1 * 1 + 2 * 3 + 0 * 1.0) 149 | 150 | t, r = (truth, x2) 151 | print(t, type(t)) 152 | print(r, type(t)) 153 | cp.testing.assert_array_equal(t, r) 154 | -------------------------------------------------------------------------------- /tests/compare_gradients.py: -------------------------------------------------------------------------------- 1 | """Compare image gradient implementations from various sources.""" 2 | 3 | import cupy as cp 4 | import cupyx.scipy.ndimage 5 | import libimage 6 | import matplotlib.pyplot as plt 7 | import numpy as np 8 | 9 | 10 | def _image_grad(x): 11 | """Return the gradient of the x for each of the last two dimesions.""" 12 | # FIXME: Use different gradient approximation that does not use FFT. Because 13 | # FFT caches are per-thread and per-device, using FFT is inefficient. 
14 | ramp = 2j * cp.pi * cp.linspace( 15 | -0.5, 16 | 0.5, 17 | x.shape[-1], 18 | dtype='float32', 19 | endpoint=False, 20 | ) 21 | grad_x = np.fft.ifftn( 22 | ramp[:, None] * np.fft.fftn(x, axes=(-2,)), 23 | axes=(-2,), 24 | ) 25 | grad_y = np.fft.ifftn( 26 | ramp * np.fft.fftn(x, axes=(-1,)), 27 | axes=(-1,), 28 | ) 29 | return grad_x, grad_y 30 | 31 | 32 | def _image_grad_sobel(x): 33 | return ( 34 | -cupyx.scipy.ndimage.sobel(x, axis=-2, mode='nearest'), 35 | -cupyx.scipy.ndimage.sobel(x, axis=-1, mode='nearest'), 36 | ) 37 | 38 | 39 | def _image_grad_gradient(x): 40 | return cp.gradient( 41 | -x, 42 | axis=(-2, -1), 43 | ) 44 | 45 | 46 | def _image_grad_gaussian(x, s=1.0): 47 | """Return the gradient of the x for each of the last two dimesions.""" 48 | return ( 49 | -cupyx.scipy.ndimage.gaussian_filter1d( 50 | x, s, order=1, axis=-2, mode='nearest'), 51 | -cupyx.scipy.ndimage.gaussian_filter1d( 52 | x, s, order=1, axis=-1, mode='nearest'), 53 | ) 54 | 55 | 56 | def _diff(x): 57 | return ( 58 | a - b for a, b in zip(_image_grad_gradient(x), _image_grad_gaussian(x))) 59 | 60 | 61 | def test_image_grads(w=512): 62 | x = (libimage.load('earring', w) + np.random.normal(size=(w, w)) + 1j * 63 | (libimage.load('satyre', w) + np.random.normal(size=(w, w)))) 64 | x = cp.asarray(x) 65 | 66 | for grad in [ 67 | _image_grad, 68 | _image_grad_gradient, 69 | _image_grad_sobel, 70 | _image_grad_gaussian, 71 | _diff, 72 | ]: 73 | 74 | dx, dy = grad(x) 75 | dx = dx.get() 76 | dy = dy.get() 77 | 78 | f = plt.figure() 79 | plt.subplot(2, 2, 1) 80 | plt.imshow(dx.imag) 81 | plt.colorbar() 82 | plt.subplot(2, 2, 2) 83 | plt.imshow(dy.imag) 84 | plt.colorbar() 85 | plt.subplot(2, 2, 3) 86 | plt.imshow(dx.real) 87 | plt.colorbar() 88 | plt.subplot(2, 2, 4) 89 | plt.imshow(dy.real) 90 | plt.colorbar() 91 | plt.savefig(f'{grad.__name__}.png') 92 | -------------------------------------------------------------------------------- /tests/data/algin_setup.pickle.lzma: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/algin_setup.pickle.lzma -------------------------------------------------------------------------------- /tests/data/beta-chip-128.tiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/beta-chip-128.tiff -------------------------------------------------------------------------------- /tests/data/delta-chip-128.tiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/delta-chip-128.tiff -------------------------------------------------------------------------------- /tests/data/images.bib: -------------------------------------------------------------------------------- 1 | @misc{ wiki:xxx, 2 | author = "Wikimedia Commons", 3 | title = "File:Erdhummel (Bombus terrestris)2.jpg --- Wikimedia Commons{,} the free media repository", 4 | year = "2018", 5 | url = "https://commons.wikimedia.org/w/index.php?title=File:Erdhummel_(Bombus_terrestris)2.jpg&oldid=304892818", 6 | note = "[Online; accessed 17-July-2018]" 7 | } 8 | 9 | @misc{ wiki:xxx, 10 | author = "Wikimedia Commons", 11 | title = "File:Cryptomeria japonica MHNT.BOT2004.0.64.jpg --- Wikimedia Commons{,} the free media repository", 12 | year = "2016", 13 | url = "https://commons.wikimedia.org/w/index.php?title=File:Cryptomeria_japonica_MHNT.BOT2004.0.64.jpg&oldid=189538915", 14 | note = "[Online; accessed 17-August-2018]" 15 | } 16 | 17 | -------------------------------------------------------------------------------- /tests/data/lamino_bucket.pickle.lzma: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/lamino_bucket.pickle.lzma -------------------------------------------------------------------------------- /tests/data/lamino_cgrad.pickle.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/lamino_cgrad.pickle.lzma -------------------------------------------------------------------------------- /tests/data/lamino_setup.pickle.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/lamino_setup.pickle.lzma -------------------------------------------------------------------------------- /tests/data/nalm256.pickle.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/nalm256.pickle.lzma -------------------------------------------------------------------------------- /tests/data/position-error-247.pickle.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/position-error-247.pickle.bz2 -------------------------------------------------------------------------------- /tests/data/ptycho_gaussian.pickle.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/ptycho_gaussian.pickle.lzma -------------------------------------------------------------------------------- /tests/data/ptycho_setup.pickle.lzma: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/ptycho_setup.pickle.lzma -------------------------------------------------------------------------------- /tests/data/siemens-star-small.npz.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/siemens-star-small.npz.bz2 -------------------------------------------------------------------------------- /tests/data/singers.npz.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/singers.npz.bz2 -------------------------------------------------------------------------------- /tests/data/tomo_grad.pickle.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/tomo_grad.pickle.lzma -------------------------------------------------------------------------------- /tests/data/tomo_setup.pickle.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/data/tomo_setup.pickle.lzma -------------------------------------------------------------------------------- /tests/operators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/operators/__init__.py -------------------------------------------------------------------------------- /tests/operators/test_alignment.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Alignment 8 | import tike.precision 9 | 10 | from .util import random_complex, OperatorTests 11 | 12 | __author__ = "Daniel Ching" 13 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 14 | __docformat__ = 'restructuredtext en' 15 | 16 | 17 | class TestAlignment(unittest.TestCase, OperatorTests): 18 | """Test the Alignment operator.""" 19 | 20 | def setUp(self, shape=(7, 5, 5)): 21 | """Load a dataset for reconstruction.""" 22 | 23 | self.operator = Alignment() 24 | self.operator.__enter__() 25 | self.xp = self.operator.xp 26 | 27 | padded_shape = shape + np.asarray((0, 41, 32)) 28 | flow = (self.xp.random.rand( 29 | *padded_shape, 2, dtype=tike.precision.floating) - 0.5) * 9 30 | shift = self.xp.random.rand( 31 | *shape[:-2], 2, dtype=tike.precision.floating) - 0.5 32 | 33 | np.random.seed(0) 34 | self.m = self.xp.asarray(random_complex(*shape)) 35 | self.m_name = 'unpadded' 36 | self.d = self.xp.asarray(random_complex(*padded_shape)) 37 | self.d_name = 'rotated' 38 | self.kwargs = { 39 | 'flow': flow, 40 | 'shift': shift, 41 | 'padded_shape': padded_shape, 42 | 'unpadded_shape': shape, 43 | 'angle': tike.precision.floating(np.random.rand() * 2 * np.pi), 44 | 'cval': 0, 45 | } 46 | print(self.operator) 47 | 48 | @unittest.skip('FIXME: This operator is not scaled.') 49 | def test_scaled(self): 50 | pass 51 | 52 | 53 | if __name__ == '__main__': 54 | unittest.main() 55 | -------------------------------------------------------------------------------- /tests/operators/test_checkerboard.py: -------------------------------------------------------------------------------- 1 | """Check whether the checkerboard algorithm is equivalent to fftshift.""" 2 | import numpy as xp 3 | import tike.precision 4 | 5 | from tike.operators.cupy.usfft import 
checkerboard 6 | 7 | 8 | def shifted_fft_two(a, xp): 9 | return checkerboard( 10 | xp, 11 | xp.fft.fftn( 12 | checkerboard(xp, a), 13 | norm='ortho', 14 | ), 15 | inverse=True, 16 | ) 17 | 18 | 19 | def shifted_fft_ref(a, xp): 20 | return xp.fft.ifftshift(xp.fft.fftn( 21 | xp.fft.fftshift(a), 22 | norm='ortho', 23 | )) 24 | 25 | 26 | def test_checkerboard_correctness(): 27 | shape = xp.random.randint(1, 32, 3) * 2 28 | a = xp.random.rand(*shape) + 1j * xp.random.rand(*shape) 29 | a = a.astype(tike.precision.cfloating) 30 | b = a.copy() 31 | a = shifted_fft_ref(a, xp) 32 | b = shifted_fft_two(b, xp) 33 | xp.testing.assert_array_almost_equal(a, b, decimal=5) 34 | -------------------------------------------------------------------------------- /tests/operators/test_flow.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Flow 8 | 9 | from .util import random_complex, random_floating, OperatorTests 10 | 11 | __author__ = "Daniel Ching, Viktor Nikitin" 12 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 
13 | __docformat__ = 'restructuredtext en' 14 | 15 | 16 | class TestFlow(unittest.TestCase, OperatorTests): 17 | """Test the Flow operator.""" 18 | 19 | def setUp(self, n=16, nz=17, ntheta=8): 20 | """Load a dataset for reconstruction.""" 21 | 22 | self.operator = Flow() 23 | self.operator.__enter__() 24 | self.xp = self.operator.xp 25 | 26 | np.random.seed(0) 27 | self.m = self.xp.asarray(random_complex(ntheta, nz, n)) 28 | self.m_name = 'f' 29 | self.d = self.xp.asarray(random_complex(*self.m.shape)) 30 | self.d_name = 'g' 31 | self.kwargs = { 32 | 'flow': self.xp.asarray(random_floating(*self.m.shape, 2) * 16), 33 | } 34 | print(self.operator) 35 | 36 | @unittest.skip('FIXME: This operator is not scaled.') 37 | def test_scaled(self): 38 | pass 39 | 40 | 41 | if __name__ == '__main__': 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /tests/operators/test_lamino.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Lamino, Bucket 8 | import tike.precision 9 | 10 | from .util import random_complex, OperatorTests 11 | 12 | __author__ = "Daniel Ching, Viktor Nikitin, Xiaodong Yu" 13 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 
14 | __docformat__ = 'restructuredtext en' 15 | 16 | 17 | class TestLaminoFourier(unittest.TestCase, OperatorTests): 18 | """Test the Laminography operator.""" 19 | 20 | def setUp(self, n=16, ntheta=8, tilt=np.pi / 3, eps=1e-6): 21 | self.operator = Lamino( 22 | n=n, 23 | tilt=tilt, 24 | eps=eps, 25 | ) 26 | self.operator.__enter__() 27 | self.xp = self.operator.xp 28 | np.random.seed(0) 29 | self.m = self.xp.asarray(random_complex(n, n, n)) 30 | self.m_name = 'u' 31 | self.d = self.xp.asarray(random_complex(ntheta, n, n)) 32 | self.d_name = 'data' 33 | self.kwargs = { 34 | 'theta': 35 | self.xp.linspace(0, 2 * np.pi, 36 | ntheta).astype(tike.precision.floating) 37 | } 38 | print(self.operator) 39 | 40 | @unittest.skip('FIXME: This operator is not scaled.') 41 | def test_scaled(self): 42 | pass 43 | 44 | 45 | class TestLaminoBucket(unittest.TestCase, OperatorTests): 46 | """Test the Laminography operator.""" 47 | 48 | def setUp(self, n=16, ntheta=8, tilt=np.pi / 3, eps=1e-1): 49 | self.operator = Bucket( 50 | n=n, 51 | tilt=tilt, 52 | eps=eps, 53 | ) 54 | self.operator.__enter__() 55 | self.xp = self.operator.xp 56 | np.random.seed(0) 57 | self.m = self.xp.asarray(random_complex(n, n, n), 58 | dtype=tike.precision.cfloating) 59 | self.m_name = 'u' 60 | self.d = self.xp.asarray(random_complex(ntheta, n, n), 61 | dtype=tike.precision.cfloating) 62 | self.d_name = 'data' 63 | self.kwargs = { 64 | 'theta': 65 | self.xp.linspace(0, 2 * np.pi, 66 | ntheta).astype(tike.precision.floating), 67 | 'grid': 68 | self.xp.asarray(self.operator._make_grid().reshape(n**3, 3), 69 | dtype='int16'), 70 | } 71 | print(self.operator) 72 | 73 | @unittest.skip('FIXME: This operator is not scaled.') 74 | def test_scaled(self): 75 | pass 76 | 77 | 78 | if __name__ == '__main__': 79 | unittest.main() 80 | -------------------------------------------------------------------------------- /tests/operators/test_multislice.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Multislice, SingleSlice 8 | import tike.precision 9 | import tike.linalg 10 | 11 | from .util import random_complex 12 | 13 | __author__ = "Daniel Ching" 14 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 15 | __docformat__ = "restructuredtext en" 16 | 17 | 18 | class TestMultiSlice(unittest.TestCase): 19 | """Test the MultiSlice operator.""" 20 | 21 | def setUp(self, depth=7, pw=15, nscan=27): 22 | """Load a dataset for reconstruction.""" 23 | self.nscan = nscan 24 | self.nprobe = 3 25 | self.probe_shape = (nscan, self.nprobe, pw, pw) 26 | self.detector_shape = (pw, pw) 27 | self.original_shape = (depth, 128, 128) 28 | self.scan_shape = (nscan, 2) 29 | print(Multislice) 30 | 31 | np.random.seed(0) 32 | scan = np.random.rand(*self.scan_shape).astype(tike.precision.floating) * ( 33 | 127 - 16 34 | ) 35 | probe = random_complex(*self.probe_shape) 36 | original = random_complex(*self.original_shape) 37 | farplane = random_complex(*self.probe_shape[:-2], *self.detector_shape) 38 | 39 | self.operator = Multislice( 40 | nscan=self.scan_shape[-2], 41 | probe_shape=self.probe_shape[-1], 42 | probe_wavelength = 1e-10, 43 | probe_FOV_lengths = (1e-5, 1e-5), 44 | detector_shape=self.detector_shape[-1], 45 | nz=self.original_shape[-2], 46 | n=self.original_shape[-1], 47 | multislice_propagation_distance = 1e-8, 48 | ) 49 | self.operator.__enter__() 50 | self.xp = self.operator.xp 51 | 52 | self.mkwargs = { 53 | "probe": self.xp.asarray(probe), 54 | "psi": self.xp.asarray(original), 55 | "scan": self.xp.asarray(scan), 56 | } 57 | self.dkwargs = { 58 | "nearplane": self.xp.asarray(farplane), 59 | } 60 | 61 | def test_adjoint(self): 62 | """Check that the adjoint operator is correct.""" 63 | d = self.operator.fwd(**self.mkwargs) 64 | assert d.shape == 
self.dkwargs["nearplane"].shape 65 | m0, m1 = self.operator.adj(**self.dkwargs, **self.mkwargs) 66 | assert m0.shape == self.mkwargs["psi"].shape 67 | assert m1.shape == self.mkwargs["probe"].shape 68 | a = tike.linalg.inner(d, self.dkwargs["nearplane"]) 69 | b = tike.linalg.inner(self.mkwargs["psi"], m0) 70 | c = tike.linalg.inner(self.mkwargs["probe"], m1) 71 | print() 72 | print(" = {:.5g}{:+.5g}j".format(a.real.item(), a.imag.item())) 73 | print("< m0, F*d> = {:.5g}{:+.5g}j".format(b.real.item(), b.imag.item())) 74 | print("< m1, F*d> = {:.5g}{:+.5g}j".format(c.real.item(), c.imag.item())) 75 | self.xp.testing.assert_allclose(a.real, b.real, rtol=1e-3, atol=0) 76 | self.xp.testing.assert_allclose(a.imag, b.imag, rtol=1e-3, atol=0) 77 | self.xp.testing.assert_allclose(a.real, c.real, rtol=1e-3, atol=0) 78 | self.xp.testing.assert_allclose(a.imag, c.imag, rtol=1e-3, atol=0) 79 | 80 | @unittest.skip("FIXME: This operator is not scaled.") 81 | def test_scaled(self): 82 | pass 83 | 84 | 85 | class TestSingleSlice(TestMultiSlice): 86 | """Test the SingleSlice operator.""" 87 | 88 | def setUp(self, depth=1, pw=15, nscan=27): 89 | """Load a dataset for reconstruction.""" 90 | self.nscan = nscan 91 | self.nprobe = 3 92 | self.probe_shape = (nscan, self.nprobe, pw, pw) 93 | self.detector_shape = (pw, pw) 94 | self.original_shape = (depth, 128, 128) 95 | self.scan_shape = (nscan, 2) 96 | print(SingleSlice) 97 | 98 | np.random.seed(0) 99 | scan = np.random.rand(*self.scan_shape).astype(tike.precision.floating) * ( 100 | 127 - 16 101 | ) 102 | probe = random_complex(*self.probe_shape) 103 | original = random_complex(*self.original_shape) 104 | farplane = random_complex(*self.probe_shape[:-2], *self.detector_shape) 105 | 106 | self.operator = SingleSlice( 107 | nscan=self.scan_shape[-2], 108 | probe_shape=self.probe_shape[-1], 109 | detector_shape=self.detector_shape[-1], 110 | nz=self.original_shape[-2], 111 | n=self.original_shape[-1], 112 | ) 113 | 
self.operator.__enter__() 114 | self.xp = self.operator.xp 115 | 116 | self.mkwargs = { 117 | "probe": self.xp.asarray(probe), 118 | "psi": self.xp.asarray(original), 119 | "scan": self.xp.asarray(scan), 120 | } 121 | self.dkwargs = { 122 | "nearplane": self.xp.asarray(farplane), 123 | } 124 | 125 | 126 | if __name__ == "__main__": 127 | unittest.main() 128 | -------------------------------------------------------------------------------- /tests/operators/test_pad.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Pad 8 | 9 | from .util import random_complex, OperatorTests 10 | 11 | __author__ = "Daniel Ching" 12 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 13 | __docformat__ = 'restructuredtext en' 14 | 15 | 16 | class TestPad(unittest.TestCase, OperatorTests): 17 | """Test the Pad operator.""" 18 | 19 | def setUp(self, shape=(7, 5, 5)): 20 | """Load a dataset for reconstruction.""" 21 | 22 | self.operator = Pad() 23 | self.operator.__enter__() 24 | self.xp = self.operator.xp 25 | 26 | padded_shape = shape + np.asarray((0, 41, 32)) 27 | corner = self.xp.asarray(np.random.randint(0, 32, size=(shape[0], 2))) 28 | 29 | np.random.seed(0) 30 | self.m = self.xp.asarray(random_complex(*shape)) 31 | self.m_name = 'unpadded' 32 | self.d = self.xp.asarray(random_complex(*padded_shape)) 33 | self.d_name = 'padded' 34 | self.kwargs = { 35 | 'corner': corner, 36 | 'padded_shape': padded_shape, 37 | 'unpadded_shape': shape, 38 | } 39 | print(self.operator) 40 | 41 | 42 | class TestPadDefaults(unittest.TestCase, OperatorTests): 43 | """Test the Pad operator.""" 44 | 45 | def setUp(self, shape=(7, 5, 5)): 46 | """Load a dataset for reconstruction.""" 47 | 48 | self.operator = Pad() 49 | self.operator.__enter__() 50 | self.xp = self.operator.xp 51 | 52 | padded_shape = shape 53 | 54 | 
np.random.seed(0) 55 | self.m = self.xp.asarray(random_complex(*shape)) 56 | self.m_name = 'unpadded' 57 | self.d = self.xp.asarray(random_complex(*padded_shape)) 58 | self.d_name = 'padded' 59 | self.kwargs = { 60 | 'corner': None, 61 | 'padded_shape': None, 62 | 'unpadded_shape': None, 63 | } 64 | print(self.operator) 65 | 66 | 67 | if __name__ == '__main__': 68 | unittest.main() 69 | -------------------------------------------------------------------------------- /tests/operators/test_propagation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Propagation, FresnelSpectProp 8 | 9 | from .util import random_complex, OperatorTests 10 | 11 | __author__ = "Daniel Ching" 12 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 13 | __docformat__ = 'restructuredtext en' 14 | 15 | 16 | class TestPropagation(unittest.TestCase, OperatorTests): 17 | """Test the Propagation operator.""" 18 | 19 | def setUp(self, nwaves=13, probe_shape=127): 20 | """Load a dataset for reconstruction.""" 21 | self.operator = Propagation( 22 | nwaves=nwaves, 23 | detector_shape=probe_shape, 24 | probe_shape=probe_shape, 25 | ) 26 | self.operator.__enter__() 27 | self.xp = self.operator.xp 28 | np.random.seed(0) 29 | self.m = self.xp.asarray( 30 | random_complex(nwaves, probe_shape, probe_shape)) 31 | self.m_name = 'nearplane' 32 | self.d = self.xp.asarray( 33 | random_complex(nwaves, probe_shape, probe_shape)) 34 | self.d_name = 'farplane' 35 | self.kwargs = {} 36 | print(self.operator) 37 | 38 | 39 | class TestFresnelSpectrumPropagation(unittest.TestCase, OperatorTests): 40 | """Test the FresnelSpectProp operator.""" 41 | 42 | def setUp(self, nwaves=13, probe_shape=127): 43 | """Load a dataset for reconstruction.""" 44 | self.operator = FresnelSpectProp( 45 | nwaves=nwaves, 46 | detector_shape=probe_shape, 47 | 
probe_shape=probe_shape, 48 | ) 49 | self.operator.__enter__() 50 | self.xp = self.operator.xp 51 | np.random.seed(0) 52 | self.m = self.xp.asarray( 53 | random_complex(nwaves, probe_shape, probe_shape)) 54 | self.m_name = 'nearplane' 55 | self.d = self.xp.asarray( 56 | random_complex(nwaves, probe_shape, probe_shape)) 57 | self.d_name = 'farplane' 58 | self.kwargs = {} 59 | print(self.operator) 60 | 61 | if __name__ == '__main__': 62 | unittest.main() 63 | -------------------------------------------------------------------------------- /tests/operators/test_ptycho.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Ptycho 8 | import tike.precision 9 | import tike.linalg 10 | 11 | from .util import random_complex 12 | 13 | __author__ = "Daniel Ching" 14 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 15 | __docformat__ = "restructuredtext en" 16 | 17 | 18 | class TestPtycho(unittest.TestCase): 19 | """Test the ptychography operator.""" 20 | 21 | def setUp(self, depth=1, pw=15, nscan=27): 22 | """Load a dataset for reconstruction.""" 23 | self.nscan = nscan 24 | self.nprobe = 3 25 | self.probe_shape = (nscan, 1, self.nprobe, pw, pw) 26 | self.detector_shape = (pw * 3, pw * 3) 27 | self.original_shape = (depth, 128, 128) 28 | self.scan_shape = (nscan, 2) 29 | print(Ptycho) 30 | 31 | np.random.seed(0) 32 | scan = np.random.rand(*self.scan_shape).astype(tike.precision.floating) * ( 33 | 127 - 16 34 | ) 35 | probe = random_complex(*self.probe_shape) 36 | original = random_complex(*self.original_shape) 37 | farplane = random_complex(*self.probe_shape[:-2], *self.detector_shape) 38 | 39 | self.operator = Ptycho( 40 | nscan=self.scan_shape[-2], 41 | probe_shape=self.probe_shape[-1], 42 | detector_shape=self.detector_shape[-1], 43 | nz=self.original_shape[-2], 44 | n=self.original_shape[-1], 
45 | ) 46 | self.operator.__enter__() 47 | self.xp = self.operator.xp 48 | 49 | self.mkwargs = { 50 | "scan": self.xp.asarray(scan), 51 | "probe": self.xp.asarray(probe), 52 | "psi": self.xp.asarray(original), 53 | } 54 | self.dkwargs = { 55 | "farplane": self.xp.asarray(farplane), 56 | } 57 | 58 | def test_adjoint(self): 59 | """Check that the adjoint operator is correct.""" 60 | d = self.operator.fwd(**self.mkwargs) 61 | assert d.shape == self.dkwargs["farplane"].shape 62 | m0, m1 = self.operator.adj(**self.dkwargs, **self.mkwargs) 63 | assert m0.shape == self.mkwargs["psi"].shape 64 | assert m1.shape == self.mkwargs["probe"].shape 65 | a = tike.linalg.inner(d, self.dkwargs["farplane"]) 66 | b = tike.linalg.inner(self.mkwargs["psi"], m0) 67 | c = tike.linalg.inner(self.mkwargs["probe"], m1) 68 | print() 69 | print(" = {:.5g}{:+.5g}j".format(a.real.item(), a.imag.item())) 70 | print("< m0, F*d> = {:.5g}{:+.5g}j".format(b.real.item(), b.imag.item())) 71 | print("< m1, F*d> = {:.5g}{:+.5g}j".format(c.real.item(), c.imag.item())) 72 | self.xp.testing.assert_allclose(a.real, b.real, rtol=1e-3, atol=0) 73 | self.xp.testing.assert_allclose(a.imag, b.imag, rtol=1e-3, atol=0) 74 | self.xp.testing.assert_allclose(a.real, c.real, rtol=1e-3, atol=0) 75 | self.xp.testing.assert_allclose(a.imag, c.imag, rtol=1e-3, atol=0) 76 | 77 | @unittest.skip("FIXME: This operator is not scaled.") 78 | def test_scaled(self): 79 | pass 80 | 81 | 82 | if __name__ == "__main__": 83 | unittest.main() 84 | -------------------------------------------------------------------------------- /tests/operators/test_rotate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Rotate 8 | import tike.precision 9 | 10 | from .util import random_complex, OperatorTests 11 | 12 | __author__ = "Daniel Ching" 13 | __copyright__ = "Copyright (c) 
2020, UChicago Argonne, LLC." 14 | __docformat__ = 'restructuredtext en' 15 | 16 | 17 | class TestRotate(unittest.TestCase, OperatorTests): 18 | """Test the Rotate operator.""" 19 | 20 | def setUp(self, shape=(7, 25, 53)): 21 | """Load a dataset for reconstruction.""" 22 | 23 | self.operator = Rotate() 24 | self.operator.__enter__() 25 | self.xp = self.operator.xp 26 | 27 | np.random.seed(0) 28 | self.m = self.xp.asarray(random_complex(*shape)) 29 | self.m_name = 'unrotated' 30 | self.d = self.xp.asarray(random_complex(*shape)) 31 | self.d_name = 'rotated' 32 | self.kwargs = { 33 | 'angle': np.random.rand() * 2 * np.pi, 34 | } 35 | print(self.operator) 36 | 37 | def debug_show(self): 38 | import libimage 39 | import matplotlib 40 | matplotlib.use('Agg') 41 | from matplotlib import pyplot as plt 42 | x = self.xp.asarray(libimage.load('coins', 256), 43 | dtype=tike.precision.cfloating) 44 | y = self.operator.fwd(x[None], 4 * np.pi) 45 | 46 | print(x.shape, y.shape) 47 | 48 | plt.figure() 49 | plt.imshow(x.real.get()) 50 | 51 | plt.figure() 52 | plt.imshow(y[0].real.get()) 53 | plt.show() 54 | 55 | @unittest.skip('FIXME: This operator is not scaled.') 56 | def test_scaled(self): 57 | pass 58 | 59 | 60 | if __name__ == '__main__': 61 | unittest.main() 62 | -------------------------------------------------------------------------------- /tests/operators/test_shift.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Shift 8 | import tike.precision 9 | 10 | from .util import random_complex, OperatorTests 11 | 12 | __author__ = "Daniel Ching, Viktor Nikitin" 13 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 
14 | __docformat__ = 'restructuredtext en' 15 | 16 | 17 | class TestShift(unittest.TestCase, OperatorTests): 18 | """Test the Shift operator.""" 19 | 20 | def setUp(self, n=16, nz=17, ntheta=8): 21 | self.operator = Shift() 22 | self.operator.__enter__() 23 | self.xp = self.operator.xp 24 | np.random.seed(0) 25 | self.m = self.xp.asarray(random_complex(ntheta, nz, n)) 26 | self.m_name = 'a' 27 | self.d = self.xp.asarray(random_complex(*self.m.shape)) 28 | self.d_name = 'a' 29 | self.kwargs = { 30 | 'shift': 31 | self.xp.asarray((np.random.random([ntheta, 2]) - 0.5) * 7, 32 | dtype=tike.precision.floating) 33 | } 34 | print(self.operator) 35 | 36 | 37 | if __name__ == '__main__': 38 | unittest.main() 39 | -------------------------------------------------------------------------------- /tests/operators/test_sum.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators import Operator 8 | 9 | from .util import random_complex, OperatorTests 10 | 11 | __author__ = "Daniel Ching" 12 | __copyright__ = "Copyright (c) 2023, UChicago Argonne, LLC." 
13 | __docformat__ = 'restructuredtext en' 14 | 15 | 16 | class Sum(Operator): 17 | 18 | def fwd(self, unsummed, shape): 19 | """Perform the forward operator.""" 20 | return self.xp.sum(unsummed, keepdims=True) 21 | 22 | def adj(self, summed, shape): 23 | """Perform the adjoint operator.""" 24 | return self.xp.ones_like(summed, shape=shape) * summed 25 | 26 | 27 | class TestSum(unittest.TestCase, OperatorTests): 28 | """Test the Pad operator.""" 29 | 30 | def setUp(self, shape=(7, 5, 5)): 31 | """Load a dataset for reconstruction.""" 32 | 33 | self.operator = Sum() 34 | self.operator.__enter__() 35 | self.xp = self.operator.xp 36 | 37 | np.random.seed(0) 38 | self.m = self.xp.asarray(random_complex(*shape)) 39 | self.m_name = 'unsummed' 40 | self.d = self.xp.asarray(random_complex(*(1, 1, 1))) 41 | self.d_name = 'summed' 42 | self.kwargs = { 43 | 'shape': shape, 44 | } 45 | print(self.operator) 46 | 47 | @unittest.skip('FIXME: This operator is not scaled.') 48 | def test_scaled(self): 49 | pass 50 | 51 | 52 | if __name__ == '__main__': 53 | unittest.main() 54 | -------------------------------------------------------------------------------- /tests/operators/test_usfft.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import unittest 5 | 6 | import numpy as np 7 | from tike.operators.cupy.usfft import (eq2us, us2eq, vector_gather, 8 | vector_scatter) 9 | from tike.operators import Operator 10 | import tike.precision 11 | 12 | from .util import random_complex, OperatorTests 13 | 14 | __author__ = "Daniel Ching" 15 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 
16 | __docformat__ = 'restructuredtext en' 17 | 18 | 19 | class Interp(Operator): 20 | 21 | def __init__(self, eps): 22 | self.eps = eps 23 | self.m = 7 24 | self.mu = 4.42341 25 | 26 | def fwd(self, f, x, n): 27 | return vector_gather(self.xp, f, x, n, self.m, self.mu) 28 | 29 | def adj(self, F, x, n): 30 | return vector_scatter(self.xp, F, x, n, self.m, self.mu) 31 | 32 | 33 | class TestInterp(unittest.TestCase, OperatorTests): 34 | """Test the Interp operator.""" 35 | 36 | def setUp(self, n=16, ntheta=32, eps=1e-6): 37 | self.operator = Interp(eps) 38 | self.operator.__enter__() 39 | self.xp = self.operator.xp 40 | np.random.seed(0) 41 | self.m = self.xp.asarray(random_complex(n, n, n)) 42 | self.m_name = 'f' 43 | self.d = self.xp.asarray(random_complex(ntheta)) 44 | self.d_name = 'F' 45 | self.kwargs = { 46 | 'x': 47 | self.xp.asarray(np.random.rand(ntheta, 3) - 0.5, 48 | dtype=tike.precision.floating), 49 | 'n': 50 | n, 51 | } 52 | print(self.operator) 53 | 54 | @unittest.skip('FIXME: This operator is not scaled.') 55 | def test_scaled(self): 56 | pass 57 | 58 | 59 | class USFFT(Operator): 60 | 61 | def __init__(self, eps): 62 | self.eps = eps 63 | 64 | def fwd(self, f, x, n): 65 | return eq2us(f, x, n, self.eps, self.xp) 66 | 67 | def adj(self, F, x, n): 68 | return us2eq(F, -x, n, self.eps, self.xp) 69 | 70 | 71 | class TestUSFFT(unittest.TestCase, OperatorTests): 72 | """Test the USFFT operator.""" 73 | 74 | def setUp(self, n=16, ntheta=8, eps=1e-6): 75 | self.operator = USFFT(eps) 76 | self.operator.__enter__() 77 | self.xp = self.operator.xp 78 | np.random.seed(1) 79 | self.m = self.xp.asarray(random_complex(n, n, n)) 80 | self.m_name = 'f' 81 | self.d = self.xp.asarray(random_complex(ntheta)) 82 | self.d_name = 'F' 83 | self.kwargs = { 84 | 'x': 85 | self.xp.asarray(np.random.rand(ntheta, 3) - 0.5).astype( 86 | tike.precision.floating), 87 | 'n': 88 | n, 89 | } 90 | print(self.operator) 91 | 92 | @unittest.skip('FIXME: This operator is not scaled.') 93 | 
def test_scaled(self): 94 | pass 95 | 96 | @unittest.skip('For debugging only.') 97 | def test_image(self, s=64, ntheta=16 * 16 * 16): 98 | import libimage 99 | import matplotlib 100 | matplotlib.use('Agg') 101 | from matplotlib import pyplot as plt 102 | 103 | f = libimage.load('satyre', s) 104 | f = np.tile(f, (s, 1, 1)) 105 | f = self.xp.asarray(f, dtype=tike.precision.cfloating) 106 | 107 | x = [ 108 | g.ravel() for g in np.meshgrid( 109 | np.linspace(-0.5, 0.5, s), 110 | np.linspace(-0.5, 0.5, s), 111 | np.linspace(-0.5, 0.5, s), 112 | ) 113 | ] 114 | 115 | x = np.stack(x, -1) 116 | 117 | print(x.shape) 118 | 119 | x = self.xp.asarray(x, dtype=tike.precision.floating) 120 | 121 | d = self.operator.fwd(f, x, s) 122 | m = self.operator.adj(d, x, s) 123 | 124 | plt.figure() 125 | plt.imshow(m[s // 2].real.get()) 126 | plt.savefig('usfft-real.png') 127 | 128 | 129 | if __name__ == '__main__': 130 | unittest.main() 131 | -------------------------------------------------------------------------------- /tests/operators/util.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import numpy as np 4 | 5 | __author__ = "Daniel Ching, Viktor Nikitin" 6 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 
7 | __docformat__ = 'restructuredtext en' 8 | 9 | import tike.linalg 10 | import tike.random 11 | import tike.precision 12 | 13 | random_complex = tike.random.numpy_complex 14 | 15 | 16 | def random_floating(*shape): 17 | return tike.random.randomizer_np.random( 18 | size=shape, 19 | dtype=tike.precision.floating, 20 | ) - 0.5 21 | 22 | 23 | class OperatorTests(): 24 | """Provide operator tests for correct adjoint and normalization.""" 25 | 26 | def setUp(self): 27 | self.operator = None 28 | self.operator.__enter__() 29 | self.xp = self.operator.xp 30 | np.random.seed(0) 31 | self.m = None 32 | self.m_name = '' 33 | self.d = None 34 | self.d_name = '' 35 | self.kwargs = {} 36 | print(self.operator) 37 | raise NotImplementedError() 38 | 39 | def tearDown(self): 40 | self.operator.__exit__(None, None, None) 41 | 42 | def test_adjoint(self): 43 | """Check that the adjoint operator is correct.""" 44 | d = self.operator.fwd(**{self.m_name: self.m}, **self.kwargs) 45 | assert d.shape == self.d.shape, (d.shape, self.d.shape) 46 | m = self.operator.adj(**{self.d_name: self.d}, **self.kwargs) 47 | assert m.shape == self.m.shape, (m.shape, self.m.shape) 48 | a = tike.linalg.inner(d, self.d) 49 | b = tike.linalg.inner(self.m, m) 50 | print() 51 | print(' = {:.5g}{:+.5g}j'.format(a.real.item(), a.imag.item())) 52 | print('< d, F*d> = {:.5g}{:+.5g}j'.format(b.real.item(), b.imag.item())) 53 | self.xp.testing.assert_allclose(a.real, b.real, rtol=1e-3, atol=0) 54 | self.xp.testing.assert_allclose(a.imag, b.imag, rtol=1e-3, atol=0) 55 | 56 | def test_scaled(self): 57 | """Check that the adjoint operator is scaled.""" 58 | # NOTE: For a linear operator to be considered 'normal', the input and 59 | # output spaces must be the same. That requirement is too strict for 60 | # all of our operators. Here we only test whether |F*Fm| = |m|. 
61 | d = self.operator.fwd(**{self.m_name: self.m}, **self.kwargs) 62 | m = self.operator.adj(**{self.d_name: d}, **self.kwargs) 63 | a = tike.linalg.inner(m, m) 64 | b = tike.linalg.inner(self.m, self.m) 65 | print() 66 | # NOTE: Inner product with self is real-only magnitude of self 67 | print(' = {:.5g}{:+.5g}j'.format(a.real.item(), 0)) 68 | print('< m, m> = {:.5g}{:+.5g}j'.format(b.real.item(), 0)) 69 | self.xp.testing.assert_allclose(a.real, b.real, rtol=1e-3, atol=0) 70 | 71 | def test_fwd_time(self): 72 | """Time the forward operation.""" 73 | start = time.perf_counter() 74 | d = self.operator.fwd(**{self.m_name: self.m}, **self.kwargs) 75 | elapsed = time.perf_counter() - start 76 | print(f"\n{elapsed:1.3e} seconds") 77 | 78 | def test_adj_time(self): 79 | """Time the adjoint operation.""" 80 | start = time.perf_counter() 81 | m = self.operator.adj(**{self.d_name: self.d}, **self.kwargs) 82 | elapsed = time.perf_counter() - start 83 | print(f"\n{elapsed:1.3e} seconds") 84 | -------------------------------------------------------------------------------- /tests/print-gpu-info.py: -------------------------------------------------------------------------------- 1 | import cupy 2 | import pprint 3 | 4 | if __name__ == '__main__': 5 | 6 | pp = pprint.PrettyPrinter() 7 | 8 | print(f'CUDA driver version is {cupy.cuda.runtime.driverGetVersion()}\n' 9 | f'CUDA runtime version is {cupy.cuda.runtime.runtimeGetVersion()}\n') 10 | 11 | for i in range(cupy.cuda.runtime.getDeviceCount()): 12 | print(f'Properties for device {i}:') 13 | pp.pprint(cupy.cuda.runtime.getDeviceProperties(i)) 14 | -------------------------------------------------------------------------------- /tests/ptycho/__init__.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | 3 | matplotlib.use('Agg') 4 | -------------------------------------------------------------------------------- /tests/ptycho/hermite.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/ptycho/hermite.mat -------------------------------------------------------------------------------- /tests/ptycho/io.py: -------------------------------------------------------------------------------- 1 | import os 2 | import typing 3 | import warnings 4 | 5 | import numpy as np 6 | import numpy.typing as npt 7 | import tike.view 8 | import tike.ptycho 9 | 10 | test_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) 11 | 12 | result_dir = os.path.join(test_dir, 'result', 'ptycho') 13 | os.makedirs(result_dir, exist_ok=True) 14 | 15 | data_dir = os.path.join(test_dir, 'data') 16 | 17 | 18 | def _save_eigen_probe(output_folder, eigen_probe): 19 | import matplotlib 20 | matplotlib.use('Agg') 21 | from matplotlib import pyplot as plt 22 | flattened = [] 23 | for i in range(eigen_probe.shape[-4]): 24 | probe = eigen_probe[..., i, :, :, :] 25 | flattened.append( 26 | np.concatenate( 27 | probe.reshape((-1, *probe.shape[-2:])), 28 | axis=1, 29 | )) 30 | flattened = np.concatenate(flattened, axis=0) 31 | with warnings.catch_warnings(): 32 | warnings.filterwarnings("ignore", category=UserWarning) 33 | plt.imsave( 34 | f'{output_folder}/eigen-phase.png', 35 | np.angle(flattened), 36 | # The output of np.angle is locked to (-pi, pi] 37 | cmap=plt.cm.twilight, 38 | vmin=-np.pi, 39 | vmax=np.pi, 40 | ) 41 | plt.imsave( 42 | f'{output_folder}/eigen-ampli.png', 43 | np.abs(flattened), 44 | ) 45 | 46 | 47 | def _save_probe( 48 | output_folder: str, 49 | probe: npt.NDArray, 50 | probe_options: typing.Union[None, tike.ptycho.ProbeOptions], 51 | algorithm: str, 52 | ): 53 | import matplotlib 54 | matplotlib.use('Agg') 55 | from matplotlib import pyplot as plt 56 | flattened = np.concatenate( 57 | probe.reshape((-1, *probe.shape[-2:])), 58 | axis=1, 59 | ) 60 | flattened /= 
(np.abs(flattened).max() * 1.001) 61 | plt.imsave( 62 | f'{output_folder}/probe.png', 63 | tike.view.complexHSV_to_RGB(flattened), 64 | ) 65 | if probe_options is not None and len(probe_options.power) > 0: 66 | f = plt.figure() 67 | tike.view.plot_probe_power_series(probe_options.power) 68 | plt.title(algorithm) 69 | plt.savefig(f'{output_folder}/probe-power.png') 70 | plt.close(f) 71 | nmodes = probe.shape[-3] 72 | probe_orthogonality_matrix = np.zeros((nmodes, nmodes)) 73 | for i in range(nmodes): 74 | for j in range(nmodes): 75 | probe_orthogonality_matrix[i, j] = np.abs(tike.linalg.inner( 76 | probe[..., i, :, :], 77 | probe[..., j, :, :] 78 | )) 79 | f = plt.figure() 80 | plt.imshow(probe_orthogonality_matrix, interpolation='nearest') 81 | plt.colorbar() 82 | plt.tight_layout() 83 | plt.savefig(f'{output_folder}/probe-orthogonality.png') 84 | plt.close(f) 85 | 86 | 87 | def _save_ptycho_result(result, algorithm): 88 | if result is None: 89 | return 90 | try: 91 | import matplotlib 92 | matplotlib.use('Agg') 93 | from matplotlib import pyplot as plt 94 | import tike.view 95 | fname = os.path.join(result_dir, f'{algorithm}') 96 | os.makedirs(fname, exist_ok=True) 97 | if len(result.algorithm_options.costs) > 1: 98 | fig = plt.figure() 99 | ax1, ax2 = tike.view.plot_cost_convergence( 100 | result.algorithm_options.costs, 101 | result.algorithm_options.times, 102 | ) 103 | ax2.set_xlim(0, 60) 104 | ax1.set_ylim(10**(-1), 10**2) 105 | fig.suptitle(algorithm) 106 | fig.tight_layout() 107 | plt.savefig(os.path.join(fname, 'convergence.png')) 108 | plt.close(fig) 109 | plt.imsave( 110 | f'{fname}/{0}-phase.png', 111 | np.sum(np.angle(result.psi).astype('float32'), axis=0), 112 | # The output of np.angle is locked to (-pi, pi] 113 | cmap=plt.cm.twilight, 114 | vmin=-np.pi, 115 | vmax=np.pi, 116 | ) 117 | plt.imsave( 118 | f'{fname}/{0}-ampli.png', 119 | np.sum(np.abs(result.psi).astype('float32'), axis=0), 120 | cmap=plt.cm.gray, 121 | ) 122 | _save_probe(fname, 
result.probe, result.probe_options, algorithm) 123 | if result.eigen_weights is not None: 124 | _save_eigen_weights(fname, result.eigen_weights) 125 | if result.eigen_weights.shape[-2] > 1: 126 | _save_eigen_probe(fname, result.eigen_probe) 127 | except ImportError: 128 | pass 129 | 130 | 131 | def _save_eigen_weights(fname, weights): 132 | import matplotlib 133 | matplotlib.use('Agg') 134 | from matplotlib import pyplot as plt 135 | plt.figure() 136 | tike.view.plot_eigen_weights(weights) 137 | plt.suptitle('weights') 138 | plt.tight_layout() 139 | plt.savefig(f'{fname}/weights.png') 140 | -------------------------------------------------------------------------------- /tests/ptycho/ortho-in.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/ptycho/ortho-in.mat -------------------------------------------------------------------------------- /tests/ptycho/ortho-out.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/ptycho/ortho-out.mat -------------------------------------------------------------------------------- /tests/ptycho/templates.py: -------------------------------------------------------------------------------- 1 | import os 2 | import bz2 3 | import typing 4 | 5 | import numpy as np 6 | import cupy as cp 7 | 8 | from .io import data_dir 9 | 10 | import tike.ptycho 11 | import tike.communicators 12 | 13 | 14 | class SiemensStarSetup(): 15 | """Implements a setUp function which loads the siemens start dataset.""" 16 | 17 | def setUp(self, filename='siemens-star-small.npz.bz2'): 18 | """Load a dataset for reconstruction.""" 19 | dataset_file = os.path.join(data_dir, filename) 20 | with bz2.open(dataset_file, 'rb') as f: 21 | archive = np.load(f) 22 | self.scan = 
archive['scan'][0] 23 | self.data = archive['data'][0] 24 | self.probe = archive['probe'][0] 25 | self.scan -= np.amin(self.scan, axis=-2) - 20 26 | self.probe = tike.ptycho.probe.add_modes_cartesian_hermite( 27 | self.probe, 5) 28 | self.probe = tike.ptycho.probe.adjust_probe_power(self.probe) 29 | self.probe, _ = tike.ptycho.probe.orthogonalize_eig(self.probe) 30 | 31 | with tike.communicators.Comm(1, mpi=tike.communicators.MPIComm) as comm: 32 | mask = tike.cluster.by_scan_stripes( 33 | self.scan, 34 | n=comm.mpi.size, 35 | fly=1, 36 | axis=0, 37 | )[comm.mpi.rank] 38 | self.scan = self.scan[mask] 39 | self.data = self.data[mask] 40 | 41 | self.psi = np.full( 42 | (1, 600, 600), 43 | dtype=np.complex64, 44 | fill_value=np.complex64(0.5 + 0j), 45 | ) 46 | 47 | 48 | class SingersSetup: 49 | """Implements a setUp function which loads a simulated multislice dataset.""" 50 | 51 | def setUp(self): 52 | """Load a dataset for reconstruction.""" 53 | with bz2.open(os.path.join(data_dir, "singers.npz.bz2"), "rb") as f: 54 | psi = np.load(f) 55 | with bz2.open(os.path.join(data_dir, "siemens-star-small.npz.bz2"), "rb") as f: 56 | archive = np.load(f) 57 | probe = archive["probe"][0] 58 | scan = ( 59 | np.random.rand(1024, 2) 60 | * np.array( 61 | [ 62 | psi.shape[-2] - probe.shape[-2] - 2, 63 | psi.shape[-1] - probe.shape[-1] - 2, 64 | ] 65 | ) 66 | + 1 67 | ) 68 | data = tike.ptycho.simulate(probe.shape[-1], probe=probe, scan=scan, psi=psi) 69 | self.data = data 70 | self.probe = probe 71 | self.scan = scan 72 | 73 | with tike.communicators.Comm(1, mpi=tike.communicators.MPIComm) as comm: 74 | mask = tike.cluster.by_scan_stripes( 75 | self.scan, 76 | n=comm.mpi.size, 77 | fly=1, 78 | axis=0, 79 | )[comm.mpi.rank] 80 | self.scan = self.scan[mask] 81 | self.data = self.data[mask] 82 | 83 | self.psi = np.full( 84 | psi.shape, 85 | dtype=np.complex64, 86 | fill_value=np.complex64(0.5 + 0j), 87 | ) 88 | 89 | 90 | try: 91 | from mpi4py import MPI 92 | _mpi_size = 
MPI.COMM_WORLD.Get_size() 93 | _mpi_rank = MPI.COMM_WORLD.Get_rank() 94 | except ImportError: 95 | _mpi_size = 1 96 | _mpi_rank = 0 97 | 98 | _device_per_rank = max(1, cp.cuda.runtime.getDeviceCount() // _mpi_size) 99 | _base_device = (_device_per_rank * _mpi_rank) % cp.cuda.runtime.getDeviceCount() 100 | _gpu_indices = tuple((i + _base_device) % cp.cuda.runtime.getDeviceCount() 101 | for i in range(_device_per_rank)) 102 | 103 | 104 | class MPIAndGPUInfo(): 105 | """Provides mpi rank and gpu index information.""" 106 | 107 | mpi_size: int = _mpi_size 108 | mpi_rank: int = _mpi_rank 109 | gpu_indices: typing.Tuple[int] = _gpu_indices 110 | 111 | 112 | class ReconstructTwice(MPIAndGPUInfo): 113 | """Call tike.ptycho reconstruct twice in a loop.""" 114 | 115 | def template_consistent_algorithm(self, *, data, params): 116 | """Check ptycho.solver.algorithm for consistency.""" 117 | with cp.cuda.Device(self.gpu_indices[0]): 118 | # Call twice to check that reconstruction continuation is correct 119 | for _ in range(2): 120 | params = tike.ptycho.reconstruct( 121 | data=data, 122 | parameters=params, 123 | num_gpu=self.gpu_indices, 124 | use_mpi=self.mpi_size > 1, 125 | ) 126 | 127 | print() 128 | print('\n'.join(f'{c[0]:1.3e}' for c in params.algorithm_options.costs)) 129 | return params 130 | -------------------------------------------------------------------------------- /tests/ptycho/test_multigrid.py: -------------------------------------------------------------------------------- 1 | import bz2 2 | import os 3 | 4 | import cupy as cp 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import pytest 8 | import unittest 9 | 10 | import tike.ptycho 11 | from tike.ptycho.solvers.options import ( 12 | _resize_fft, 13 | _resize_spline, 14 | _resize_cubic, 15 | _resize_lanczos, 16 | _resize_linear, 17 | ) 18 | 19 | from .templates import _mpi_size 20 | from .io import result_dir, data_dir 21 | from .test_ptycho import PtychoRecon 22 | 23 | output_folder = 
os.path.join(result_dir, 'multigrid') 24 | 25 | 26 | @pytest.mark.parametrize("function", [ 27 | _resize_fft, 28 | _resize_spline, 29 | _resize_linear, 30 | _resize_cubic, 31 | _resize_lanczos, 32 | ]) 33 | def test_resample(function, filename='siemens-star-small.npz.bz2'): 34 | 35 | os.makedirs(output_folder, exist_ok=True) 36 | 37 | dataset_file = os.path.join(data_dir, filename) 38 | with bz2.open(dataset_file, 'rb') as f: 39 | archive = np.load(f) 40 | probe = archive['probe'][0] 41 | 42 | for i in [0.25, 0.50, 1.0, 2.0, 4.0]: 43 | p1 = function(probe, i) 44 | flattened = np.concatenate( 45 | p1.reshape((-1, *p1.shape[-2:])), 46 | axis=1, 47 | ) 48 | plt.imsave( 49 | f'{output_folder}/{function.__name__}-probe-ampli-{i}.png', 50 | np.abs(flattened), 51 | ) 52 | plt.imsave( 53 | f'{output_folder}/{function.__name__}-probe-phase-{i}.png', 54 | np.angle(flattened), 55 | ) 56 | 57 | 58 | @unittest.skipIf( 59 | _mpi_size > 1, 60 | reason="MPI not implemented for multi-grid.", 61 | ) 62 | class ReconMultiGrid(): 63 | """Test ptychography multi-grid reconstruction method.""" 64 | 65 | def interp(self, x, f): 66 | pass 67 | 68 | def template_consistent_algorithm(self, *, data, params): 69 | """Check ptycho.solver.algorithm for consistency.""" 70 | if _mpi_size > 1: 71 | raise NotImplementedError() 72 | 73 | with cp.cuda.Device(self.gpu_indices[0]): 74 | parameters = tike.ptycho.reconstruct_multigrid( 75 | parameters=params, 76 | data=self.data, 77 | num_gpu=self.gpu_indices, 78 | use_mpi=self.mpi_size > 1, 79 | num_levels=2, 80 | interp=self.interp, 81 | ) 82 | 83 | print() 84 | print('\n'.join( 85 | f'{c[0]:1.3e}' for c in parameters.algorithm_options.costs)) 86 | return parameters 87 | 88 | 89 | class TestPtychoReconMultiGridFFT( 90 | ReconMultiGrid, 91 | PtychoRecon, 92 | unittest.TestCase, 93 | ): 94 | 95 | post_name = '-multigrid-fft' 96 | 97 | def interp(self, x, f): 98 | return _resize_fft(x, f) 99 | 100 | 101 | if False: 102 | # Don't need to run these tests on 
CI every time. 103 | 104 | class TestPtychoReconMultiGridLinear(PtychoReconMultiGrid, TestPtychoRecon, 105 | unittest.TestCase): 106 | 107 | post_name = '-multigrid-linear' 108 | 109 | def interp(self, x, f): 110 | return _resize_linear(x, f) 111 | 112 | class TestPtychoReconMultiGridCubic(PtychoReconMultiGrid, TestPtychoRecon, 113 | unittest.TestCase): 114 | 115 | post_name = '-multigrid-cubic' 116 | 117 | def interp(self, x, f): 118 | return _resize_cubic(x, f) 119 | 120 | class TestPtychoReconMultiGridLanczos(PtychoReconMultiGrid, TestPtychoRecon, 121 | unittest.TestCase): 122 | 123 | post_name = '-multigrid-lanczos' 124 | 125 | def interp(self, x, f): 126 | return _resize_lanczos(x, f) 127 | 128 | class TestPtychoReconMultiGridSpline(PtychoReconMultiGrid, TestPtychoRecon, 129 | unittest.TestCase): 130 | 131 | post_name = '-multigrid-spline' 132 | 133 | def interp(self, x, f): 134 | return _resize_spline(x, f) 135 | -------------------------------------------------------------------------------- /tests/ptycho/test_online.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | 5 | import tike.ptycho 6 | 7 | from .test_ptycho import PtychoRecon 8 | from .templates import _mpi_size 9 | 10 | 11 | @unittest.skipIf( 12 | True, 13 | reason="Data addition method broken until further notice.", 14 | ) 15 | class TestPtychoOnline(PtychoRecon, unittest.TestCase): 16 | """Test ptychography reconstruction when data is streaming.""" 17 | 18 | post_name = "-online" 19 | 20 | def setUp(self, chunks=16) -> None: 21 | """Modify the setup data to have streaming data.""" 22 | PtychoRecon.setUp(self) 23 | data = np.array_split(self.data, chunks, axis=0) 24 | scan = np.array_split(self.scan, chunks, axis=0) 25 | assert len(data) == chunks 26 | assert len(scan) == chunks 27 | 28 | self.data = data[0] 29 | self.scan = scan[0] 30 | self.data_more = data[1:] 31 | self.scan_more = scan[1:] 32 | 33 | def 
template_consistent_algorithm(self, *, data, params): 34 | """Call tike.ptycho.Reconstruction with streaming data.""" 35 | if self.mpi_size > 1: 36 | raise NotImplementedError() 37 | 38 | with tike.ptycho.Reconstruction(parameters=params, 39 | data=data) as context: 40 | context.iterate(2) 41 | for d, s in zip(self.data_more, self.scan_more): 42 | context.append_new_data( 43 | new_data=d, 44 | new_scan=s, 45 | ) 46 | context.iterate(2) 47 | result = context.parameters 48 | print() 49 | print('\n'.join(f'{c[0]:1.3e}' for c in result.algorithm_options.costs)) 50 | return result 51 | -------------------------------------------------------------------------------- /tests/test_align.py: -------------------------------------------------------------------------------- 1 | import lzma 2 | import os 3 | import pickle 4 | import unittest 5 | 6 | import numpy as np 7 | 8 | import tike.align 9 | 10 | __author__ = "Daniel Ching, Viktor Nikitin" 11 | __copyright__ = "Copyright (c) 2020, UChicago Argonne, LLC." 12 | __docformat__ = 'restructuredtext en' 13 | 14 | testdir = os.path.dirname(__file__) 15 | 16 | 17 | class TestAlignRecon(unittest.TestCase): 18 | """Test alignment reconstruction methods.""" 19 | 20 | def create_dataset(self, dataset_file): 21 | """Create a dataset for testing this module. 22 | 23 | Only called with setUp detects that `dataset_file` has been deleted. 
24 | """ 25 | import libimage 26 | amplitude = libimage.load("cryptomeria", 128) 27 | phase = libimage.load("bombus", 128) 28 | original = amplitude * np.exp(1j * phase * np.pi) 29 | self.original = np.expand_dims(original, axis=0).astype('complex64') 30 | 31 | np.random.seed(0) 32 | self.flow = np.empty((*self.original.shape, 2), dtype='float32') 33 | self.flow[..., :] = 5 * (np.random.rand(2) - 0.5) 34 | 35 | self.shift = 2 * (np.random.rand(*self.original.shape[:-2], 2) - 0.5) 36 | 37 | self.data = tike.align.simulate( 38 | original=self.original, 39 | flow=self.flow, 40 | shift=self.shift, 41 | padded_shape=None, 42 | angle=None, 43 | ) 44 | 45 | setup_data = [ 46 | self.data, 47 | self.original, 48 | self.flow, 49 | self.shift, 50 | ] 51 | 52 | with lzma.open(dataset_file, 'wb') as file: 53 | pickle.dump(setup_data, file) 54 | 55 | def setUp(self): 56 | """Load a dataset for reconstruction.""" 57 | dataset_file = os.path.join(testdir, 'data/algin_setup.pickle.lzma') 58 | if not os.path.isfile(dataset_file): 59 | self.create_dataset(dataset_file) 60 | with lzma.open(dataset_file, 'rb') as file: 61 | [ 62 | self.data, 63 | self.original, 64 | self.flow, 65 | self.shift, 66 | ] = pickle.load(file) 67 | 68 | def test_consistent_simulate(self): 69 | """Check align.simulate for consistency.""" 70 | data = tike.align.simulate( 71 | original=self.original, 72 | flow=self.flow, 73 | shift=self.shift, 74 | padded_shape=None, 75 | angle=None, 76 | ) 77 | assert data.dtype == 'complex64', data.dtype 78 | np.testing.assert_array_equal(data.shape, self.data.shape) 79 | np.testing.assert_allclose(data, self.data, atol=1e-6) 80 | 81 | def test_align_cross_correlation(self): 82 | """Check that align.solvers.cross_correlation works.""" 83 | result = tike.align.reconstruct( 84 | unaligned=self.data, 85 | original=self.original, 86 | algorithm='cross_correlation', 87 | upsample_factor=1e3, 88 | ) 89 | shift = result['shift'] 90 | assert shift.dtype == 'float32', shift.dtype 91 | 
# np.testing.assert_array_equal(shift.shape, self.shift.shape) 92 | np.testing.assert_allclose(shift, 93 | self.flow[:, 0, 0] + self.shift, 94 | atol=1e-1) 95 | 96 | def test_align_farneback(self): 97 | """Check that align.solvers.farneback works.""" 98 | result = tike.align.solvers.farneback( 99 | op=None, 100 | unaligned=np.angle(self.data), 101 | original=np.angle(self.original), 102 | ) 103 | shift = result['flow'] 104 | assert shift.dtype == 'float32', shift.dtype 105 | np.testing.assert_array_equal(shift.shape, (*self.original.shape, 2)) 106 | h, w = shift.shape[1:3] 107 | np.testing.assert_allclose(shift[:, h // 2, w // 2, :], 108 | self.flow[:, 0, 0] + self.shift, 109 | atol=1e-1) 110 | 111 | 112 | if __name__ == '__main__': 113 | unittest.main() 114 | -------------------------------------------------------------------------------- /tests/test_linalg.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import cupy as cp 4 | 5 | import tike.linalg 6 | import tike.random 7 | import tike.precision 8 | 9 | 10 | def test_norm(): 11 | # Complex inner product is equal to square of complex norm 12 | a = tike.random.cupy_complex(5) 13 | cp.testing.assert_allclose( 14 | tike.precision.floating(1.0), 15 | cp.linalg.norm(a / cp.linalg.norm(a)), 16 | rtol=1e-6, 17 | ) 18 | cp.testing.assert_allclose( 19 | cp.sqrt(tike.linalg.inner(a, a)), 20 | cp.linalg.norm(a), 21 | rtol=1e-6, 22 | ) 23 | 24 | 25 | def test_lstsq(): 26 | a = tike.random.cupy_complex(5, 1, 4, 3, 3) 27 | x = tike.random.cupy_complex(5, 1, 4, 3, 1) 28 | w = cp.random.random( 29 | size=(5, 1, 4, 3), 30 | dtype=tike.precision.floating, 31 | ) 32 | b = a @ x 33 | x1 = tike.linalg.lstsq(a, b, weights=w) 34 | cp.testing.assert_allclose(x1, x, rtol=1e-2, atol=0) 35 | 36 | 37 | def test_projection(): 38 | # Tests that we can make an orthogonal vector with this projection operator 39 | a = tike.random.cupy_complex(5) 40 | b = tike.random.cupy_complex(5) 
41 | pab = tike.linalg.projection(a, b) 42 | pba = tike.linalg.projection(b, a) 43 | assert abs(tike.linalg.inner(a - pab, b)) < 1e-6 44 | assert abs(tike.linalg.inner(a, b - pba)) < 1e-6 45 | 46 | 47 | class Orthogonal(unittest.TestCase): 48 | 49 | def setUp(self): 50 | self.x = tike.random.cupy_complex(1, 4, 3, 3) 51 | 52 | def test_gram_schmidt_single_vector(self): 53 | with self.assertRaises(ValueError): 54 | y = tike.linalg.orthogonalize_gs(self.x, axis=(0, 1, 2, 3)) 55 | 56 | def test_gram_schmidt_single_axis(self): 57 | y = tike.linalg.orthogonalize_gs(self.x) 58 | assert self.x.shape == y.shape 59 | 60 | def test_gram_schmidt_multi_axis(self): 61 | y = tike.linalg.orthogonalize_gs(self.x, axis=(1, -1)) 62 | assert self.x.shape == y.shape 63 | 64 | def test_gram_schmidt_orthogonal(self, axis=(-2, -1)): 65 | u = tike.linalg.orthogonalize_gs(self.x, axis=axis) 66 | for i in range(4): 67 | for j in range(i + 1, 4): 68 | error = abs( 69 | tike.linalg.inner( 70 | u[:, i:i + 1], 71 | u[:, j:j + 1], 72 | axis=axis, 73 | )) 74 | assert cp.all(error < 1e-6) 75 | -------------------------------------------------------------------------------- /tests/test_opt.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tike.opt 3 | 4 | class AlgorithmOptionsStub: 5 | 6 | def __init__(self, costs, window=5) -> None: 7 | self.costs = costs 8 | self.convergence_window = window 9 | 10 | 11 | def test_is_converged(): 12 | assert tike.opt.is_converged( 13 | AlgorithmOptionsStub((np.arange(11) / 1234).tolist(), 5)) 14 | assert tike.opt.is_converged( 15 | AlgorithmOptionsStub((np.zeros(11) / 1234).tolist(), 5)) 16 | assert not tike.opt.is_converged( 17 | AlgorithmOptionsStub((-np.arange(11) / 1234).tolist(), 5)) 18 | 19 | 20 | def test_fit_line(): 21 | result = np.around(tike.opt.fit_line_least_squares( 22 | y=np.asarray([0, np.log(0.9573), np.log(0.8386)]), 23 | x=np.asarray([0, 1, 2]), 24 | ), 4) 25 | 
np.testing.assert_array_equal((-0.0880, 0.0148), result) 26 | -------------------------------------------------------------------------------- /tests/test_trajectory.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """Test functions in tike.trajectory.""" 4 | 5 | import numpy as np 6 | from tike.trajectory import * 7 | 8 | __author__ = "Daniel Ching" 9 | __copyright__ = "Copyright (c) 2018, UChicago Argonne, LLC." 10 | __docformat__ = 'restructuredtext en' 11 | 12 | 13 | def test_discrete_trajectory(): 14 | """Check trajectory.discrete_trajectory for stationary probe.""" 15 | 16 | def stationary(t): 17 | """Probe is stationary at location h = 8, v = 8.""" 18 | return 0 * t, 8 + 0 * t, 8 + 0 * t 19 | 20 | answer = discrete_trajectory( 21 | stationary, 22 | tmin=0, 23 | tmax=0.65, 24 | xstep=0.1, 25 | tstep=1, 26 | ) 27 | truth = ([0], [8], [8], [0.65], [0]) 28 | np.testing.assert_equal(answer, truth) 29 | 30 | 31 | def test_coded_exposure(): 32 | """Check trajectory.coded_exposure for correctness.""" 33 | c_time = np.arange(11) 34 | c_dwell = np.ones(11) * 0.5 35 | 36 | time = np.array( 37 | [-1., 0.8, 1.8, 3.0, 4.1, 4.2, 6.1, 7.5, 8.6, 8.9, 8.9, 8.9, 20, 21]) 38 | dwell = np.array( 39 | [0.1, 0.2, 0.4, 0.5, 0.1, 0.1, 0.6, 0.2, 0.2, 2, 0, 0.3, 1.0, 1.0]) 40 | 41 | theta = np.arange(time.size) 42 | v = np.arange(time.size) 43 | h = np.arange(time.size) 44 | 45 | th1, v1, h1, t1, d1, b1 = coded_exposure(theta, v, h, time, dwell, c_time, 46 | c_dwell) 47 | 48 | np.testing.assert_equal(th1, [2, 3, 4, 5, 6, 9, 11, 9]) 49 | np.testing.assert_equal(v1, [2, 3, 4, 5, 6, 9, 11, 9]) 50 | np.testing.assert_equal(h1, [2, 3, 4, 5, 6, 9, 11, 9]) 51 | np.testing.assert_equal(t1, [2., 3., 4.1, 4.2, 6.1, 9., 9., 10.]) 52 | np.testing.assert_allclose(d1, [0.2, 0.5, 0.1, 0.1, 0.4, 0.5, 0.2, 0.5]) 53 | np.testing.assert_equal(b1, [0, 1, 2, 4, 5, 7]) 54 | 55 | 56 | if __name__ == '__main__': 57 
| test_discrete_trajectory() 58 | test_coded_exposure() 59 | -------------------------------------------------------------------------------- /tests/theta_coverage.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedPhotonSource/tike/8e1f93724921be1a443edccc669b4c88ae4a9b6a/tests/theta_coverage.npy --------------------------------------------------------------------------------