├── .circleci ├── codecov_upload.sh ├── config.yml └── early_exit.sh ├── .codecov.yaml ├── .coveragerc ├── .cruft.json ├── .flake8 ├── .gitattributes ├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ ├── label_sync.yml │ └── sub_package_update.yml ├── .gitignore ├── .gitmodules ├── .isort.cfg ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── .rtd-environment.yaml ├── .ruff.toml ├── .sunpy-template.yml ├── CHANGELOG.rst ├── LICENSE.rst ├── MANIFEST.in ├── README.rst ├── _typos.toml ├── changelog └── README.rst ├── codecov.yml ├── docs ├── Makefile ├── code_ref │ ├── asda.rst │ ├── coalignment.rst │ ├── enhance.rst │ ├── granule.rst │ ├── index.rst │ ├── radial.rst │ ├── stara.rst │ ├── time_lag.rst │ ├── trace.rst │ └── utils.rst ├── conf.py ├── index.rst ├── make.bat ├── nitpick-exceptions ├── robots.txt ├── rtd_requirements.txt └── whatsnew │ ├── changelog.rst │ └── index.rst ├── examples ├── README.txt ├── advanced_wow.py ├── calculating_time_lags.py ├── detecting_swirls.py ├── finding_sunspots_using_stara.py ├── multiscale_gaussian_normalization.py ├── radial_gradient_filters.py ├── radial_histogram_equalization.py ├── remove_cosmic_rays.py ├── rgb_composite.py ├── tracing_loops.py └── watroo_wow.py ├── licenses ├── LICENSE.rst ├── LICENSE_ASDA.rst ├── LICENSE_NOISE.rst ├── README.rst └── TEMPLATE_LICENSE.rst ├── pyproject.toml ├── pytest.ini ├── ruff.toml ├── setup.py ├── sunkit-test-env.yaml ├── sunkit_image ├── __init__.py ├── _dev │ ├── __init__.py │ └── scm_version.py ├── asda.py ├── coalignment.py ├── conftest.py ├── data │ ├── README.rst │ ├── __init__.py │ └── test │ │ ├── IDL.txt │ │ ├── __init__.py │ │ ├── aia_171_cutout.fits │ │ ├── asda_correct.npz │ │ ├── asda_vxvy.npz │ │ ├── dkist_photosphere.fits │ │ └── hmi_continuum_test_lowres_data.fits ├── enhance.py ├── granule.py ├── radial.py ├── stara.py ├── tests │ ├── __init__.py │ ├── figure_hashes_mpl_390_ft_261_sunpy_600_astropy_610.json │ ├── helpers.py │ ├── test_asda.py │ ├── 
test_coalignment.py │ ├── test_enhance.py │ ├── test_granule.py │ ├── test_radial.py │ ├── test_stara.py │ ├── test_time_lag.py │ └── test_trace.py ├── time_lag.py ├── trace.py ├── utils │ ├── __init__.py │ ├── decorators.py │ ├── noise.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_noise.py │ │ └── test_utils.py │ └── utils.py └── version.py └── tox.ini /.circleci/codecov_upload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import 3 | curl -Os https://uploader.codecov.io/latest/linux/codecov 4 | curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM 5 | curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig 6 | gpgv codecov.SHA256SUM.sig codecov.SHA256SUM 7 | shasum -a 256 -c codecov.SHA256SUM 8 | chmod +x codecov 9 | ./codecov "$@" 10 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | no-backports: &no-backports 4 | name: Skip any branches called cherry-pick 5 | command: | 6 | if [[ "${CIRCLE_BRANCH}" == *"cherry-pick"* || "${CIRCLE_BRANCH}" == *"backport"* ]]; then 7 | circleci step halt 8 | fi 9 | 10 | skip-check: &skip-check 11 | name: Check for [ci skip] 12 | command: bash .circleci/early_exit.sh 13 | 14 | merge-check: &merge-check 15 | name: Check if we need to merge upstream main 16 | command: | 17 | if [[ -n "${CIRCLE_PR_NUMBER}" ]]; then 18 | git fetch origin --tags 19 | git fetch origin +refs/pull/$CIRCLE_PR_NUMBER/merge:pr/$CIRCLE_PR_NUMBER/merge 20 | git checkout -qf pr/$CIRCLE_PR_NUMBER/merge 21 | fi 22 | 23 | apt-run: &apt-install 24 | name: Install apt packages 25 | command: | 26 | sudo apt update 27 | sudo apt install -y libopenjp2-7 28 | 29 | jobs: 30 | figure: 31 | parameters: 32 | jobname: 
33 | type: string 34 | docker: 35 | - image: cimg/python:3.12 36 | environment: TOXENV=<< parameters.jobname >> 37 | steps: 38 | - run: *no-backports 39 | - checkout 40 | - run: *skip-check 41 | - run: *merge-check 42 | - run: *apt-install 43 | - run: pip install --user -U tox tox-pypi-filter 44 | - run: tox 45 | - run: 46 | name: Running codecov 47 | command: bash -e .circleci/codecov_upload.sh -f ".tmp/${TOXENV}/coverage.xml" 48 | - store_artifacts: 49 | path: .tmp/<< parameters.jobname >>/figure_test_images 50 | - run: 51 | name: "Image comparison page is available at: " 52 | command: echo "${CIRCLE_BUILD_URL}/artifacts/${CIRCLE_NODE_INDEX}/.tmp/${TOXENV}/figure_test_images/fig_comparison.html" 53 | 54 | deploy-reference-images: 55 | parameters: 56 | jobname: 57 | type: string 58 | docker: 59 | - image: cimg/python:3.12 60 | environment: 61 | TOXENV: << parameters.jobname >> 62 | GIT_SSH_COMMAND: ssh -i ~/.ssh/id_rsa_6464b6a8248237ca368fd4690777d921 63 | steps: 64 | - checkout 65 | - run: *skip-check 66 | - run: *merge-check 67 | - run: *apt-install 68 | # Clear out all the ssh keys so that it always uses the write deploy key 69 | - run: ssh-add -D 70 | # Add private key for deploying to the figure tests repo 71 | - add_ssh_keys: 72 | fingerprints: "64:64:b6:a8:24:82:37:ca:36:8f:d4:69:07:77:d9:21" 73 | - run: ssh-keyscan github.com >> ~/.ssh/known_hosts 74 | - run: git config --global user.email "sunpy@circleci" && git config --global user.name "SunPy Circle CI" 75 | - run: git clone git@github.com:sunpy/sunpy-figure-tests.git --depth 1 -b sunkit-image-${CIRCLE_BRANCH} ~/sunpy-figure-tests/ 76 | # Generate Reference images 77 | - run: pip install --user -U tox tox-pypi-filter 78 | - run: rm -rf /home/circleci/sunpy-figure-tests/figures/$TOXENV/* 79 | - run: tox -- --mpl-generate-path=/home/circleci/sunpy-figure-tests/figures/$TOXENV | tee toxlog 80 | - run: | 81 | hashlib=$(grep "^figure_hashes.*\.json$" toxlog) 82 | cp ./sunkit_image/tests/$hashlib 
/home/circleci/sunpy-figure-tests/figures/$TOXENV/ 83 | - run: | 84 | cd ~/sunpy-figure-tests/ 85 | git pull 86 | git status 87 | git add . 88 | git commit -m "Update reference figures from ${CIRCLE_BRANCH}" || echo "No changes to reference images to deploy" 89 | git push 90 | 91 | workflows: 92 | version: 2 93 | figure-tests: 94 | jobs: 95 | - figure: 96 | name: << matrix.jobname >> 97 | matrix: 98 | parameters: 99 | jobname: 100 | - "py312-figure" 101 | - deploy-reference-images: 102 | name: baseline-<< matrix.jobname >> 103 | matrix: 104 | parameters: 105 | jobname: 106 | - "py312-figure" 107 | requires: 108 | - << matrix.jobname >> 109 | filters: 110 | branches: 111 | only: 112 | - main 113 | -------------------------------------------------------------------------------- /.circleci/early_exit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | commitmessage=$(git log --pretty=%B -n 1) 4 | if [[ $commitmessage = *"[ci skip]"* ]] || [[ $commitmessage = *"[skip ci]"* ]]; then 5 | echo "Skipping build because [ci skip] found in commit message" 6 | circleci step halt 7 | fi 8 | -------------------------------------------------------------------------------- /.codecov.yaml: -------------------------------------------------------------------------------- 1 | comment: off 2 | coverage: 3 | status: 4 | project: 5 | default: 6 | threshold: 0.2% 7 | 8 | codecov: 9 | require_ci_to_pass: false 10 | notify: 11 | wait_for_ci: true 12 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | sunkit_image/conftest.py 4 | sunkit_image/*setup_package* 5 | sunkit_image/extern/* 6 | sunkit_image/version* 7 | sunkit_image/cython_version 8 | */sunkit_image/conftest.py 9 | */sunkit_image/*setup_package* 10 | */sunkit_image/extern/* 11 | */sunkit_image/version* 12 | 
*/sunkit_image/data/sample.py 13 | */sunkit_image/data/_sample.py 14 | 15 | [report] 16 | exclude_lines = 17 | # Have to re-enable the standard pragma 18 | pragma: no cover 19 | # Don't complain about packages we have installed 20 | except ImportError 21 | # Don't complain if tests don't hit assertions 22 | raise AssertionError 23 | raise NotImplementedError 24 | # Don't complain about script hooks 25 | def main(.*): 26 | # Ignore branches that don't pertain to this version of Python 27 | pragma: py{ignore_python_version} 28 | # Don't complain about IPython completion helper 29 | def _ipython_key_completions_ 30 | # typing.TYPE_CHECKING is False at runtime 31 | if TYPE_CHECKING: 32 | # Ignore typing overloads 33 | @overload 34 | -------------------------------------------------------------------------------- /.cruft.json: -------------------------------------------------------------------------------- 1 | { 2 | "template": "https://github.com/sunpy/package-template", 3 | "commit": "1bdd28c1e2d725d9ae9d9c0b6ad682d75687f45d", 4 | "checkout": null, 5 | "context": { 6 | "cookiecutter": { 7 | "package_name": "sunkit-image", 8 | "module_name": "sunkit_image", 9 | "short_description": "An image processing toolbox for Solar Physics.", 10 | "author_name": "The SunPy Community", 11 | "author_email": "sunpy@googlegroups.com", 12 | "project_url": "https://sunpy.org", 13 | "github_repo": "sunpy/sunkit-image", 14 | "sourcecode_url": "https://github.com/sunpy/sunkit-image", 15 | "download_url": "https://pypi.org/project/sunkit-image", 16 | "documentation_url": "https://docs.sunpy.org/projects/sunkit-image", 17 | "changelog_url": "https://docs.sunpy.org/projects/sunkit-image/en/stable/whatsnew/changelog.html", 18 | "issue_tracker_url": "https://github.com/sunpy/sunkit-image/issues", 19 | "license": "BSD 2-Clause", 20 | "minimum_python_version": "3.10", 21 | "use_compiled_extensions": "n", 22 | "enable_dynamic_dev_versions": "y", 23 | "include_example_code": "n", 24 |
"include_cruft_update_github_workflow": "y", 25 | "use_extended_ruff_linting": "y", 26 | "_sphinx_theme": "sunpy", 27 | "_parent_project": "", 28 | "_install_requires": "", 29 | "_copy_without_render": [ 30 | "docs/_templates", 31 | "docs/_static", 32 | ".github/workflows/sub_package_update.yml" 33 | ], 34 | "_template": "https://github.com/sunpy/package-template", 35 | "_commit": "1bdd28c1e2d725d9ae9d9c0b6ad682d75687f45d" 36 | } 37 | }, 38 | "directory": null 39 | } 40 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = 3 | # missing-whitespace-around-operator 4 | E225 5 | # missing-whitespace-around-arithmetic-operator 6 | E226 7 | # line-too-long 8 | E501 9 | # unused-import 10 | F401 11 | # undefined-local-with-import-star 12 | F403 13 | # redefined-while-unused 14 | F811 15 | # Line break occurred before a binary operator 16 | W503, 17 | # Line break occurred after a binary operator 18 | W504 19 | max-line-length = 110 20 | exclude = 21 | .git 22 | __pycache__ 23 | docs/conf.py 24 | build 25 | sunkit_image/__init__.py 26 | rst-directives = 27 | plot 28 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *fit binary 2 | *fit.gz binary 3 | *fits binary 4 | *fits.gz binary 5 | *fts binary 6 | *fts.gz binary 7 | *npz binary 8 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "monthly" 7 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml:
-------------------------------------------------------------------------------- 1 | # Main CI Workflow 2 | name: CI 3 | 4 | on: 5 | push: 6 | branches: 7 | - 'main' 8 | - '*.*' 9 | - '!*backport*' 10 | tags: 11 | - 'v*' 12 | - '!*dev*' 13 | - '!*pre*' 14 | - '!*post*' 15 | pull_request: 16 | # Allow manual runs through the web UI 17 | workflow_dispatch: 18 | schedule: 19 | # ┌───────── minute (0 - 59) 20 | # │ ┌───────── hour (0 - 23) 21 | # │ │ ┌───────── day of the month (1 - 31) 22 | # │ │ │ ┌───────── month (1 - 12 or JAN-DEC) 23 | # │ │ │ │ ┌───────── day of the week (0 - 6 or SUN-SAT) 24 | - cron: '0 7 * * 3' # Every Wed at 07:00 UTC 25 | 26 | concurrency: 27 | group: ${{ github.workflow }}-${{ github.ref }} 28 | cancel-in-progress: true 29 | 30 | jobs: 31 | core: 32 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 33 | with: 34 | submodules: false 35 | coverage: codecov 36 | toxdeps: tox-pypi-filter 37 | envs: | 38 | - linux: py312 39 | secrets: 40 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 41 | 42 | sdist_verify: 43 | runs-on: ubuntu-latest 44 | steps: 45 | - uses: actions/checkout@v4 46 | - uses: actions/setup-python@v5 47 | with: 48 | python-version: '3.12' 49 | - run: python -m pip install -U --user build 50 | - run: python -m build . 
--sdist 51 | - run: python -m pip install -U --user twine 52 | - run: python -m twine check dist/* 53 | 54 | test: 55 | needs: [core, sdist_verify] 56 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 57 | with: 58 | submodules: false 59 | coverage: codecov 60 | toxdeps: tox-pypi-filter 61 | posargs: -n auto 62 | envs: | 63 | - windows: py311 64 | - macos: py310 65 | - linux: py310-oldestdeps 66 | - linux: py311-devdeps 67 | secrets: 68 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 69 | 70 | docs: 71 | needs: [core] 72 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 73 | with: 74 | default_python: '3.12' 75 | submodules: false 76 | pytest: false 77 | toxdeps: tox-pypi-filter 78 | libraries: | 79 | apt: 80 | - graphviz 81 | envs: | 82 | - linux: build_docs 83 | 84 | online: 85 | if: "!startsWith(github.event.ref, 'refs/tags/v')" 86 | needs: [test] 87 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 88 | with: 89 | default_python: '3.12' 90 | submodules: false 91 | coverage: codecov 92 | toxdeps: tox-pypi-filter 93 | posargs: -n 1 94 | envs: | 95 | - linux: py312-online 96 | secrets: 97 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 98 | 99 | devdeps: 100 | needs: [test] 101 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@main 102 | with: 103 | submodules: false 104 | coverage: codecov 105 | toxdeps: tox-pypi-filter 106 | posargs: -n auto 107 | envs: | 108 | - linux: py312-devdeps 109 | secrets: 110 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 111 | 112 | conda: 113 | needs: [online] 114 | runs-on: ubuntu-latest 115 | steps: 116 | - uses: actions/checkout@v4 117 | with: 118 | fetch-depth: 0 119 | lfs: true 120 | - uses: conda-incubator/setup-miniconda@v3 121 | with: 122 | installer-url: https://github.com/conda-forge/miniforge/releases/download/24.3.0-0/Miniforge3-24.3.0-0-Linux-x86_64.sh 123 | python-version: "3.12" 124 | activate-environment: sunkit_image_test 
125 | environment-file: sunkit-test-env.yaml 126 | - name: Run tests 127 | shell: bash -el {0} 128 | run: | 129 | conda list 130 | tox -e py312 131 | - uses: codecov/codecov-action@v5 132 | with: 133 | token: ${{ secrets.CODECOV_TOKEN }} 134 | files: ./coverage.xml 135 | 136 | publish: 137 | # Build wheels on PRs only when labelled. Releases will only be published if tagged ^v.* 138 | # see https://github-actions-workflows.openastronomy.org/en/latest/publish.html#upload-to-pypi 139 | if: | 140 | github.event_name != 'pull_request' || 141 | ( 142 | github.event_name == 'pull_request' && 143 | contains(github.event.pull_request.labels.*.name, 'Run publish') 144 | ) 145 | needs: [test, docs] 146 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/publish_pure_python.yml@v1 147 | with: 148 | python-version: '3.12' 149 | test_extras: 'tests' 150 | test_command: 'pytest -p no:warnings --doctest-rst -m "not mpl_image_compare" --pyargs sunkit_image' 151 | submodules: false 152 | secrets: 153 | pypi_token: ${{ secrets.pypi_token }} 154 | -------------------------------------------------------------------------------- /.github/workflows/label_sync.yml: -------------------------------------------------------------------------------- 1 | name: Label Sync 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | # ┌───────── minute (0 - 59) 6 | # │ ┌───────── hour (0 - 23) 7 | # │ │ ┌───────── day of the month (1 - 31) 8 | # │ │ │ ┌───────── month (1 - 12 or JAN-DEC) 9 | # │ │ │ │ ┌───────── day of the week (0 - 6 or SUN-SAT) 10 | - cron: '0 0 * * *' # run every day at midnight UTC 11 | 12 | # Give permissions to write issue labels 13 | permissions: 14 | issues: write 15 | 16 | jobs: 17 | label_sync: 18 | runs-on: ubuntu-latest 19 | name: Label Sync 20 | steps: 21 | - uses: srealmoreno/label-sync-action@850ba5cef2b25e56c6c420c4feed0319294682fd 22 | with: 23 | config-file: https://raw.githubusercontent.com/sunpy/.github/main/labels.yml 24 | 
-------------------------------------------------------------------------------- /.github/workflows/sub_package_update.yml: -------------------------------------------------------------------------------- 1 | # This template is taken from the cruft example code, for further information please see: 2 | # https://cruft.github.io/cruft/#automating-updates-with-github-actions 3 | name: Automatic Update from package template 4 | permissions: 5 | contents: write 6 | pull-requests: write 7 | 8 | on: 9 | # Allow manual runs through the web UI 10 | workflow_dispatch: 11 | schedule: 12 | # ┌───────── minute (0 - 59) 13 | # │ ┌───────── hour (0 - 23) 14 | # │ │ ┌───────── day of the month (1 - 31) 15 | # │ │ │ ┌───────── month (1 - 12 or JAN-DEC) 16 | # │ │ │ │ ┌───────── day of the week (0 - 6 or SUN-SAT) 17 | - cron: '0 7 * * 1' # Every Monday at 7am UTC 18 | 19 | jobs: 20 | update: 21 | runs-on: ubuntu-latest 22 | strategy: 23 | fail-fast: true 24 | steps: 25 | - uses: actions/checkout@v4 26 | 27 | - uses: actions/setup-python@v5 28 | with: 29 | python-version: "3.11" 30 | 31 | - name: Install Cruft 32 | run: python -m pip install git+https://github.com/Cadair/cruft@patch-p1 33 | 34 | - name: Check if update is available 35 | continue-on-error: false 36 | id: check 37 | run: | 38 | CHANGES=0 39 | if [ -f .cruft.json ]; then 40 | if ! cruft check; then 41 | CHANGES=1 42 | fi 43 | else 44 | echo "No .cruft.json file" 45 | fi 46 | 47 | echo "has_changes=$CHANGES" >> "$GITHUB_OUTPUT" 48 | 49 | - name: Run update if available 50 | id: cruft_update 51 | if: steps.check.outputs.has_changes == '1' 52 | run: | 53 | git config --global user.email "${{ github.actor }}@users.noreply.github.com" 54 | git config --global user.name "${{ github.actor }}" 55 | 56 | cruft_output=$(cruft update --skip-apply-ask --refresh-private-variables) 57 | echo $cruft_output 58 | git restore --staged . 
59 | 60 | if [[ "$cruft_output" == *"Failed to cleanly apply the update, there may be merge conflicts."* ]]; then 61 | echo merge_conflicts=1 >> $GITHUB_OUTPUT 62 | else 63 | echo merge_conflicts=0 >> $GITHUB_OUTPUT 64 | fi 65 | 66 | - name: Check if only .cruft.json is modified 67 | id: cruft_json 68 | if: steps.check.outputs.has_changes == '1' 69 | run: | 70 | git status --porcelain=1 71 | if [[ "$(git status --porcelain=1)" == " M .cruft.json" ]]; then 72 | echo "Only .cruft.json is modified. Exiting workflow early." 73 | echo "has_changes=0" >> "$GITHUB_OUTPUT" 74 | else 75 | echo "has_changes=1" >> "$GITHUB_OUTPUT" 76 | fi 77 | 78 | - name: Create pull request 79 | if: steps.cruft_json.outputs.has_changes == '1' 80 | uses: peter-evans/create-pull-request@v7 81 | with: 82 | token: ${{ secrets.GITHUB_TOKEN }} 83 | add-paths: "." 84 | commit-message: "Automatic package template update" 85 | branch: "cruft/update" 86 | delete-branch: true 87 | draft: ${{ steps.cruft_update.outputs.merge_conflicts == '1' }} 88 | title: "Updates from the package template" 89 | labels: | 90 | No Changelog Entry Needed 91 | body: | 92 | This is an autogenerated PR, which will apply the latest changes from the [SunPy Package Template](https://github.com/sunpy/package-template). 93 | If this pull request has been opened as a draft there are conflicts which need fixing.
94 | 95 | **To run the CI on this pull request you will need to close it and reopen it.** 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### Python: https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | tmp/ 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | pip-wheel-metadata/ 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | cover/ 57 | junit/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | db.sqlite3 67 | db.sqlite3-journal 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | # automodapi 79 | docs/api 80 | docs/sg_execution_times.rst 81 | 82 | # PyBuilder 83 | .pybuilder/ 84 | target/ 85 | 86 | # Jupyter Notebook 87 | .ipynb_checkpoints 88 | 89 | # IPython 90 | profile_default/ 91 | ipython_config.py 92 | 93 | # pyenv 94 | # For a library or package, you might want to ignore these files since the code is 95 | # 
intended to run in multiple environments; otherwise, check them in: 96 | # .python-version 97 | 98 | # pipenv 99 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 100 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 101 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 102 | # install all needed dependencies. 103 | #Pipfile.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/#use-with-ide 111 | .pdm.toml 112 | 113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 114 | __pypackages__/ 115 | 116 | # Celery stuff 117 | celerybeat-schedule 118 | celerybeat.pid 119 | 120 | # SageMath parsed files 121 | *.sage.py 122 | 123 | # Environments 124 | .env 125 | .venv 126 | env/ 127 | venv/ 128 | ENV/ 129 | env.bak/ 130 | venv.bak/ 131 | 132 | # Rope project settings 133 | .ropeproject 134 | 135 | # mkdocs documentation 136 | /site 137 | 138 | # mypy 139 | .mypy_cache/ 140 | 141 | # Pyre type checker 142 | .pyre/ 143 | 144 | # IDE 145 | # PyCharm 146 | .idea 147 | 148 | # Spyder project settings 149 | .spyderproject 150 | .spyproject 151 | 152 | ### VScode: https://raw.githubusercontent.com/github/gitignore/master/Global/VisualStudioCode.gitignore 153 | .vscode/* 154 | .vs/* 155 | 156 | ### https://raw.github.com/github/gitignore/master/Global/OSX.gitignore 157 | .DS_Store 158 | .AppleDouble 159 | .LSOverride 160 | 161 | # Icon must ends with two \r. 
162 | Icon 163 | 164 | # Thumbnails 165 | ._* 166 | 167 | # Files that might appear on external disk 168 | .Spotlight-V100 169 | .Trashes 170 | 171 | ### Linux: https://raw.githubusercontent.com/github/gitignore/master/Global/Linux.gitignore 172 | *~ 173 | 174 | # temporary files which can be created if a process still has a handle open of a deleted file 175 | .fuse_hidden* 176 | 177 | # KDE directory preferences 178 | .directory 179 | 180 | # Linux trash folder which might appear on any partition or disk 181 | .Trash-* 182 | 183 | # .nfs files are created when an open file is removed but is still being accessed 184 | .nfs* 185 | 186 | # pytype static type analyzer 187 | .pytype/ 188 | 189 | ### MacOS: https://raw.githubusercontent.com/github/gitignore/master/Global/macOS.gitignore 190 | 191 | # General 192 | .DS_Store 193 | .AppleDouble 194 | .LSOverride 195 | 196 | # Icon must end with two \r 197 | Icon 198 | 199 | 200 | # Thumbnails 201 | ._* 202 | 203 | # Files that might appear in the root of a volume 204 | .DocumentRevisions-V100 205 | .fseventsd 206 | .Spotlight-V100 207 | .TemporaryItems 208 | .Trashes 209 | .VolumeIcon.icns 210 | .com.apple.timemachine.donotpresent 211 | 212 | # Directories potentially created on remote AFP share 213 | .AppleDB 214 | .AppleDesktop 215 | Network Trash Folder 216 | Temporary Items 217 | .apdisk 218 | 219 | ### Windows: https://raw.githubusercontent.com/github/gitignore/master/Global/Windows.gitignore 220 | 221 | # Windows thumbnail cache files 222 | Thumbs.db 223 | ehthumbs.db 224 | ehthumbs_vista.db 225 | 226 | # Dump file 227 | *.stackdump 228 | 229 | # Folder config file 230 | [Dd]esktop.ini 231 | 232 | # Recycle Bin used on file shares 233 | $RECYCLE.BIN/ 234 | 235 | # Windows Installer files 236 | *.cab 237 | *.msi 238 | *.msix 239 | *.msm 240 | *.msp 241 | 242 | # Windows shortcuts 243 | *.lnk 244 | 245 | ### Extra Python Items and SunPy Specific 246 | .hypothesis 247 | .pytest_cache 248 | sunpydata.sqlite 249 | 
sunpydata.sqlite-journal 250 | sunkit_image/_compiler.c 251 | sunkit_image/cython_version.py 252 | sunkit_image/_version.py 253 | docs/_build 254 | docs/generated 255 | docs/api/ 256 | docs/whatsnew/latest_changelog.txt 257 | examples/**/*.csv 258 | figure_test_images* 259 | tags 260 | baseline 261 | 262 | # Release script 263 | .github_cache 264 | 265 | # Misc Stuff 266 | .history 267 | *.orig 268 | .tmp 269 | node_modules/ 270 | package-lock.json 271 | package.json 272 | .prettierrc 273 | 274 | # Log files generated by 'vagrant up' 275 | *.log 276 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunpy/sunkit-image/daf283f0745437bdf07a39037e0d37f1003b8b86/.gitmodules -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | balanced_wrapping = true 3 | skip = 4 | docs/conf.py 5 | sunkit_image/__init__.py 6 | default_section = THIRDPARTY 7 | include_trailing_comma = true 8 | known_astropy = astropy, asdf 9 | known_sunpy = sunpy 10 | known_first_party = sunkit_image 11 | length_sort = false 12 | length_sort_sections = stdlib 13 | line_length = 110 14 | multi_line_output = 3 15 | no_lines_before = LOCALFOLDER 16 | sections = STDLIB, THIRDPARTY, ASTROPY, SUNPY, FIRSTPARTY, LOCALFOLDER 17 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | rev: "v0.11.12" 4 | hooks: 5 | - id: ruff 6 | args: ["--fix", "--unsafe-fixes"] 7 | - repo: https://github.com/PyCQA/isort 8 | rev: 6.0.1 9 | hooks: 10 | - id: isort 11 | exclude: 
".*(.fits|.fts|.fit|.header|.txt|tca.*|extern.*|sunkit_image/extern)$" 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v5.0.0 14 | hooks: 15 | - id: check-ast 16 | - id: check-case-conflict 17 | - id: trailing-whitespace 18 | exclude: ".*(.fits|.fts|.fit|.header|.txt)$" 19 | - id: check-yaml 20 | - id: debug-statements 21 | - id: check-added-large-files 22 | args: ["--enforce-all", "--maxkb=1054"] 23 | exclude: "sunkit_image/data/test/asda_vxvy.npz" 24 | - id: end-of-file-fixer 25 | exclude: ".*(.fits|.fts|.fit|.header|.txt|tca.*|.json)$|^CITATION.rst$" 26 | - id: mixed-line-ending 27 | exclude: ".*(.fits|.fts|.fit|.header|.txt|tca.*)$" 28 | - repo: https://github.com/codespell-project/codespell 29 | rev: v2.4.1 30 | hooks: 31 | - id: codespell 32 | args: [ "--write-changes" ] 33 | - repo: https://github.com/crate-ci/typos 34 | rev: v1 35 | hooks: 36 | - id: typos 37 | - repo: https://github.com/pre-commit/mirrors-mypy 38 | rev: "v1.15.0" 39 | hooks: 40 | - id: mypy 41 | additional_dependencies: ["types-setuptools"] 42 | ci: 43 | autofix_prs: false 44 | autoupdate_schedule: "quarterly" 45 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-lts-latest 5 | tools: 6 | python: "mambaforge-latest" 7 | jobs: 8 | post_checkout: 9 | - git fetch --unshallow || true 10 | pre_install: 11 | - git update-index --assume-unchanged .rtd-environment.yaml docs/conf.py 12 | 13 | conda: 14 | environment: .rtd-environment.yaml 15 | 16 | sphinx: 17 | builder: html 18 | configuration: docs/conf.py 19 | fail_on_warning: false 20 | 21 | formats: 22 | - htmlzip 23 | 24 | python: 25 | install: 26 | - method: pip 27 | extra_requirements: 28 | - all 29 | - docs 30 | path: . 
31 | -------------------------------------------------------------------------------- /.rtd-environment.yaml: -------------------------------------------------------------------------------- 1 | name: rtd_sunkit_image 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python=3.12 6 | - pip 7 | - graphviz!=2.42.*,!=2.43.* 8 | -------------------------------------------------------------------------------- /.ruff.toml: -------------------------------------------------------------------------------- 1 | target-version = "py310" 2 | line-length = 120 3 | exclude = [ 4 | ".git", 5 | "__pycache__", 6 | "build", 7 | "sunkit_image/version.py", 8 | ] 9 | 10 | [lint] 11 | select = [ 12 | "E", 13 | "F", 14 | "W", 15 | "UP", 16 | "PT", 17 | "BLE", 18 | "A", 19 | "C4", 20 | "INP", 21 | "PIE", 22 | "T20", 23 | "RET", 24 | "TID", 25 | "PTH", 26 | "PD", 27 | "PLC", 28 | "PLE", 29 | "FLY", 30 | "NPY", 31 | "PERF", 32 | "RUF", 33 | ] 34 | extend-ignore = [ 35 | "A005", # Module `trace` shadows a Python standard-library module 36 | # pycodestyle (E, W) 37 | "E501", # ignore line length will use a formatter instead 38 | # pyupgrade (UP) 39 | "UP038", # Use | in isinstance - not compatible with models and is slower 40 | # pytest (PT) 41 | "PT001", # Always use pytest.fixture() 42 | "PT023", # Always use () on pytest decorators 43 | # flake8-pie (PIE) 44 | "PIE808", # Disallow passing 0 as the first argument to range 45 | # flake8-use-pathlib (PTH) 46 | "PTH123", # open() should be replaced by Path.open() 47 | # Ruff (RUF) 48 | "RUF003", # Ignore ambiguous quote marks, doesn't allow ' in comments 49 | "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` 50 | "RUF013", # PEP 484 prohibits implicit `Optional` 51 | "RUF015", # Prefer `next(iter(...))` over single element slice 52 | ] 53 | 54 | [lint.per-file-ignores] 55 | "setup.py" = [ 56 | "INP001", # File is part of an implicit namespace package.
57 | ] 58 | "conftest.py" = [ 59 | "INP001", # File is part of an implicit namespace package. 60 | ] 61 | "docs/conf.py" = [ 62 | "E402" # Module imports not at top of file 63 | ] 64 | "docs/*.py" = [ 65 | "INP001", # File is part of an implicit namespace package. 66 | ] 67 | "examples/**.py" = [ 68 | "T201", # allow use of print in examples 69 | "INP001", # File is part of an implicit namespace package. 70 | ] 71 | "__init__.py" = [ 72 | "E402", # Module level import not at top of cell 73 | "F401", # Unused import 74 | "F403", # from {name} import * used; unable to detect undefined names 75 | "F405", # {name} may be undefined, or defined from star imports 76 | ] 77 | "test_*.py" = [ 78 | "E402", # Module level import not at top of cell 79 | ] 80 | 81 | [lint.pydocstyle] 82 | convention = "numpy" 83 | -------------------------------------------------------------------------------- /.sunpy-template.yml: -------------------------------------------------------------------------------- 1 | default_context: 2 | package_name: "sunkit-image" 3 | module_name: "sunkit_image" 4 | short_description: "An image processing toolbox for Solar Physics."
5 | long_description: "file: README.rst" 6 | author_name: "The SunPy Community" 7 | author_email: "sunpy@googlegroups.com" 8 | license: "BSD 3-Clause" 9 | project_url: "http://docs.sunpy.org/projects/sunkit-image/" 10 | project_version: "0.1.0dev" 11 | include_example_code: "n" 12 | include_example_cython_code: "n" 13 | include_cextern_folder: "n" 14 | edit_on_github_extension: "True" 15 | github_project: "sunpy/sunkit-image" 16 | use_travis_ci: "no" 17 | use_appveyor_ci: "no" 18 | use_azure_ci: "y" 19 | use_circle_ci: "y" 20 | use_read_the_docs: "y" 21 | sphinx_theme: "sunpy-sphinx-theme" 22 | initialize_git_repo: "n" 23 | minimum_python_version: "3.7" 24 | -------------------------------------------------------------------------------- /LICENSE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013-2022 The SunPy Developers 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in the 13 | documentation and/or other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 19 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Exclude specific files 2 | # All files which are tracked by git and not explicitly excluded here are included by setuptools_scm 3 | # Prune folders 4 | prune build 5 | prune docs/_build 6 | prune docs/api 7 | global-exclude *.pyc *.o 8 | 9 | # This subpackage is only used in development checkouts 10 | # and should not be included in built tarballs 11 | prune sunkit_image/_dev 12 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ``sunkit-image`` 2 | ================ 3 | 4 | |Latest Version| |codecov| |Powered by NumFOCUS| |Powered by Sunpy| 5 | 6 | .. |Powered by Sunpy| image:: http://img.shields.io/badge/powered%20by-SunPy-orange.svg?style=flat 7 | :target: https://www.sunpy.org 8 | .. |Latest Version| image:: https://img.shields.io/pypi/v/sunkit-image.svg 9 | :target: https://pypi.python.org/pypi/sunkit-image/ 10 | .. |codecov| image:: https://codecov.io/gh/sunpy/sunpy/branch/main/graph/badge.svg 11 | :target: https://codecov.io/gh/sunpy/sunkit-image 12 | .. 
|Powered by NumFOCUS| image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A 13 | :target: http://numfocus.org 14 | 15 | ``sunkit-image`` is a a open-source toolbox for solar physics image processing. 16 | Currently it is an experimental library for solar physics specific image processing routines. 17 | Ideally it will only contain routines that have been published in the literature. 18 | 19 | See `sunkit-image Documentation`_ for instructions on how to install and contribute. 20 | 21 | Usage of Generative AI 22 | ---------------------- 23 | 24 | We expect authentic engagement in our community. 25 | Be wary of posting output from Large Language Models or similar generative AI as comments on GitHub or any other platform, as such comments tend to be formulaic and low quality content. 26 | If you use generative AI tools as an aid in developing code or documentation changes, ensure that you fully understand the proposed changes and can explain why they are the correct approach and an improvement to the current state. 27 | 28 | Code of Conduct 29 | --------------- 30 | 31 | When you are interacting with the SunPy community you are asked to follow our `code of conduct `__. 32 | 33 | .. _sunkit-image Documentation: https://docs.sunpy.org/projects/sunkit-image/en/stable/ 34 | -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | default.extend-ignore-identifiers-re = [ 2 | "NDCube", 3 | "EIT", 4 | "RHE", 5 | # Badly named variables 6 | "iy_ip", 7 | "iy", 8 | "BA", 9 | ] 10 | -------------------------------------------------------------------------------- /changelog/README.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | .. This README was adapted from the pytest changelog readme under the terms of the MIT licence. 
5 | 6 | This directory contains "news fragments" which are short files that contain a small 7 | **ReST**-formatted text that will be added to the next ``CHANGELOG``. 8 | 9 | The ``CHANGELOG`` will be read by users, so this description should be aimed at SunPy users 10 | instead of describing internal changes which are only relevant to the developers. 11 | 12 | Make sure to use full sentences with correct case and punctuation, for example:: 13 | 14 | Add support for Helioprojective coordinates in `sunpy.coordinates.frames`. 15 | 16 | Please try to use Sphnix intersphinx using backticks. 17 | 18 | Each file should be named like ``..rst``, 19 | where ```` is a pull request number, and ```` is one of: 20 | 21 | * ``breaking``: A change which requires users to change code and is not backwards compatible. (Not to be used for removal of deprecated features.) 22 | * ``feature``: New user facing features and any new behavior. 23 | * ``bugfix``: Fixes a reported bug. 24 | * ``doc``: Documentation addition or improvement, like rewording an entire session or adding missing docs. 25 | * ``removal``: Feature deprecation and/or feature removal. 26 | * ``trivial``: A change which has no user facing effect or is tiny change. 27 | 28 | So for example: ``123.feature.rst``, ``456.bugfix.rst``. 29 | 30 | If you are unsure what pull request type to use, don't hesitate to ask in your PR. 31 | 32 | Note that the ``towncrier`` tool will automatically reflow your text, so it will work best 33 | if you stick to a single paragraph, but multiple sentences and links are OK and encouraged. 34 | You can install ``towncrier`` and then run ``towncrier --draft`` if you want to get a 35 | preview of how your change will look in the final release notes. 
36 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | coverage: 3 | status: 4 | project: 5 | default: 6 | threshold: 0.2% 7 | codecov: 8 | require_ci_to_pass: false 9 | notify: 10 | wait_for_ci: true 11 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/code_ref/asda.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.asda 2 | -------------------------------------------------------------------------------- /docs/code_ref/coalignment.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.coalignment 2 | -------------------------------------------------------------------------------- /docs/code_ref/enhance.rst: -------------------------------------------------------------------------------- 1 | .. 
automodapi:: sunkit_image.enhance 2 | -------------------------------------------------------------------------------- /docs/code_ref/granule.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.granule 2 | -------------------------------------------------------------------------------- /docs/code_ref/index.rst: -------------------------------------------------------------------------------- 1 | .. _reference: 2 | 3 | ************* 4 | API Reference 5 | ************* 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | asda 11 | coalignment 12 | enhance 13 | granule 14 | radial 15 | stara 16 | time_lag 17 | trace 18 | utils 19 | -------------------------------------------------------------------------------- /docs/code_ref/radial.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.radial 2 | -------------------------------------------------------------------------------- /docs/code_ref/stara.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.stara 2 | -------------------------------------------------------------------------------- /docs/code_ref/time_lag.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.time_lag 2 | -------------------------------------------------------------------------------- /docs/code_ref/trace.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: sunkit_image.trace 2 | -------------------------------------------------------------------------------- /docs/code_ref/utils.rst: -------------------------------------------------------------------------------- 1 | .. 
automodapi:: sunkit_image.utils 2 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration file for the Sphinx documentation builder. 3 | """ 4 | 5 | import datetime 6 | import os 7 | import pathlib 8 | import warnings 9 | from pathlib import Path 10 | 11 | from packaging.version import Version 12 | 13 | from astropy.utils.exceptions import AstropyDeprecationWarning 14 | from matplotlib import MatplotlibDeprecationWarning 15 | from sunpy.util.exceptions import SunpyDeprecationWarning, SunpyPendingDeprecationWarning 16 | from sunpy_sphinx_theme import PNG_ICON 17 | 18 | # -- Read the Docs Specific Configuration -------------------------------------- 19 | 20 | os.environ["PARFIVE_HIDE_PROGRESS"] = "True" 21 | on_rtd = os.environ.get("READTHEDOCS", None) == "True" 22 | if on_rtd: 23 | os.environ["SUNPY_CONFIGDIR"] = "/home/docs/" 24 | os.environ["HOME"] = "/home/docs/" 25 | os.environ["LANG"] = "C" 26 | os.environ["LC_ALL"] = "C" 27 | 28 | # -- Project information ----------------------------------------------------- 29 | 30 | # The full version, including alpha/beta/rc tags 31 | from sunkit_image import __version__ 32 | 33 | _version = Version(__version__) 34 | version = release = str(_version) 35 | # Avoid "post" appearing in version string in rendered docs 36 | if _version.is_postrelease: 37 | version = release = _version.base_version 38 | # Avoid long githashes in rendered Sphinx docs 39 | elif _version.is_devrelease: 40 | version = release = f"{_version.base_version}.dev{_version.dev}" 41 | is_development = _version.is_devrelease 42 | is_release = not(_version.is_prerelease or _version.is_devrelease) 43 | 44 | project = "sunkit_image" 45 | author = "The SunPy Community" 46 | copyright = f"{datetime.datetime.now(datetime.UTC).year}, {author}" # NOQA: A001 47 | 48 | # -- General configuration 
----------------------------------------------------- 49 | 50 | # Wrap large function/method signatures 51 | maximum_signature_line_length = 80 52 | 53 | # Add any Sphinx extension module names here, as strings. They can be 54 | # extensions coming with Sphinx (named "sphinx.ext.*") or your custom 55 | # ones. 56 | extensions = [ 57 | "matplotlib.sphinxext.plot_directive", 58 | "sphinx_design", 59 | "sphinx_gallery.gen_gallery", 60 | "sphinx.ext.autodoc", 61 | "sphinx.ext.coverage", 62 | "sphinx.ext.doctest", 63 | "sphinx.ext.inheritance_diagram", 64 | "sphinx.ext.intersphinx", 65 | "sphinx.ext.napoleon", 66 | "sphinx.ext.todo", 67 | "sphinx.ext.viewcode", 68 | "sphinx.ext.mathjax", 69 | "sphinx_automodapi.automodapi", 70 | "sphinx_automodapi.smart_resolver", 71 | "sphinx_changelog", 72 | ] 73 | 74 | # Add any paths that contain templates here, relative to this directory. 75 | # templates_path = ["_templates"] 76 | 77 | # List of patterns, relative to source directory, that match files and 78 | # directories to ignore when looking for source files. 79 | # This pattern also affects html_static_path and html_extra_path. 80 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 81 | 82 | # The suffix(es) of source filenames. 83 | # You can specify multiple suffix as a list of string: 84 | source_suffix = ".rst" 85 | 86 | # The master toctree document. 87 | master_doc = "index" 88 | 89 | # Treat everything in single ` as a Python reference. 
90 | default_role = "py:obj" 91 | 92 | # -- Options for intersphinx extension --------------------------------------- 93 | 94 | intersphinx_mapping = { 95 | "python": ( 96 | "https://docs.python.org/3/", 97 | (None, "http://www.astropy.org/astropy-data/intersphinx/python3.inv"), 98 | ), 99 | "numpy": ( 100 | "https://numpy.org/doc/stable/", 101 | (None, "http://www.astropy.org/astropy-data/intersphinx/numpy.inv"), 102 | ), 103 | "scipy": ( 104 | "https://docs.scipy.org/doc/scipy/reference/", 105 | (None, "http://www.astropy.org/astropy-data/intersphinx/scipy.inv"), 106 | ), 107 | "matplotlib": ("https://matplotlib.org/stable", None), 108 | "sunpy": ("https://docs.sunpy.org/en/stable/", None), 109 | "ndcube": ('https://docs.sunpy.org/projects/ndcube/en/stable/', None), 110 | "astropy": ("https://docs.astropy.org/en/stable/", None), 111 | "dask": ("https://docs.dask.org/en/latest", None), 112 | "skimage": ("https://scikit-image.org/docs/stable/", None), 113 | } 114 | 115 | # -- Options for HTML output --------------------------------------------------- 116 | 117 | # The theme to use for HTML and HTML Help pages. See the documentation for 118 | # a list of builtin themes. 119 | html_theme = "sunpy" 120 | 121 | # Render inheritance diagrams in SVG 122 | graphviz_output_format = "svg" 123 | 124 | graphviz_dot_args = [ 125 | "-Nfontsize=10", 126 | "-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif", 127 | "-Efontsize=10", 128 | "-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif", 129 | "-Gfontsize=10", 130 | "-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif", 131 | ] 132 | 133 | # Add any paths that contain custom static files (such as style sheets) here, 134 | # relative to this directory. They are copied after the builtin static files, 135 | # so a file named "default.css" will overwrite the builtin "default.css". 
136 | # html_static_path = ["_static"] 137 | 138 | # By default, when rendering docstrings for classes, sphinx.ext.autodoc will 139 | # make docs with the class-level docstring and the class-method docstrings, 140 | # but not the __init__ docstring, which often contains the parameters to 141 | # class constructors across the scientific Python ecosystem. The option below 142 | # will append the __init__ docstring to the class-level docstring when rendering 143 | # the docs. For more options, see: 144 | # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autoclass_content 145 | autoclass_content = "both" 146 | 147 | # -- Other options ---------------------------------------------------------- 148 | 149 | html_extra_path = ["robots.txt"] 150 | 151 | napoleon_use_rtype = False 152 | 153 | napoleon_google_docstring = False 154 | 155 | # Enable nitpicky mode, which forces links to be non-broken 156 | nitpicky = True 157 | 158 | # This is not used. See docs/nitpick-exceptions file for the actual listing. 
159 | nitpick_ignore = [] 160 | with Path("nitpick-exceptions").open() as f: 161 | for line in f.readlines(): 162 | if line.strip() == "" or line.startswith("#"): 163 | continue 164 | dtype, target = line.split(None, 1) 165 | target = target.strip() 166 | nitpick_ignore.append((dtype, target)) 167 | 168 | # We want to make sure all the following warnings fail the build 169 | warnings.filterwarnings("error", category=SunpyDeprecationWarning) 170 | warnings.filterwarnings("error", category=SunpyPendingDeprecationWarning) 171 | warnings.filterwarnings("error", category=MatplotlibDeprecationWarning) 172 | warnings.filterwarnings("error", category=AstropyDeprecationWarning) 173 | 174 | # For the linkcheck 175 | linkcheck_ignore = [ 176 | r"https://doi.org/\d+", 177 | r"https://element.io/\d+", 178 | r"https://github.com/\d+", 179 | r"https://docs.sunpy.org/\d+", 180 | ] 181 | linkcheck_anchors = False 182 | 183 | # This is added to the end of RST files - a good place to put substitutions to 184 | # be used globally. 185 | rst_epilog = """ 186 | .. SunPy 187 | .. _SunPy: https://sunpy.org 188 | .. _`SunPy mailing list`: https://groups.google.com/group/sunpy 189 | .. 
_`SunPy dev mailing list`: https://groups.google.com/group/sunpy-dev 190 | """ 191 | 192 | # -- Options for the Sphinx gallery ----------------------------------------- 193 | 194 | path = pathlib.Path.cwd() 195 | example_dir = path.parent.joinpath("examples") 196 | sphinx_gallery_conf = { 197 | "backreferences_dir": str(path.joinpath("generated", "modules")), 198 | "filename_pattern": "^((?!skip_).)*$", 199 | "examples_dirs": example_dir, 200 | "gallery_dirs": path.joinpath("generated", "gallery"), 201 | "default_thumb_file": PNG_ICON, 202 | "abort_on_example_error": False, 203 | "plot_gallery": "True", 204 | "remove_config_comments": True, 205 | "only_warn_on_example_error": True, 206 | } 207 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ************ 2 | sunkit-image 3 | ************ 4 | 5 | .. grid:: 3 6 | 7 | .. grid-item:: 8 | 9 | A toolbox of useful image processing routines applicable to solar physics data. 10 | 11 | .. grid-item-card:: API Reference 12 | 13 | .. toctree:: 14 | :maxdepth: 1 15 | 16 | code_ref/index 17 | 18 | .. grid-item-card:: Other info 19 | 20 | .. toctree:: 21 | :maxdepth: 1 22 | 23 | generated/gallery/index 24 | whatsnew/index 25 | 26 | Mission Statement 27 | ================= 28 | 29 | The goal of ``sunkit-image`` is **not** to be a general purpose image processing library. 30 | 31 | The goal of ``sunkit-image`` is to provide access to image processing routines that are: 32 | 33 | 1. Focused on being applied to solar image data. 34 | 2. Are published in the literature or in preparation to be published. 35 | If for any reason, there is doubt to the publication status, the code will only be merged when it's close to actual publication i.e., after approval from the referees. 36 | 3. Widely used throughout the solar physics community. 
37 | Examples include co-alignment routines that compensate for incorrect pointing, solar feature identification algorithms, and filtering functions. 38 | 39 | If the code is already in a released package, we will wrap calls to the existing package in a way that makes it easy to use with `sunpy.map.Map` or `ndcube.NDCube` objects. 40 | Additional modifications to such packages are outside the scope of ``sunkit-image``. 41 | We will not copy code from other packages into this one. 42 | 43 | Installation 44 | ============ 45 | 46 | For detailed installation instructions, see the `installation guide`_ in the ``sunpy`` docs. 47 | This takes you through the options for getting a virtual environment and installing ``sunpy``. 48 | You will need to replace "sunpy" with "sunkit-image". 49 | 50 | Getting Help 51 | ============ 52 | 53 | Stop by our chat room `#sunpy:matrix.org`_ if you have any questions. 54 | 55 | Contributing 56 | ============ 57 | 58 | Help is always welcome so let us know what you like to work on, or check out the `issues page`_ for the list of known outstanding items. 59 | If you would like to get involved, please read our `contributing guide`_, this talks about ``sunpy`` but the same is for ``sunkit-image``. 60 | 61 | If you want help develop ``sunkit-image`` you will need to install it from GitHub. 62 | The best way to do this is to create a new python virtual environment. 63 | Once you have that virtual environment, you will want to fork the repo and then run:: 64 | 65 | $ git clone https://github.com//sunkit-image.git 66 | $ cd sunkit-image 67 | $ pip install -e ".[dev]" 68 | 69 | .. _installation guide: https://docs.sunpy.org/en/stable/tutorial/installation.html 70 | .. _`#sunpy:matrix.org`: https://app.element.io/#/room/#sunpy:openastronomy.org 71 | .. _issues page: https://github.com/sunpy/sunkit-image/issues 72 | .. 
_contributing guide: https://docs.sunpy.org/en/latest/dev_guide/contents/newcomers.html 73 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/nitpick-exceptions: -------------------------------------------------------------------------------- 1 | # Prevents sphinx nitpicky mode picking up on optional 2 | # (see https://github.com/sphinx-doc/sphinx/issues/6861) 3 | # Even if it was "fixed", still broken 4 | py:class optional 5 | # See https://github.com/numpy/numpy/issues/10039 6 | py:obj numpy.datetime64 7 | # There's no specific file or function classes to link to 8 | py:class any type 9 | py:class array-like 10 | py:class file object 11 | py:class function 12 | py:class path-like 13 | py:class str-like 14 | py:class time-like 15 | # Specific units 16 | py:class Unit 17 | py:class Unit('s') 18 | py:class Unit('pix') 19 | # It hates the numpy docstring 20 | 
py:class { "li" 21 | py:class "otsu" 22 | py:class "isodata" 23 | py:class "mean" 24 | py:class "minimum" 25 | py:class "yen" 26 | py:class "triangle" } 27 | py:class {"propagate" 28 | py:class "replace"} 29 | py:class {'center' 30 | py:class 'left' 31 | py:class 'right'} 32 | py:class {'center' 33 | py:class {'center' 34 | py:class {'center' 35 | -------------------------------------------------------------------------------- /docs/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Allow: /*/latest/ 3 | Allow: /en/latest/ # Fallback for bots that don't understand wildcards 4 | Allow: /*/stable/ 5 | Allow: /en/stable/ # Fallback for bots that don't understand wildcards 6 | Disallow: / 7 | -------------------------------------------------------------------------------- /docs/rtd_requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunpy/sunkit-image/daf283f0745437bdf07a39037e0d37f1003b8b86/docs/rtd_requirements.txt -------------------------------------------------------------------------------- /docs/whatsnew/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changelog: 2 | 3 | ************** 4 | Full Changelog 5 | ************** 6 | 7 | .. changelog:: 8 | :towncrier: ../../ 9 | :towncrier-skip-if-empty: 10 | :changelog_file: ../../CHANGELOG.rst 11 | -------------------------------------------------------------------------------- /docs/whatsnew/index.rst: -------------------------------------------------------------------------------- 1 | .. _whatsnew: 2 | 3 | *************** 4 | Release History 5 | *************** 6 | 7 | This page documents the releases for sunkit-image 8 | 9 | .. 
toctree:: 10 | :maxdepth: 1 11 | 12 | changelog 13 | -------------------------------------------------------------------------------- /examples/README.txt: -------------------------------------------------------------------------------- 1 | Example Gallery 2 | =============== 3 | 4 | This gallery contains examples of how to use sunkit-image. Most of these examples 5 | require the SunPy sample data, which you can download by running:: 6 | 7 | >>> import sunpy.data.sample 8 | 9 | Once downloaded the data will be stored in your user directory and you will not 10 | need to download it again. 11 | -------------------------------------------------------------------------------- /examples/advanced_wow.py: -------------------------------------------------------------------------------- 1 | """ 2 | ==================================================== 3 | Advanced Usage of Wavelets Optimized Whitening (WOW) 4 | ==================================================== 5 | 6 | This example demonstrates different options of the Wavelets Optimized Whitening 7 | applied to a `sunpy.map.Map` using `sunkit_image.enhance.wow`. 8 | """ 9 | 10 | import matplotlib.pyplot as plt 11 | 12 | from astropy import units as u 13 | from astropy.coordinates import SkyCoord 14 | from astropy.visualization import AsymmetricPercentileInterval, ImageNormalize, LinearStretch, PowerStretch 15 | 16 | import sunpy.data.sample 17 | import sunpy.map 18 | 19 | import sunkit_image.enhance as enhance 20 | 21 | ########################################################################### 22 | # `sunpy` provides a range of sample data with a number of suitable images, 23 | # however, here will just use AIA 193. 24 | 25 | aia_193 = sunpy.map.Map(sunpy.data.sample.AIA_193_JUN2012) 26 | 27 | ########################################################################### 28 | # We will now crop the southwestern quadrant. This is so we can get a mixture 29 | # of a bright region and the limb. 
30 | 31 | top_right = SkyCoord(1200 * u.arcsec, 0 * u.arcsec, frame=aia_193.coordinate_frame) 32 | bottom_left = SkyCoord(0 * u.arcsec, -1200 * u.arcsec, frame=aia_193.coordinate_frame) 33 | submap_193 = aia_193.submap(bottom_left, top_right=top_right) 34 | 35 | ########################################################################### 36 | # We now will apply different options of the Wavelets Optimized Whitening algorithm. 37 | # The `sunkit_image.enhance.wow` function takes a `sunpy.map.Map` as an input. 38 | # First, we call WOW with no arguments, which returns the default WOW enhancement. 39 | 40 | default_wow = enhance.wow(submap_193) 41 | 42 | ########################################################################### 43 | # Then we can denoise the output using a soft threshold in the three first wavelet 44 | # scales using "sigma = 5, 2, 1". 45 | 46 | denoise_coefficients = [5, 2, 1] 47 | denoised_wow = enhance.wow(submap_193, denoise_coefficients=denoise_coefficients) 48 | 49 | ########################################################################### 50 | # We then run the edge-aware (bilateral) flavor of the algorithm. 51 | # This prevents ringing around sharp edges (e.g., the solar limb 52 | # or very bright features. 53 | 54 | bilateral_wow = enhance.wow(submap_193, bilateral=1) 55 | 56 | ########################################################################### 57 | # This will call the edge-aware algorithm with denoising. 58 | 59 | bilateral_denoised_wow = enhance.wow(submap_193, bilateral=1, denoise_coefficients=denoise_coefficients) 60 | 61 | ########################################################################### 62 | # Finally, we merge the denoised edge-aware enhanced image with the 63 | # gamma-stretched input, with weight "h". 
64 | 65 | gamma = 4 66 | bilateral_denoised_merged_wow = enhance.wow(submap_193, bilateral=1, denoise_coefficients=denoise_coefficients, gamma=gamma, h=0.99) 67 | 68 | ########################################################################### 69 | # Finally, we will plot the full set of outputs created and 70 | # compare that to the original image. 71 | 72 | fig = plt.figure(figsize=(8, 12)) 73 | variations = { 74 | f'Input | Gamma = {gamma} stretch': {'map': submap_193, 'stretch': PowerStretch(1 / gamma)}, 75 | 'WOW | Linear stretch': {'map': default_wow, 'stretch': LinearStretch()}, 76 | 'Denoised WOW': {'map': denoised_wow, 'stretch': LinearStretch()}, 77 | 'Edge-aware WOW': {'map': bilateral_wow, 'stretch': LinearStretch()}, 78 | 'Edge-aware & denoised WOW': {'map': bilateral_denoised_wow, 'stretch': LinearStretch()}, 79 | 'Merged with input': {'map': bilateral_denoised_merged_wow, 'stretch': LinearStretch()} 80 | } 81 | interval = AsymmetricPercentileInterval(1, 99.9) 82 | for i, (title, image) in enumerate(variations.items()): 83 | ax = fig.add_subplot(3, 2, i + 1, projection=image['map']) 84 | image['map'].plot(norm=ImageNormalize(image['map'].data, interval=interval, stretch=image['stretch'])) 85 | ax.set_title(title) 86 | ax.axis('off') 87 | 88 | fig.tight_layout() 89 | 90 | plt.show() 91 | -------------------------------------------------------------------------------- /examples/calculating_time_lags.py: -------------------------------------------------------------------------------- 1 | """ 2 | ========================================== 3 | Computing Cross-Correlations and Time Lags 4 | ========================================== 5 | 6 | This example shows how to compute cross-correlations 7 | between light curves and map the resulting time lags, 8 | those temporal offsets which maximize the cross-correlation 9 | between the two signals, back to an image pixel. 
10 | 11 | This method was developed for studying temporal evolution of AIA intensities 12 | by `Viall and Klimchuk (2012) `__. 13 | 14 | The specific implementation in this package is described in detail 15 | in Appendix C of `Barnes et al. (2019) `__. 16 | """ 17 | # sphinx_gallery_thumbnail_number = 4 18 | 19 | import dask.array 20 | import matplotlib.pyplot as plt 21 | import numpy as np 22 | from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable 23 | 24 | import astropy.units as u 25 | 26 | from sunkit_image.time_lag import cross_correlation, get_lags, max_cross_correlation, time_lag 27 | 28 | ################################################################### 29 | # Consider two timeseries whose peaks are separated in time by some 30 | # interval. We will create a toy model with two Gaussian pulses. In 31 | # practice, this method is often applied to many AIA light curves 32 | # and is described in detail in 33 | # `Viall and Klimchuk (2012) `__. 34 | 35 | 36 | def gaussian_pulse(x, x0, sigma): 37 | return np.exp(-((x - x0) ** 2) / (2 * sigma**2)) 38 | 39 | 40 | time = np.linspace(0, 1, 500) * u.s 41 | s_a = gaussian_pulse(time, 0.4 * u.s, 0.02 * u.s) 42 | s_b = gaussian_pulse(time, 0.6 * u.s, 0.02 * u.s) 43 | plt.plot(time, s_a, label="A") 44 | plt.plot(time, s_b, label="B") 45 | plt.xlabel("Time [s]") 46 | plt.ylabel("Signal") 47 | plt.legend() 48 | 49 | ################################################################### 50 | # The problem we are concerned with is how much do we need shift 51 | # signal A, either forward or backward in time, in order for it to 52 | # best line up with signal B. In other words, what is the time lag 53 | # between A and B. In the context of analyzing light curves from 54 | # AIA, this gives us a proxy for the cooling time between two 55 | # narrowband channels and thus two temperatures. 
To find this,
56 | # we can compute the cross-correlation between the two signals
57 | # and find which "lag" yields the highest correlation.
58 | 
59 | lags = get_lags(time)
60 | cc = cross_correlation(s_a, s_b, lags)
61 | plt.plot(lags, cc)
62 | plt.xlabel("Lag [s]")
63 | plt.ylabel("Cross-correlation, AB")
64 | 
65 | ###################################################################
66 | # Additionally, we can also easily calculate the maximum value of the
67 | # cross-correlation and the associated lag, or the time lag.
68 | 
69 | tl = time_lag(s_a, s_b, time)
70 | max_cc = max_cross_correlation(s_a, s_b, time)
71 | plt.plot(lags, cc)
72 | plt.plot(tl, max_cc, marker="o", ls="", markersize=4)
73 | plt.xlabel("Lag [s]")
74 | plt.ylabel("Cross-correlation, AB")
75 | 
76 | ###################################################################
77 | # As expected from the first intensity plot, we find that the lag
78 | # which maximizes the cross-correlation is approximately the separation
79 | # between the mean values of the Gaussian pulses.
80 | 
81 | print("Time lag, A -> B = ", tl)
82 | 
83 | ###################################################################
84 | # Note that a positive time lag indicates that signal A has to be
85 | # shifted forward in time to match signal B. By reversing the order
86 | # of the inputs, we also reverse the sign of the time lag.
87 | 
88 | print("Time lag, B -> A =", time_lag(s_b, s_a, time))
89 | 
90 | ###################################################################
91 | # The real power in the time lag approach is its ability to reveal
92 | # large scale patterns of cooling in images of the Sun, particularly
93 | # in active regions. All of these functions can also be applied to
94 | # intensity data cubes to create a "time lag map".
95 | # 96 | # As an example, we'll create a fake data cube by repeating Gaussian 97 | # pulses with varying means and then add some noise to them 98 | 99 | rng = np.random.default_rng() 100 | means_a = np.tile(rng.standard_normal((10, 10)), (*time.shape, 1, 1)) * u.s 101 | means_b = np.tile(rng.standard_normal((10, 10)), (*time.shape, 1, 1)) * u.s 102 | noise = 0.2 * (-0.5 + rng.random(means_a.shape)) 103 | s_a = gaussian_pulse(np.tile(time, means_a.shape[1:] + (1,)).T, means_a, 0.02 * u.s) + noise 104 | s_b = gaussian_pulse(np.tile(time, means_b.shape[1:] + (1,)).T, means_b, 0.02 * u.s) + noise 105 | 106 | ################################################################### 107 | # We can now compute a map of the time lag and maximum cross correlation. 108 | 109 | max_cc_map = max_cross_correlation(s_a, s_b, time) 110 | tl_map = time_lag(s_a, s_b, time) 111 | 112 | fig = plt.figure(figsize=(10, 5)) 113 | 114 | ax = fig.add_subplot(121) 115 | im = ax.imshow(tl_map.value, cmap="RdBu", vmin=-1, vmax=1) 116 | cax = make_axes_locatable(ax).append_axes("right", size="5%", pad="1%") 117 | cb = fig.colorbar(im, cax=cax) 118 | cb.set_label(r"$\tau_{AB}$ [s]") 119 | 120 | ax = fig.add_subplot(122) 121 | im = ax.imshow(max_cc_map.value, vmin=0, vmax=1, cmap="magma") 122 | cax = make_axes_locatable(ax).append_axes("right", size="5%", pad="1%") 123 | cb = fig.colorbar(im, cax=cax) 124 | cb.set_label(r"Max cross-correlation") 125 | 126 | fig.tight_layout() 127 | 128 | ################################################################### 129 | # In practice, these data cubes are often very large, sometimes many 130 | # GB, such that doing operations like these on them can be prohibitively 131 | # expensive. All of these operations can be parallelized and distributed 132 | # easily by passing in the intensity cubes as Dask arrays. 
Note that we
133 | # strip the units off of our signal arrays before creating the Dask arrays
134 | # from them, as creating a Dask array from an `~astropy.units.Quantity` may
135 | # result in undefined behavior.
136 | 
137 | s_a = dask.array.from_array(s_a.value, chunks=s_a.shape[:1] + (5, 5))
138 | s_b = dask.array.from_array(s_b.value, chunks=s_b.shape[:1] + (5, 5))
139 | tl_map = time_lag(s_a, s_b, time)
140 | print(tl_map)
141 | 
142 | ###################################################################
143 | # Rather than being computed "eagerly", :func:`~sunkit_image.time_lag.time_lag` returns
144 | # a graph of the computation that can be handed off to a distributed
145 | # scheduler to be run in parallel. This is extremely advantageous for
146 | # large data cubes as these operations are likely to exceed the
147 | # memory limits of most desktop machines and are easily accelerated through
148 | # parallelism.
149 | 
150 | plt.show()
151 | 
--------------------------------------------------------------------------------
/examples/detecting_swirls.py:
--------------------------------------------------------------------------------
1 | """
2 | ========================================
3 | Detecting Swirls in the Solar Atmosphere
4 | ========================================
5 | 
6 | This example demonstrates the use of the Automated Swirl Detection Algorithm (ASDA) in detecting and plotting swirls (vortices) in a 2D velocity flow field.
7 | 
8 | `More information on the algorithm can be found in the original paper. `__
9 | 
10 | Unfortunately, currently ASDA within sunkit-image only works on arrays.
11 | """ 12 | # sphinx_gallery_thumbnail_number = 3 13 | 14 | import matplotlib.pyplot as plt 15 | import numpy as np 16 | from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable 17 | 18 | from sunkit_image.asda import calculate_gamma_values, get_vortex_edges, get_vortex_properties 19 | from sunkit_image.data.test import get_test_filepath 20 | 21 | ########################################################################### 22 | # This example demonstrates how to find swirls (vortices) in a 2D velocity flow field. 23 | # 24 | # Ideally you will want to calculate the velocity field from your data, but for this example 25 | # we will use precomputed flow field data from our test dataset. 26 | # 27 | # `pyflct `__ is a good tool to calculate the velocity field from your data. 28 | 29 | vxvy = np.load(get_test_filepath("asda_vxvy.npz")) 30 | # This is the original data used to calculate the velocity field 31 | data = vxvy["data"] 32 | # These are the velocity components in the x and y directions 33 | vx = vxvy["vx"] 34 | vy = vxvy["vy"] 35 | 36 | ########################################################################### 37 | # Before we proceed with swirl detection, let's understand data by 38 | # visualizing the velocity magnitude. 39 | 40 | # Calculate velocity magnitude 41 | velocity_magnitude = np.sqrt(vx**2 + vy**2) 42 | fig = plt.figure(figsize=(10, 7)) 43 | ax = fig.add_subplot(111) 44 | 45 | im = ax.imshow(velocity_magnitude, origin="lower", cmap="viridis") 46 | ax.set_title("Velocity Magnitude") 47 | cax = make_axes_locatable(ax).append_axes("bottom", size="5%", pad="5%") 48 | cbar = fig.colorbar(im, ax=cax, orientation="horizontal") 49 | cbar.set_label("Velocity (m/s)") 50 | 51 | ########################################################################### 52 | # Now we will perform swirl detection using the methods provided by `~sunkit_image.asda`. 53 | # 54 | # The first step is to calculate the Gamma values. 
Gamma1 (Γ1) is useful for identifying 55 | # vortex centers, while Gamma2 (Γ2) helps in detecting the edges of vortices. 56 | # These values are calculated based on the method proposed by `Graftieaux et al. (2001) `__ 57 | # and are used to quantify the swirling strength and structure of the flow field. 58 | # 59 | # To enhance the detection of smaller swirls and improve the accuracy in identifying 60 | # vortex boundaries, a factor is introduced that magnifies the original data. This 61 | # magnification aids in enhancing the resolution of the velocity field, allowing for 62 | # more precise detection of sub-grid vortex centers and boundaries. By default, the 63 | # factor is set to 1, but it can be adjusted based on the resolution of the data. 64 | 65 | gamma = calculate_gamma_values(vx, vy) 66 | 67 | ########################################################################### 68 | # Next, we identify the edges and centers of the swirls using the calculated Gamma values. 69 | # The :func:`~sunkit_image.asda.get_vortex_edges` function processes the Gamma2 values 70 | # to locate the boundaries of vortices and uses Gamma1 to pinpoint their centers. 71 | 72 | center_edge = get_vortex_edges(gamma) 73 | 74 | ########################################################################### 75 | # We can also determine various properties of the identified vortices, such as their 76 | # expanding speed (ve), rotational speed (vr), center velocity (vc), and average 77 | # observational values (ia). This information can be useful for detailed analysis 78 | # and is calculated using the :func:`~sunkit_image.asda.get_vortex_properties` function. 79 | 80 | ve, vr, vc, ia = get_vortex_properties(vx, vy, center_edge, image=data) 81 | 82 | ########################################################################### 83 | # Now we will plot the Gamma1 and Gamma2 values, which highlight the vortex 84 | # centers and edges respectively. 
85 | 86 | fig = plt.figure(figsize=(10, 10)) 87 | ax = fig.add_subplot(211) 88 | ax2 = fig.add_subplot(212) 89 | 90 | ax.imshow(gamma[..., 0], origin="lower") 91 | ax.set_title(r"$\Gamma_1$") 92 | ax.set(ylabel="y") 93 | ax.set_xticklabels([]) 94 | 95 | ax2.imshow(gamma[..., 1], origin="lower") 96 | ax2.set_title(r"$\Gamma_2$") 97 | ax2.set(xlabel="x", ylabel="y") 98 | 99 | fig.tight_layout() 100 | 101 | ########################################################################### 102 | # Finally, we can create a swirl map visualization with streamlines. 103 | 104 | fig = plt.figure(figsize=(10, 7)) 105 | ax = fig.add_subplot(111) 106 | 107 | ax.imshow(data, origin="lower", cmap="gray") 108 | 109 | # Overlay streamlines 110 | Y, X = np.mgrid[0:512, 0:1024] 111 | ax.streamplot(X, Y, vx, vy, color="green") 112 | 113 | # Mark and number swirl centers 114 | centers = np.array(center_edge["center"]) 115 | for i, center in enumerate(centers): 116 | ax.plot(center[0], center[1], "bo") 117 | ax.text(center[0], center[1], str(i), color="red", ha="right", va="bottom") 118 | 119 | # Overlay swirl edges 120 | for edge in center_edge["edge"]: 121 | edge = np.array(edge) 122 | ax.plot(edge[:, 0], edge[:, 1], "b--") 123 | 124 | ax.set_title("Swirl Map Region with Streamlines") 125 | ax.set(xlabel="x", ylabel="y") 126 | fig.tight_layout() 127 | 128 | plt.show() 129 | -------------------------------------------------------------------------------- /examples/finding_sunspots_using_stara.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================ 3 | Finding sunspots using STARA 4 | ============================ 5 | 6 | This example demonstrates the use of Sunspot Tracking And Recognition 7 | Algorithm (STARA) in detecting and plotting sunspots. 8 | 9 | More information on the algorithm can be found in `this paper. 
`__
10 | 
11 | If you wish to perform analysis over a large period of time we suggest referring to
12 | `this `__
13 | notebook implementation of the same algorithm using dask arrays.
14 | """
15 | # sphinx_gallery_thumbnail_number = 2
16 | 
17 | import matplotlib.pyplot as plt
18 | from skimage.measure import label, regionprops_table
19 | 
20 | import astropy.units as u
21 | from astropy.table import QTable
22 | from astropy.time import Time
23 | 
24 | import sunpy.io._fits
25 | import sunpy.map
26 | from sunpy.net import Fido
27 | from sunpy.net import attrs as a
28 | 
29 | from sunkit_image.stara import stara
30 | 
31 | ###############################################################################
32 | # Firstly, let's download HMI continuum data from the Virtual Solar Observatory (VSO).
33 | 
34 | query = Fido.search(a.Time("2023-01-01 00:00", "2023-01-01 00:01"), a.Instrument("HMI"), a.Physobs("intensity"))
35 | hmi_file = Fido.fetch(query)
36 | 
37 | ###############################################################################
38 | # Once the data is downloaded, we read the FITS file using `sunpy.map.Map`.
39 | #
40 | # To reduce computational expense, we resample the continuum image to a lower
41 | # resolution as this is run on a small cloud machine.
42 | #
43 | # HMI images are inverted, meaning that the solar north pole appears at the
44 | # bottom of the image. To correct this, we rotate each map in the MapSequence
45 | # using the ``rotate`` method with an order of 3.
46 | #
47 | # We will combine these into one step.
48 | 
49 | hmi_map = sunpy.map.Map(hmi_file).resample((1024, 1024) * u.pixel).rotate(order=3)
50 | 
51 | ###############################################################################
52 | # Next, we use the :func:`~sunkit_image.stara.stara` function to detect sunspots in the data.
53 | 
54 | stara_segments = stara(hmi_map, limb_filter=10 * u.percent)
55 | 
56 | ###############################################################################
57 | # Now we will plot the detected contours from STARA on the HMI data.
58 | 
59 | fig = plt.figure()
60 | ax = fig.add_subplot(111, projection=hmi_map)
61 | hmi_map.plot(axes=ax)
62 | ax.contour(stara_segments, levels=0)
63 | 
64 | fig.tight_layout()
65 | 
66 | ###############################################################################
67 | # To focus on specific regions containing sunspots, we can create a submap,
68 | # which is a smaller section of the original map. This allows us to zoom in
69 | # on areas of interest. We define the coordinates of the rectangle to crop
70 | # in pixel coordinates.
71 | 
72 | bottom_left = hmi_map.pixel_to_world(240 * u.pix, 350 * u.pix)
73 | top_right = hmi_map.pixel_to_world(310 * u.pix, 410 * u.pix)
74 | 
75 | hmi_submap = hmi_map.submap(bottom_left, top_right=top_right)
76 | stara_segments = stara(hmi_submap, limb_filter=10 * u.percent)
77 | 
78 | ###############################################################################
79 | # We can further enhance our analysis by extracting key properties from the
80 | # segmented image and organizing them into a structured table.
81 | # First, a labeled image is created where each connected component (a sunspot)
82 | # is assigned a unique label.
83 | 
84 | labelled = label(stara_segments)
85 | 
86 | # Extract properties of the labeled regions
87 | regions = regionprops_table(
88 |     labelled,
89 |     hmi_submap.data,
90 |     properties=[
91 |         "label",  # Unique for each sunspot
92 |         "centroid",  # Centroid coordinates (center of mass)
93 |         "area",  # Total area (number of pixels)
94 |         "min_intensity",
95 |     ],
96 | )
97 | # We will add a new column named "obstime" to the table, which contains
98 | # the observation date for each sunspot.
99 | regions["obstime"] = Time([hmi_submap.date] * regions["label"].size) 100 | # The pixel coordinates of sunspot centroids are converted to world coordinates 101 | # (solar longitude and latitude) in Heliographic Stonyhurst (HGS). 102 | regions["center_coord"] = hmi_submap.pixel_to_world( 103 | regions["centroid-0"] * u.pix, 104 | regions["centroid-1"] * u.pix, 105 | ).heliographic_stonyhurst 106 | print(QTable(regions)) 107 | 108 | ############################################################################### 109 | # Further we could also plot a map with the corresponding center coordinates 110 | # marked and their number. 111 | 112 | # Extract centroid coordinates. 113 | centroids_x = regions["centroid-1"] 114 | centroids_y = regions["centroid-0"] 115 | 116 | fig = plt.figure() 117 | ax = fig.add_subplot(111, projection=hmi_submap) 118 | hmi_submap.plot(axes=ax) 119 | ax.contour(stara_segments, levels=0) 120 | ax.scatter(centroids_x, centroids_y, color="red", marker="o", s=30, label="Centroids") 121 | # Label each centroid with its corresponding sunspot label for better identification. 122 | for i, labels in enumerate(regions["label"]): 123 | ax.text(centroids_x[i], centroids_y[i], f"{labels}", color="yellow", fontsize=16) 124 | fig.tight_layout() 125 | 126 | plt.show() 127 | -------------------------------------------------------------------------------- /examples/multiscale_gaussian_normalization.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================== 3 | Multi-scale Gaussian Normalization 4 | ================================== 5 | 6 | This example applies Multi-scale Gaussian Normalization to a `sunpy.map.Map` using `sunkit_image.enhance.mgn`. 
7 | """ 8 | 9 | import matplotlib.pyplot as plt 10 | 11 | from astropy import units as u 12 | 13 | import sunpy.data.sample 14 | import sunpy.map 15 | 16 | import sunkit_image.enhance as enhance 17 | 18 | ########################################################################### 19 | # `sunpy` provides a range of sample data with a number of suitable images. 20 | # Here we will use a sample AIA 171 image. 21 | 22 | aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 23 | 24 | ########################################################################### 25 | # Applying Multi-scale Gaussian Normalization on a solar image. 26 | # 27 | # The `sunkit_image.enhance.mgn` function takes either a `sunpy.map.Map` or a `numpy.ndarray` as a input. 28 | 29 | mgn_map = enhance.mgn(aia_map) 30 | 31 | ########################################################################### 32 | # Finally we will plot the filtered maps with the original to demonstrate the effect. 33 | 34 | fig = plt.figure(figsize=(15, 10)) 35 | 36 | ax = fig.add_subplot(121, projection=aia_map) 37 | aia_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 38 | 39 | ax1 = fig.add_subplot(122, projection=mgn_map) 40 | mgn_map.plot(axes=ax1) 41 | ax1.set_title("MGN") 42 | 43 | ax1.coords[1].set_ticklabel_visible(False) 44 | fig.tight_layout() 45 | 46 | plt.show() 47 | -------------------------------------------------------------------------------- /examples/radial_gradient_filters.py: -------------------------------------------------------------------------------- 1 | """ 2 | =================================== 3 | Normalizing Radial Gradient Filters 4 | =================================== 5 | 6 | This example showcases the filters found in the "radial" module. 
7 | 8 | These are: 9 | 10 | - Normalizing Radial Gradient Filter (NRGF) (`sunkit_image.radial.nrgf`) 11 | - Fourier Normalizing Radial Gradient Filter (FNRGF) (`sunkit_image.radial.fnrgf`) 12 | - Radial Histogram Equalizing Filter (RHEF) (`sunkit_image.radial.rhef`) 13 | """ 14 | 15 | import matplotlib.pyplot as plt 16 | 17 | import astropy.units as u 18 | 19 | import sunpy.data.sample 20 | import sunpy.map 21 | 22 | import sunkit_image.radial as radial 23 | from sunkit_image.utils import equally_spaced_bins 24 | 25 | ########################################################################### 26 | # `sunpy` sample data contain a number of suitable FITS files for this purpose. 27 | # Here we will use a sample AIA 171 image. 28 | 29 | aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 30 | 31 | ########################################################################### 32 | # Both the NRGF and FNRGF work on radial segments above their application radius. 33 | # 34 | # Here we create those segments radial segments. Each segment created will be of 35 | # equal dimensions radially. The distance between 1 solar radii and 2 solar radii 36 | # is divided into equal parts by the following two lines. 37 | 38 | radial_bin_edges = equally_spaced_bins(1, 2, aia_map.data.shape[0] // 4) 39 | radial_bin_edges *= u.R_sun 40 | 41 | base_nrgf = radial.nrgf(aia_map, radial_bin_edges=radial_bin_edges, application_radius=1 * u.R_sun) 42 | 43 | ########################################################################### 44 | # We will need to work out a few parameters for the FNRGF. 45 | # 46 | # Order is the number of Fourier coefficients to be used in the approximation. 47 | # The attenuation coefficients are calculated to be linearly decreasing, you should 48 | # choose them according to your requirements. These can be changed by tweaking the following keywords: ``mean_attenuation_range`` and ``std_attenuation_range``. 
49 | 50 | order = 20 51 | base_fnrgf = radial.fnrgf( 52 | aia_map, 53 | radial_bin_edges=radial_bin_edges, 54 | order=order, 55 | application_radius=1 * u.R_sun 56 | ) 57 | 58 | ########################################################################### 59 | # Now we will also use the final filter, RHEF. 60 | 61 | base_rhef = radial.rhef(aia_map, radial_bin_edges=radial_bin_edges, application_radius=1 * u.R_sun) 62 | 63 | ########################################################################### 64 | # Finally we will plot the filtered maps with the original to demonstrate the effect of each. 65 | 66 | fig = plt.figure(figsize=(15, 15)) 67 | 68 | ax = fig.add_subplot(221, projection=aia_map) 69 | aia_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 70 | 71 | ax1 = fig.add_subplot(222, projection=base_nrgf) 72 | base_nrgf.plot(axes=ax1) 73 | ax1.set_title("NRGF") 74 | 75 | ax2 = fig.add_subplot(223, projection=base_fnrgf) 76 | base_fnrgf.plot(axes=ax2, clip_interval=(1, 99.99) * u.percent) 77 | ax2.set_title("FNRGF") 78 | 79 | ax3 = fig.add_subplot(224, projection=base_rhef) 80 | base_rhef.plot(axes=ax3) 81 | ax3.set_title("RHEF") 82 | 83 | ax.coords[0].set_ticklabel_visible(False) 84 | ax1.coords[0].set_ticklabel_visible(False) 85 | ax1.coords[1].set_ticklabel_visible(False) 86 | ax3.coords[1].set_ticklabel_visible(False) 87 | 88 | fig.tight_layout() 89 | 90 | plt.show() 91 | -------------------------------------------------------------------------------- /examples/radial_histogram_equalization.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================= 3 | Radial Histogram Equalization 4 | ============================= 5 | 6 | This example applies the Radial Histogram Equalizing Filter (`sunkit_image.radial.rhef`) to a sunpy map. 
7 | """ 8 | 9 | import matplotlib.pyplot as plt 10 | 11 | import astropy.units as u 12 | from astropy.coordinates import SkyCoord 13 | 14 | import sunpy.data.sample 15 | import sunpy.map 16 | 17 | import sunkit_image.enhance as enhance 18 | import sunkit_image.radial as radial 19 | from sunkit_image.utils import equally_spaced_bins 20 | 21 | ####################################################################################### 22 | # Let us use the sunpy sample data AIA image to showcase the RHE filter. 23 | 24 | aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 25 | 26 | # Create radial segments (RHEF should use a dense grid) 27 | radial_bin_edges = equally_spaced_bins(0, 2, aia_map.data.shape[0] // 2) 28 | radial_bin_edges *= u.R_sun 29 | 30 | rhef_map = radial.rhef(aia_map, radial_bin_edges=radial_bin_edges, progress=False) 31 | 32 | fig = plt.figure(figsize=(15, 10)) 33 | 34 | ax = fig.add_subplot(121, projection=aia_map) 35 | aia_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 36 | 37 | ax1 = fig.add_subplot(122, projection=aia_map) 38 | rhef_map.plot(axes=ax1) 39 | ax1.set_title(r"RHE Filtered Map, $\Upsilon$=0.35") 40 | 41 | ax1.coords[1].set_ticks_visible(False) 42 | ax1.coords[1].set_ticklabel_visible(False) 43 | fig.subplots_adjust(wspace=0, hspace=0) 44 | 45 | ####################################################################################### 46 | # The RHEF has one free parameter that works in post processing to modulate the output. 47 | # Here are some of the choices one could make. 48 | # `See this thesis (Gilly 2022) Eq 4.15 for details about upsilon. 
`__ 49 | 50 | # Define the list of upsilon pairs where the first number affects dark components and the second number affects bright ones 51 | upsilon_list = [ 52 | 0.35, 53 | None, 54 | (0.1, 0.1), 55 | (0.5, 0.5), 56 | (0.8, 0.8), 57 | ] 58 | 59 | # Crop the map to see better detail 60 | top_right = SkyCoord(1200 * u.arcsec, 0 * u.arcsec, frame=aia_map.coordinate_frame) 61 | bottom_left = SkyCoord(0 * u.arcsec, -1200 * u.arcsec, frame=aia_map.coordinate_frame) 62 | aia_map_cropped = aia_map.submap(bottom_left, top_right=top_right) 63 | fig, axes = plt.subplots(2, 3, figsize=(15, 10), subplot_kw={"projection": aia_map_cropped}) 64 | axes = axes.flatten() 65 | 66 | aia_map_cropped.plot(axes=axes[0], clip_interval=(1, 99.99) * u.percent) 67 | axes[0].coords[0].set_ticks_visible(False) 68 | axes[0].coords[0].set_ticklabel_visible(False) 69 | for i, upsilon in enumerate(upsilon_list): 70 | out_map = radial.rhef(aia_map, upsilon=upsilon, method="scipy", progress=False) 71 | out_map_crop = out_map.submap(bottom_left, top_right=top_right) 72 | out_map_crop.plot(axes=axes[i + 1]) 73 | axes[i + 1].set_title(f"Upsilon = {upsilon}") 74 | if i != 2: 75 | axes[i + 1].coords[1].set_ticks_visible(False) 76 | axes[i + 1].coords[1].set_ticklabel_visible(False) 77 | if i in [0, 1]: 78 | axes[i + 1].coords[0].set_ticks_visible(False) 79 | axes[i + 1].coords[0].set_ticklabel_visible(False) 80 | fig.tight_layout() 81 | 82 | ####################################################################################### 83 | # Note that multiple filters can be used in a row to get a better output image. 84 | # Here, we will use both :func:`~.mgn` and :func:`~.wow`, then apply RHE filter after. 
85 | 
86 | mgn_map = enhance.mgn(aia_map)
87 | wow_map = enhance.wow(aia_map)
88 | rhef_mgn_map = radial.rhef(mgn_map, progress=False)
89 | rhef_wow_map = radial.rhef(wow_map, progress=False)
90 | 
91 | fig, axes = plt.subplots(2, 3, figsize=(15, 10), subplot_kw={"projection": aia_map})
92 | axes = axes.flatten()
93 | 
94 | rhef_map.plot(axes=axes[0])
95 | axes[0].set_title("RHEF(aia_map)")
96 | 
97 | mgn_map.plot(axes=axes[1], norm=None)
98 | axes[1].set_title("MGN(aia_map)")
99 | 
100 | wow_map.plot(axes=axes[2], norm=None)
101 | axes[2].set_title("WOW(aia_map)")
102 | 
103 | combo_data = (rhef_map.data + rhef_mgn_map.data) / 2
104 | combo_map = sunpy.map.Map(combo_data, rhef_map.meta)
105 | combo_map.plot_settings["norm"] = None
106 | combo_map.plot(axes=axes[3])
107 | axes[3].set_title("AVG( RHEF(aia_map), RHEF(MGN(aia_map) )")
108 | 
109 | rhef_mgn_map.plot(axes=axes[4])
110 | axes[4].set_title("RHEF( MGN(aia_map) )")
111 | 
112 | rhef_wow_map.plot(axes=axes[5])
113 | axes[5].set_title("RHEF( WOW(aia_map) )")
114 | 
115 | for i, ax in enumerate(axes):
116 |     if i not in [0, 3]:
117 |         ax.coords[1].set_ticks_visible(False)
118 |         ax.coords[1].set_ticklabel_visible(False)
119 |     if i in [0, 1, 2]:
120 |         ax.coords[0].set_ticks_visible(False)
121 |         ax.coords[0].set_ticklabel_visible(False)
122 | fig.tight_layout()
123 | 
124 | plt.show()
125 | 
--------------------------------------------------------------------------------
/examples/remove_cosmic_rays.py:
--------------------------------------------------------------------------------
1 | """
2 | ========================
3 | Removing Cosmic Ray Hits
4 | ========================
5 | 
6 | This example illustrates how to remove cosmic ray hits from a LASCO C2 FITS file
7 | using `astroscrappy.detect_cosmics `__.
8 | 
9 | Astroscrappy is a separate Python package and can be installed separately using ``pip`` or ``conda``.
10 | """ 11 | 12 | import astroscrappy 13 | import matplotlib.pyplot as plt 14 | 15 | import astropy.units as u 16 | from astropy.io import fits 17 | 18 | from sunpy.map import Map 19 | from sunpy.net import Fido 20 | from sunpy.net import attrs as a 21 | 22 | ############################################################################### 23 | # For more details on how to download and plot LASCO FITS file see 24 | # sunpy's example `Downloading and plotting LASCO C3 data `__. 25 | # To make this example work you need to have sunpy with all the "net" dependencies installed. 26 | 27 | ############################################################################### 28 | # In order to download the required FITS file, we use 29 | # `Fido `, sunpy's downloader client. 30 | # We need to define two search variables: a time range and the instrument. 31 | 32 | time_range = a.Time("2000/11/09 00:06", "2000/11/09 00:07") 33 | instrument = a.Instrument("LASCO") 34 | detector = a.Detector("C2") 35 | result = Fido.search(time_range, instrument) 36 | print(result) 37 | 38 | downloaded_files = Fido.fetch(result[0]) 39 | data, header = fits.open(downloaded_files[0])[0].data, fits.open(downloaded_files[0])[0].header 40 | 41 | # Add the missing meta information to the header 42 | header["CUNIT1"] = "arcsec" 43 | header["CUNIT2"] = "arcsec" 44 | 45 | ############################################################################### 46 | # With this fix we can load it into a map. 47 | 48 | lasco_map = Map(data, header) 49 | 50 | ############################################################################### 51 | # Now we will call the `astroscrappy.detect_cosmics `__ 52 | # to remove the cosmic ray hits. 53 | # 54 | # This algorithm can perform well with both high and low noise levels in the original data. 55 | # The function takes a `~numpy.ndarray` as input so we only pass the map data. 
56 | # This particular image has lots of high intensity cosmic ray hits which 57 | # cannot be effectively removed by using the default set of parameters. 58 | # So we reduce ``sigclip``, the Laplacian to noise ratio from 4.5 to 2 to mark more hits. 59 | # We also reduce ``objlim``, the contrast between the Laplacian image and the fine structured image 60 | # to clean the high intensity bright cosmic ray hits. 61 | # We also modify the ``readnoise`` parameter to obtain better results. 62 | 63 | mask, clean_data = astroscrappy.detect_cosmics(lasco_map.data, sigclip=2, objlim=2, readnoise=4, verbose=False) 64 | 65 | ############################################################################### 66 | # This returns two variables - mask is a boolean array depicting whether there is 67 | # a cosmic ray hit at that pixel, clean_data is the cleaned image after removing those 68 | # hits. 69 | # We will need to create a new map with the cleaned data and the original metadata 70 | # and we can now plot the before and after. 
71 | 72 | clean_lasco_map = Map(clean_data, lasco_map.meta) 73 | 74 | fig = plt.figure(figsize=(15, 10)) 75 | 76 | ax = fig.add_subplot(121, projection=lasco_map) 77 | lasco_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 78 | 79 | ax1 = fig.add_subplot(122, projection=clean_lasco_map) 80 | clean_lasco_map.plot(axes=ax1, clip_interval=(1, 99.99) * u.percent) 81 | ax1.set_title("Cosmic Rays removed") 82 | 83 | ax1.coords[1].set_ticks_visible(False) 84 | ax1.coords[1].set_ticklabel_visible(False) 85 | fig.tight_layout() 86 | 87 | plt.show() 88 | -------------------------------------------------------------------------------- /examples/rgb_composite.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================= 3 | Making an RGB composite image 4 | ============================= 5 | 6 | This example shows the process required to create an RGB composite image 7 | of three AIA images at different wavelengths. To read more about the 8 | algorithm used in this example, see this 9 | `Astropy tutorial `__. 10 | """ 11 | 12 | import matplotlib.pyplot as plt 13 | from matplotlib.lines import Line2D 14 | 15 | from astropy.visualization import make_lupton_rgb 16 | 17 | import sunpy.data.sample 18 | from sunpy.map import Map 19 | 20 | from sunkit_image.enhance import mgn 21 | 22 | ############################################################################### 23 | # We will use three AIA images from the sample data at the following 24 | # wavelengths: 171, 193, and 211 Angstroms. The 171 image shows the quiet 25 | # solar corona, 193 shows a hotter region of the corona, and 211 shows 26 | # active magnetic regions in the corona. 
27 | 28 | maps = Map(sunpy.data.sample.AIA_171_IMAGE, sunpy.data.sample.AIA_193_IMAGE, sunpy.data.sample.AIA_211_IMAGE) 29 | 30 | ############################################################################### 31 | # Before the images are assigned colors and combined, they need to be 32 | # normalized so that features in each wavelength are visible in the combined 33 | # image. We will apply multi-scale Gaussian normalization using 34 | # `sunkit_image.enhance.mgn` to each map and then create the rgb composite. 35 | # 36 | # The ``k`` parameter is a scaling factor applied to the normalized image. A 37 | # value of 5 produces sharper details in the transformed image. In the 38 | # `~astropy.visualization.make_lupton_rgb` function, ``Q`` is a softening 39 | # parameter which we set to 0 and ``stretch`` controls the linear stretch 40 | # applied to the combined image. 41 | 42 | maps_mgn = [mgn(m, k=5) for m in maps] 43 | # The `~astropy.visualization.make_lupton_rgb` function takes three 2D arrays 44 | # so we need to pass the data attribute of each map. 45 | im_rgb = make_lupton_rgb(maps_mgn[0].data, maps_mgn[1].data, maps_mgn[2].data, Q=0, stretch=1) 46 | 47 | ############################################################################### 48 | # The output of the `astropy.visualization.make_lupton_rgb` algorithm is not 49 | # a Map, but instead an image. So, we need to create a WCS Axes using one of 50 | # original maps and manually set the label. In the first step below, we grab 51 | # the Set1 qualitative colormap to apply to the custom legend lines. 
52 | 53 | cmap = plt.cm.Set1 54 | custom_lines = [ 55 | Line2D([0], [0], color=cmap(0), lw=4), 56 | Line2D([0], [0], color=cmap(2), lw=4), 57 | Line2D([0], [0], color=cmap(1), lw=4), 58 | ] 59 | 60 | fig = plt.figure(figsize=(15, 15)) 61 | 62 | ax = fig.add_subplot(111, projection=maps[0].wcs) 63 | im = ax.imshow(im_rgb) 64 | lon, lat = ax.coords 65 | lon.set_axislabel("Helioprojective Longitude") 66 | lat.set_axislabel("Helioprojective Latitude") 67 | ax.legend(custom_lines, ["AIA 171", "AIA 193", "AIA 211"]) 68 | ax.set_title("AIA RGB Composite") 69 | 70 | fig.tight_layout() 71 | 72 | plt.show() 73 | -------------------------------------------------------------------------------- /examples/tracing_loops.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================================ 3 | Tracing Coronal Loops and Extracting Intensities 4 | ================================================ 5 | 6 | This example traces out the coronal loops in a FITS image 7 | using `~sunkit_image.trace.occult2` and then extracts the intensity 8 | along one traced loop. 9 | 10 | In this example we will use the settings and the data from Markus Aschwanden's tutorial 11 | on his IDL implementation of the ``OCCULT2`` algorithm, which can be found 12 | `here `__. 13 | 14 | """ 15 | # sphinx_gallery_thumbnail_number = 1 16 | 17 | import matplotlib.pyplot as plt 18 | import numpy as np 19 | 20 | from astropy import units as u 21 | from astropy.io import fits 22 | 23 | import sunpy.map 24 | 25 | import sunkit_image.trace as trace 26 | 27 | ########################################################################### 28 | # We will be using `astropy.io.fits.open` to read the FITS file used in the tutorial 29 | # and read in the header and data information. 30 | 31 | with fits.open("http://data.sunpy.org/sunkit-image/trace_1998-05-19T22:21:43.000_171_1024.fits") as hdul: 32 | # We can now make this into a `sunpy.map.GenericMap`. 
33 | trace_map = sunpy.map.Map(hdul[0].data, hdul[0].header) 34 | # We need to set the colormap manually to match the IDL tutorial as close as possible. 35 | trace_map.plot_settings["cmap"] = "goes-rsuvi304" 36 | 37 | ########################################################################### 38 | # Now the loop tracing will begin. We will use the same set of parameters 39 | # as in the IDL tutorial. 40 | # 41 | # The lowpass filter boxcar filter size ``nsm1`` is taken to be 3. 42 | # The minimum radius of curvature at any point in the loop ``rmin`` is 30 pixels. 43 | # The length of the smallest loop to be detected ``lmin`` is 25 pixels. 44 | # The maximum number of structures to be examined ``nstruc`` is 1000. 45 | # The number of extra points in the loop below noise level to terminate a loop tracing ``ngap`` is 0. 46 | # The base flux and median flux ratio ``qthresh1`` is 0.0. 47 | # The noise threshold in the image with respect to median flux ``qthresh2`` is 3.0 . 48 | # For the meaning of these parameters please consult the OCCULT2 article. 49 | 50 | loops = trace.occult2(trace_map, nsm1=3, rmin=30, lmin=25, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 51 | 52 | ############################################################################### 53 | # `~sunkit_image.trace.occult2` returns a list, each element of which is a detected loop. 54 | # Each detected loop is stored as a list of ``x`` positions in image pixels, and a list of ``y`` 55 | # positions in image pixels, of the pixels traced out by OCCULT2. 56 | # Now plot all the detected loops on the original image, we convert the image pixels 57 | # to world coordinates to be plotted on the map. 58 | 59 | fig = plt.figure(figsize=(15, 15)) 60 | 61 | ax = fig.add_subplot(projection=trace_map) 62 | trace_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 63 | # We can now plot each loop in the list of loops. 
64 | # We plot these in world coordinates, converting them through the `pixel_to_world` 65 | # functionality which converts the pixel coordinates to coordinates (in arcsec) on the ``trace_map``. 66 | for loop in loops: 67 | loop = np.array(loop) # convert to array as easier to index ``x`` and ``y`` coordinates 68 | coord_loops = trace_map.pixel_to_world(loop[:, 0] * u.pixel, loop[:, 1] * u.pixel) 69 | ax.plot_coord(coord_loops, color="b") 70 | 71 | fig.tight_layout() 72 | 73 | ############################################################################### 74 | # Finally, we can use the traced loops location information to extract the intensity values. 75 | 76 | # Since we only currently get pixel locations, we need to get the world coordinates of the first loop. 77 | first_loop = np.array(loops[0]) 78 | loop_coords = trace_map.pixel_to_world(first_loop[:, 0] * u.pixel, first_loop[:, 1] * u.pixel) 79 | 80 | # Now we can extract the intensity along the loop 81 | intensity = sunpy.map.sample_at_coords(trace_map, loop_coords) 82 | 83 | # Finally, we can calculate the angular separation along the loop 84 | angular_separation = loop_coords.separation(loop_coords[0]).to(u.arcsec) 85 | 86 | # Plot the loop location and its intensity profile 87 | fig = plt.figure(figsize=(10, 4)) 88 | ax = fig.add_subplot(121, projection=trace_map) 89 | trace_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 90 | ax.plot_coord(loop_coords, color="r") 91 | 92 | ax = fig.add_subplot(122) 93 | ax.plot(angular_separation, intensity) 94 | ax.set_xlabel("Distance along loop [Arcsec]") 95 | ax.set_ylabel("Intensity") 96 | 97 | fig.tight_layout() 98 | 99 | plt.show() 100 | -------------------------------------------------------------------------------- /examples/watroo_wow.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================== 3 | Wavelets Optimized Whitening (WOW) 4 | ================================== 5 | 6 | This 
example applies Wavelets Optimized Whitening to a `sunpy.map.Map` using `sunkit_image.enhance.wow`. 7 | """ 8 | 9 | import matplotlib.pyplot as plt 10 | 11 | from astropy import units as u 12 | 13 | import sunpy.data.sample 14 | import sunpy.map 15 | 16 | import sunkit_image.enhance as enhance 17 | 18 | ########################################################################### 19 | # `sunpy` provides a range of sample data with a number of suitable images. 20 | # Here we will just use AIA 171 21 | 22 | aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 23 | 24 | ########################################################################### 25 | # Applying Wavelets Optimized Whitening on a solar image. 26 | # 27 | # The `sunkit_image.enhance.wow` function takes either a `sunpy.map.Map` or a `numpy.ndarray` as an input. 28 | # We will use the bilateral flavor of the algorithm, and denoising coefficients in the first three wavelet 29 | # planes equal to 5, 2, & 1 sigma of the local noise. The noise is estimated automatically. 30 | # It is possible to pass a noise map for more optimal results. 31 | 32 | wow_map = enhance.wow(aia_map, bilateral=1, denoise_coefficients=[5, 2, 1]) 33 | 34 | ########################################################################### 35 | # Now we will plot the final result and compare that to the original image. 
36 | 37 | fig = plt.figure(figsize=(10, 7)) 38 | 39 | ax = fig.add_subplot(121, projection=aia_map) 40 | aia_map.plot(axes=ax, clip_interval=(1, 99.99) * u.percent) 41 | 42 | ax1 = fig.add_subplot(122, projection=wow_map) 43 | wow_map.plot(axes=ax1, clip_interval=(1, 99.99) * u.percent, norm=None) 44 | ax1.set_title("Wavelets Optimized Whitening (WOW)") 45 | 46 | ax1.coords[1].set_ticks_visible(False) 47 | ax1.coords[1].set_ticklabel_visible(False) 48 | fig.tight_layout() 49 | 50 | plt.show() 51 | -------------------------------------------------------------------------------- /licenses/LICENSE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024, The SunPy Community 2 | 3 | Redistribution and use in source and binary forms, with or without modification, 4 | are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | -------------------------------------------------------------------------------- /licenses/LICENSE_ASDA.rst: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Jiajia Liu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /licenses/LICENSE_NOISE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Masayuki Tanaka 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution 12 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 13 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 15 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 16 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 17 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 19 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 20 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 21 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | -------------------------------------------------------------------------------- /licenses/README.rst: -------------------------------------------------------------------------------- 1 | Licenses 2 | ======== 3 | 4 | This directory holds license and credit information for the package, 5 | works the package is derived from, and/or datasets. 
6 | 7 | Ensure that you pick a package licence which is in this folder and it matches 8 | the one mentioned in the top level README.rst file. If you are using the 9 | pre-rendered version of this template check for the word 'Other' in the README. 10 | -------------------------------------------------------------------------------- /licenses/TEMPLATE_LICENSE.rst: -------------------------------------------------------------------------------- 1 | This project is based upon the OpenAstronomy package template 2 | (https://github.com/OpenAstronomy/package-template/) which is licensed under the terms 3 | of the following licence. 4 | 5 | --- 6 | 7 | Copyright (c) 2018, OpenAstronomy Developers 8 | All rights reserved. 9 | 10 | Redistribution and use in source and binary forms, with or without modification, 11 | are permitted provided that the following conditions are met: 12 | 13 | * Redistributions of source code must retain the above copyright notice, this 14 | list of conditions and the following disclaimer. 15 | * Redistributions in binary form must reproduce the above copyright notice, this 16 | list of conditions and the following disclaimer in the documentation and/or 17 | other materials provided with the distribution. 18 | * Neither the name of the Astropy Team nor the names of its contributors may be 19 | used to endorse or promote products derived from this software without 20 | specific prior written permission. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 23 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 26 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 27 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 29 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=62.1", 4 | "setuptools_scm[toml]>=8.0.0", 5 | "wheel", 6 | ] 7 | build-backend = "setuptools.build_meta" 8 | 9 | [project] 10 | name = "sunkit_image" 11 | dynamic = ["version"] 12 | description = "An image processing toolbox for Solar Physics." 
13 | requires-python = ">=3.10" 14 | readme = { file = "README.rst", content-type = "text/x-rst" } 15 | license = { file = "LICENSE.rst" } 16 | keywords = ["solar physics", "solar", "science", "image analysis"] 17 | authors = [ 18 | { name = "The SunPy Community", email = "sunpy@googlegroups.com" }, 19 | ] 20 | classifiers = [ 21 | "Development Status :: 3 - Alpha", 22 | "Intended Audience :: Science/Research", 23 | "License :: OSI Approved :: BSD License", 24 | "Natural Language :: English", 25 | "Operating System :: OS Independent", 26 | "Programming Language :: Python", 27 | "Programming Language :: Python :: 3", 28 | "Programming Language :: Python :: 3.10", 29 | "Programming Language :: Python :: 3.11", 30 | "Programming Language :: Python :: 3.12", 31 | "Topic :: Scientific/Engineering :: Physics", 32 | ] 33 | dependencies = [ 34 | "astropy>=5.3.0", 35 | "numpy>=1.23.5", 36 | "matplotlib>=3.5.0", 37 | "scipy>=1.10.1", 38 | "scikit-image>=0.20.0", 39 | "sunpy[map]>=6.0.0", 40 | ] 41 | 42 | [project.optional-dependencies] 43 | core = ["sunkit_image"] 44 | all = ["sunkit_image[watroo]"] 45 | watroo = ["watroo"] 46 | tests = [ 47 | "sunkit_image[all]", 48 | "dask", 49 | "pytest-astropy", 50 | "pytest-mpl", 51 | "pytest-xdist", 52 | "sunpy[data,net]>=6.0.0", 53 | ] 54 | docs = [ 55 | "sphinx", 56 | "sphinx-automodapi", 57 | "sphinx-changelog", 58 | "sunpy-sphinx-theme", 59 | "packaging", 60 | "sunkit_image[all]", 61 | "astroscrappy", 62 | "dask", 63 | "matplotlib", 64 | "sphinx-design", 65 | "sphinx-gallery", 66 | "sunpy[data,net]>=6.0.0", 67 | ] 68 | dev = ["sunkit_image[all,tests,docs]"] 69 | 70 | [project.urls] 71 | Homepage = "https://sunpy.org" 72 | "Source Code" = "https://github.com/sunpy/sunkit-image" 73 | Download = "https://pypi.org/project/sunkit-image" 74 | Documentation = "https://docs.sunpy.org/projects/sunkit-image" 75 | Changelog = "https://docs.sunpy.org/projects/sunkit-image/en/stable/whatsnew/changelog.html" 76 | "Issue Tracker" = 
"https://github.com/sunpy/sunkit-image/issues" 77 | 78 | [tool.setuptools] 79 | zip-safe = false 80 | include-package-data = true 81 | 82 | [tool.setuptools.packages.find] 83 | include = ["sunkit_image*"] 84 | exclude = ["sunkit_image._dev*"] 85 | namespaces = false 86 | 87 | [tool.setuptools_scm] 88 | version_file = "sunkit_image/_version.py" 89 | 90 | [tool.gilesbot] 91 | [tool.gilesbot.pull_requests] 92 | enabled = true 93 | 94 | [tool.gilesbot.towncrier_changelog] 95 | enabled = true 96 | verify_pr_number = true 97 | changelog_skip_label = "No Changelog Entry Needed" 98 | help_url = "https://github.com/sunpy/sunkit-image/blob/main/changelog/README.rst" 99 | 100 | changelog_missing_long = "There isn't a changelog file in this pull request. Please add a changelog file to the `changelog/` directory following the instructions in the changelog [README](https://github.com/sunpy/sunkit-image/blob/main/changelog/README.rst)." 101 | 102 | type_incorrect_long = "The changelog file you added is not one of the allowed types. Please use one of the types described in the changelog [README](https://github.com/sunpy/sunkit-image/blob/main/changelog/README.rst)" 103 | 104 | number_incorrect_long = "The number in the changelog file you added does not match the number of this pull request. Please rename the file." 105 | 106 | # TODO: This should be in towncrier.toml but Giles currently only works looks in 107 | # pyproject.toml we should move this back when it's fixed. 
108 | [tool.towncrier] 109 | package = "sunkit_image" 110 | filename = "CHANGELOG.rst" 111 | directory = "changelog/" 112 | issue_format = "`#{issue} `__" 113 | title_format = "{version} ({project_date})" 114 | 115 | [[tool.towncrier.type]] 116 | directory = "breaking" 117 | name = "Breaking Changes" 118 | showcontent = true 119 | 120 | [[tool.towncrier.type]] 121 | directory = "deprecation" 122 | name = "Deprecations" 123 | showcontent = true 124 | 125 | [[tool.towncrier.type]] 126 | directory = "removal" 127 | name = "Removals" 128 | showcontent = true 129 | 130 | [[tool.towncrier.type]] 131 | directory = "feature" 132 | name = "New Features" 133 | showcontent = true 134 | 135 | [[tool.towncrier.type]] 136 | directory = "bugfix" 137 | name = "Bug Fixes" 138 | showcontent = true 139 | 140 | [[tool.towncrier.type]] 141 | directory = "doc" 142 | name = "Documentation" 143 | showcontent = true 144 | 145 | [[tool.towncrier.type]] 146 | directory = "trivial" 147 | name = "Internal Changes" 148 | showcontent = true 149 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | minversion = 7.0 3 | testpaths = 4 | sunkit_image 5 | docs 6 | norecursedirs = 7 | .tox 8 | build 9 | docs[\/]_build 10 | docs/generated 11 | *.egg-info 12 | examples 13 | sunkit_image[\/]_dev 14 | .history 15 | sunkit_image[\/]extern 16 | doctest_plus = enabled 17 | doctest_optionflags = 18 | NORMALIZE_WHITESPACE 19 | FLOAT_CMP 20 | ELLIPSIS 21 | text_file_format = rst 22 | mpl-results-path = figure_test_images 23 | mpl-use-full-test-name = True 24 | addopts = 25 | --doctest-rst 26 | -p no:unraisableexception 27 | -p no:threadexception 28 | -m "not mpl_image_compare" 29 | --dist no 30 | --arraydiff 31 | --doctest-ignore-import-errors 32 | --doctest-continue-on-failure 33 | markers = 34 | remote_data: marks this test function as needing remote data. 
35 | online: marks this test function as needing online connectivity. 36 | mpl_image_compare: marks this test function as using hash-based Matplotlib figure verification. This mark is not meant to be directly applied, but is instead automatically applied when a test function uses the @sunpy.tests.helpers.figure_test decorator. 37 | remote_data_strict = True 38 | filterwarnings = 39 | # Turn all warnings into errors so they do not pass silently. 40 | error 41 | # Do not fail on pytest config issues (i.e. missing plugins) but do show them 42 | always::pytest.PytestConfigWarning 43 | # A list of warnings to ignore follows. If you add to this list, you MUST 44 | # add a comment or ideally a link to an issue that explains why the warning 45 | # is being ignored 46 | # 47 | # 48 | # // These come from the oldestdeps run // 49 | # This is due to dependencies building with a numpy version different from 50 | # the local installed numpy version, but should be fine 51 | # See https://github.com/numpy/numpy/issues/15748#issuecomment-598584838 52 | ignore:.*may indicate binary incompatibility.*:RuntimeWarning 53 | ignore:leap-second file is expired:astropy.utils.iers.iers.IERSStaleWarning 54 | ignore:leap-second auto-update failed:astropy.utils.exceptions.AstropyWarning 55 | -------------------------------------------------------------------------------- /ruff.toml: -------------------------------------------------------------------------------- 1 | # Allow unused variables when underscore-prefixed. 
2 | lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" 3 | target-version = "py310" 4 | line-length = 120 5 | exclude=[ 6 | ".git,", 7 | "__pycache__", 8 | "build", 9 | "tools/**", 10 | ".history", 11 | ] 12 | lint.select = [ 13 | "A", 14 | "ARG", 15 | "ASYNC", 16 | "B", 17 | "BLE", 18 | "C4", 19 | # "C90", 20 | "COM", 21 | # "D", 22 | "DTZ", 23 | "E", 24 | "EM", 25 | "ERA", 26 | "EXE", 27 | "F", 28 | "FBT", 29 | "FLY", 30 | # "FURB", 31 | "G", 32 | "I", 33 | "ICN", 34 | "INP", 35 | "INT", 36 | "ISC", 37 | "LOG", 38 | # "N", 39 | "NPY", 40 | "PERF", 41 | "PGH", 42 | "PIE", 43 | # "PL", 44 | "PLE", 45 | "PT", 46 | "PTH", 47 | "PYI", 48 | "Q", 49 | "RET", 50 | "RSE", 51 | "RUF", 52 | # "S", 53 | "SIM", 54 | "SLF", 55 | "SLOT", 56 | "T10", 57 | "T20", 58 | "TCH", 59 | "TID", 60 | "TRIO", 61 | "TRY", 62 | "UP", 63 | "W", 64 | "YTT", 65 | ] 66 | lint.extend-ignore = [ 67 | "E501", # Line too long 68 | "COM812", # May cause conflicts when used with the formatter 69 | "ISC001", # May cause conflicts when used with the formatter 70 | ] 71 | 72 | [lint.per-file-ignores] 73 | "examples/*.py" = [ 74 | "INP001", # examples is part of an implicit namespace package 75 | "T201", # We need print in our examples 76 | ] 77 | "docs/conf.py" = [ 78 | "INP001", # conf.py is part of an implicit namespace package 79 | ] 80 | 81 | [lint.pydocstyle] 82 | convention = "numpy" 83 | 84 | [format] 85 | docstring-code-format = true 86 | indent-style = "space" 87 | quote-style = "double" 88 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from setuptools import setup 3 | 4 | setup() 5 | -------------------------------------------------------------------------------- /sunkit-test-env.yaml: -------------------------------------------------------------------------------- 1 | channels: 2 | - conda-forge 3 | 4 | dependencies: 5 | - 
tox 6 | -------------------------------------------------------------------------------- /sunkit_image/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | sunkit-image 3 | ============ 4 | 5 | An image processing toolbox for Solar Physics. 6 | 7 | * Homepage: https://sunpy.org 8 | * Documentation: https://sunkit-image.readthedocs.io/en/latest/ 9 | """ 10 | 11 | from .version import version as __version__ 12 | 13 | __all__: list[str] = ["__version__"] 14 | -------------------------------------------------------------------------------- /sunkit_image/_dev/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This package contains utilities that are only used when developing in a 3 | copy of the source repository. 4 | These files are not installed, and should not be assumed to exist at 5 | runtime. 6 | """ 7 | -------------------------------------------------------------------------------- /sunkit_image/_dev/scm_version.py: -------------------------------------------------------------------------------- 1 | # Try to use setuptools_scm to get the current version; this is only used 2 | # in development installations from the git repository. 
3 | from pathlib import Path 4 | 5 | try: 6 | from setuptools_scm import get_version 7 | 8 | version = get_version(root=Path('../..'), relative_to=__file__) 9 | except ImportError: 10 | raise 11 | except Exception as e: 12 | raise ValueError('setuptools_scm can not determine version.') from e 13 | -------------------------------------------------------------------------------- /sunkit_image/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import tempfile 4 | import warnings 5 | import importlib.util 6 | from pathlib import Path 7 | 8 | import numpy as np 9 | import pytest 10 | import skimage 11 | 12 | import astropy 13 | import astropy.config.paths 14 | from astropy.io.fits.verify import VerifyWarning 15 | from astropy.utils.data import get_pkg_data_filename 16 | 17 | import sunpy.data.sample 18 | import sunpy.map 19 | from sunpy.coordinates import Helioprojective, get_earth 20 | from sunpy.map.header_helper import make_fitswcs_header 21 | 22 | from sunkit_image.data.test import get_test_filepath 23 | 24 | # Force MPL to use non-gui backends for testing. 25 | try: 26 | import matplotlib as mpl 27 | import matplotlib.pyplot as plt 28 | 29 | HAVE_MATPLOTLIB = True 30 | mpl.use("Agg") 31 | except ImportError: 32 | HAVE_MATPLOTLIB = False 33 | 34 | # Don't actually import pytest_remotedata because that can do things to the 35 | # entrypoints code in pytest. 36 | remotedata_spec = importlib.util.find_spec("pytest_remotedata") 37 | HAVE_REMOTEDATA = remotedata_spec is not None 38 | # Do not collect the sample data file because this would download the sample data. 39 | collect_ignore = ["data/sample.py"] 40 | console_logger = logging.getLogger() 41 | console_logger.setLevel("INFO") 42 | 43 | 44 | @pytest.fixture(scope="session", autouse=True) 45 | def _tmp_config_dir(request): 46 | """ 47 | Globally set the default config for all tests. 
48 | """ 49 | tmpdir = tempfile.TemporaryDirectory() 50 | 51 | os.environ["SUNPY_CONFIGDIR"] = str(tmpdir.name) 52 | astropy.config.paths.set_temp_config._temp_path = Path(tmpdir.name) 53 | astropy.config.paths.set_temp_cache._temp_path = Path(tmpdir.name) 54 | 55 | yield 56 | 57 | del os.environ["SUNPY_CONFIGDIR"] 58 | tmpdir.cleanup() 59 | astropy.config.paths.set_temp_config._temp_path = None 60 | astropy.config.paths.set_temp_cache._temp_path = None 61 | 62 | 63 | @pytest.fixture() 64 | def _undo_config_dir_patch(): 65 | """ 66 | Provide a way for certain tests to not have the config dir. 67 | """ 68 | oridir = os.environ["SUNPY_CONFIGDIR"] 69 | del os.environ["SUNPY_CONFIGDIR"] 70 | yield 71 | os.environ["SUNPY_CONFIGDIR"] = oridir 72 | 73 | 74 | @pytest.fixture(scope="session", autouse=True) 75 | def tmp_dl_dir(request): 76 | """ 77 | Globally set the default download directory for the test run to a tmp dir. 78 | """ 79 | with tempfile.TemporaryDirectory() as tmpdir: 80 | os.environ["SUNPY_DOWNLOADDIR"] = tmpdir 81 | yield Path(tmpdir) 82 | del os.environ["SUNPY_DOWNLOADDIR"] 83 | 84 | 85 | @pytest.fixture() 86 | def _undo_download_dir_patch(): 87 | """ 88 | Provide a way for certain tests to not have tmp download dir. 89 | """ 90 | oridir = os.environ["SUNPY_DOWNLOADDIR"] 91 | del os.environ["SUNPY_DOWNLOADDIR"] 92 | yield 93 | os.environ["SUNPY_DOWNLOADDIR"] = oridir 94 | 95 | 96 | @pytest.fixture(scope="session", autouse=True) 97 | def _hide_parfive_progress(request): 98 | """ 99 | Set the PARFIVE_HIDE_PROGRESS to hide the parfive progress bar in tests. 
100 | """ 101 | os.environ["PARFIVE_HIDE_PROGRESS"] = "True" 102 | yield 103 | del os.environ["PARFIVE_HIDE_PROGRESS"] 104 | 105 | 106 | def pytest_runtest_teardown(item): 107 | # Clear the pyplot figure stack if it is not empty after the test 108 | # You can see these log messages by passing "-o log_cli=true" to pytest on the command line 109 | if HAVE_MATPLOTLIB and plt.get_fignums(): 110 | msg = f"Removing {len(plt.get_fignums())} pyplot figure(s) " f"left open by {item.name}" 111 | console_logger.info(msg) 112 | plt.close("all") 113 | 114 | 115 | @pytest.fixture() 116 | def granule_map(): 117 | return sunpy.map.Map(get_pkg_data_filename("dkist_photosphere.fits", package="sunkit_image.data.test")) 118 | 119 | 120 | @pytest.fixture() 121 | def granule_map_he(): 122 | granule_map = sunpy.map.Map(get_pkg_data_filename("dkist_photosphere.fits", package="sunkit_image.data.test")) 123 | # min-max normalization to [0, 1] 124 | map_norm = (granule_map.data - np.nanmin(granule_map.data)) / ( 125 | np.nanmax(granule_map.data) - np.nanmin(granule_map.data) 126 | ) 127 | return skimage.filters.rank.equalize( 128 | skimage.util.img_as_ubyte(map_norm), 129 | footprint=skimage.morphology.disk(radius=100), 130 | ) 131 | 132 | 133 | @pytest.fixture() 134 | def granule_minimap1(): 135 | # Array with "intergranule region" 136 | arr = np.ones((10, 10)) 137 | arr[0, 0] = 0 138 | observer = get_earth() 139 | frame = Helioprojective(observer=observer, obstime=observer.obstime) 140 | ref_coord = astropy.coordinates.SkyCoord(0, 0, unit="arcsec", frame=frame) 141 | header = make_fitswcs_header( 142 | arr, 143 | ref_coord, 144 | ) 145 | return sunpy.map.GenericMap(arr, header) 146 | 147 | 148 | @pytest.fixture() 149 | def granule_minimap2(): 150 | # Modified array with "intergranule region" 151 | arr = np.ones((10, 10)) 152 | arr[1, 1] = 0 153 | observer = get_earth() 154 | frame = Helioprojective(observer=observer, obstime=observer.obstime) 155 | ref_coord = 
astropy.coordinates.SkyCoord(0, 0, unit="arcsec", frame=frame) 156 | header = make_fitswcs_header( 157 | arr, 158 | ref_coord, 159 | ) 160 | return sunpy.map.GenericMap(arr, header) 161 | 162 | 163 | @pytest.fixture() 164 | def granule_minimap3(): 165 | # Array with no "intergranule region" 166 | arr = np.ones((10, 10)) 167 | observer = get_earth() 168 | frame = Helioprojective(observer=observer, obstime=observer.obstime) 169 | ref_coord = astropy.coordinates.SkyCoord(0, 0, unit="arcsec", frame=frame) 170 | header = make_fitswcs_header( 171 | arr, 172 | ref_coord, 173 | ) 174 | return sunpy.map.GenericMap(arr, header) 175 | 176 | @pytest.fixture(params=["array", "map"]) 177 | def aia_171(request): 178 | # VerifyWarning: Invalid 'BLANK' keyword in header. 179 | # The 'BLANK' keyword is only applicable to integer data, and will be ignored in this HDU. 180 | with warnings.catch_warnings(): 181 | warnings.simplefilter("ignore", category=VerifyWarning) 182 | smap = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) 183 | return smap if request.param == "map" else smap.data 184 | 185 | 186 | @pytest.fixture() 187 | def aia_171_cutout(): 188 | return sunpy.map.Map(get_test_filepath("aia_171_cutout.fits")) 189 | 190 | 191 | @pytest.fixture() 192 | def aia_171_map(): 193 | # VerifyWarning: Invalid 'BLANK' keyword in header. 194 | # The 'BLANK' keyword is only applicable to integer data, and will be ignored in this HDU. 
def get_test_filepath(filename, **kwargs):
    """
    Return the full path to a test file in the ``data/test`` directory.

    Parameters
    ----------
    filename : `str`
        The name of the file inside the ``data/test`` directory.
    **kwargs : `dict`
        Additional keyword arguments passed through to
        `~astropy.utils.data.get_pkg_data_filename`.

    Returns
    -------
    filepath : `str`
        The full path to the file.

    Notes
    -----
    This is a wrapper around `~astropy.utils.data.get_pkg_data_filename` which
    sets the ``package`` kwarg to be ``sunkit_image.data.test``.
    """
    return get_pkg_data_filename(filename, package="sunkit_image.data.test", **kwargs)
"""
This module contains an implementation of the Sunspot Tracking And Recognition
Algorithm (STARA).
"""

import numpy as np
from skimage.filters import median
from skimage.morphology import disk, white_tophat
from skimage.util import invert

import astropy.units as u

import sunpy.map

__all__ = ["stara"]


@u.quantity_input
def stara(
    smap,
    circle_radius: u.deg = 100 * u.arcsec,
    median_box: u.deg = 10 * u.arcsec,
    threshold=6000,
    limb_filter: u.percent = None,
):
    """
    A method for automatically detecting sunspots in white-light data using
    morphological operations.

    Parameters
    ----------
    smap : `sunpy.map.GenericMap`
        The map to apply the algorithm to.
    circle_radius : `astropy.units.Quantity`, optional
        The angular size of the structuring element used in the
        `skimage.morphology.white_tophat`. This is the maximum radius of
        detected features. By default, this is set to 100 arcseconds.
    median_box : `astropy.units.Quantity`, optional
        The size of the structuring element for the median filter, features
        smaller than this will be averaged out. The default value is 10 arcseconds.
    threshold : `int`, optional
        The threshold used for detection, this will be subject to detector
        degradation. The default value of 6000, is a reasonable value for HMI continuum
        images.
    limb_filter : `astropy.units.Quantity`, optional
        If set, ignore features close to the limb within a percentage of the
        radius of the disk. A value of 10% generally filters out false
        detections around the limb with HMI continuum images.

    Returns
    -------
    `numpy.ndarray`
        A 2D boolean array of the same shape as the input solar map. Each element in the array
        represents a pixel in the solar map, and its value is `True` if the corresponding pixel
        is identified as part of a sunspot (based on the specified threshold), and `False` otherwise.

    References
    ----------
    * Fraser Watson and Lyndsay Fletcher
      "Automated sunspot detection and the evolution of sunspot magnetic fields during solar cycle 23"
      Proceedings of the International Astronomical Union, vol. 6, no. S273, pp. 51-55, 2010.
      `doi:10.1017/S1743921311014992 <https://doi.org/10.1017/S1743921311014992>`__
    """
    # Sunspots are dark; invert so they become the bright features the
    # white top-hat transform picks out.
    data = invert(smap.data)

    # Filter things that are close to limb to reduce false detections
    if limb_filter is not None:
        hpc_coords = sunpy.map.all_coordinates_from_map(smap)
        r = np.sqrt(hpc_coords.Tx**2 + hpc_coords.Ty**2) / (smap.rsun_obs - smap.rsun_obs * limb_filter)
        data[r > 1] = np.nan

    # Median filter to remove detections based on hot pixels
    m_pix = int((median_box / smap.scale[0]).to_value(u.pix))

    # ``square`` was replaced by ``footprint_rectangle`` in scikit-image 0.25;
    # see https://github.com/scikit-image/scikit-image/pull/7566/files
    import skimage

    # NOTE: compare numeric version components, not strings — the string
    # comparison ``"0.100.0" < "0.25.0"`` is True even though 0.100 > 0.25.
    skimage_version = tuple(int(part) for part in skimage.__version__.split(".")[:2])
    if skimage_version < (0, 25):
        from skimage.morphology import square

        footprint = square(m_pix)
    else:
        from skimage.morphology import footprint_rectangle

        footprint = footprint_rectangle((m_pix, m_pix))
    med = median(data, footprint, behavior="ndimage")

    # Construct the pixel structuring element
    c_pix = int((circle_radius / smap.scale[0]).to_value(u.pix))
    circle = disk(c_pix / 2)

    finite = white_tophat(med, circle)
    # NaNs introduced by the limb filter would compare False anyway, but zero
    # them explicitly so downstream arithmetic stays finite.
    finite[np.isnan(finite)] = 0

    return finite > threshold
3 | """ 4 | -------------------------------------------------------------------------------- /sunkit_image/tests/figure_hashes_mpl_390_ft_261_sunpy_600_astropy_610.json: -------------------------------------------------------------------------------- 1 | { 2 | "sunkit_image.tests.test_enhance.test_mgn[array]": "f7a8e5a7377422c652a47217981d1c3bd1a66b5c211eae6ef18e4cef2660e438", 3 | "sunkit_image.tests.test_enhance.test_mgn[map]": "50bb490a6cc2408befe13c7f2a54f7433df80c2473dd21b619ace35de7e8f250", 4 | "sunkit_image.tests.test_enhance.test_mgn_submap": "cf3948d3ffa8ed4eacc500bee170db68bb1a96d2cec1c92e48f74c01827dc397", 5 | "sunkit_image.tests.test_enhance.test_mgn_cutout": "5b54d80ba0040a6eec181b8a83b5d294a4fbc60a5818651d67fc6dbf2690940f", 6 | "sunkit_image.tests.test_enhance.test_wow[array]": "fe6ee53b57b2fc6d8ce8a96e4273c3ddbe17057aeded32611b8e7294648d28e5", 7 | "sunkit_image.tests.test_enhance.test_wow[map]": "f5535fad0fe2d6ff2bfdfb10a984bb6a21ece95d971070482136fe6e6f874f1a", 8 | "sunkit_image.tests.test_enhance.test_wow_submap": "73b8f7749ded1fd6898a7a83da4f755d4b6391c0215ca459be07982aeff9dae8", 9 | "sunkit_image.tests.test_enhance.test_wow_cutout": "880e8f961e6943cd2ee4ff32379ea0444e4b5cf1494b6b42b0dc027ee7adf532", 10 | "sunkit_image.tests.test_radial.test_fig_nrgf": "57b79f69ba537ff2ee37a879048f7d1232173567cc8c7b2e46988cb8b11a5575", 11 | "sunkit_image.tests.test_radial.test_fig_fnrgf": "42044b0b483cd9623c59530c93f4e71b817df335050a71bb9c026cbf429cd7bd", 12 | "sunkit_image.tests.test_radial.test_fig_rhef": "1c97e4b501d6f614b8777ef36c97a1d17645287020d2979c206f84d51cce6db6", 13 | "sunkit_image.tests.test_radial.test_multifig_rhef": "d8963086739e0f011394a9ceb007d851bf12f9b542decd9b3dcaeeb993e6cda0", 14 | "sunkit_image.tests.test_stara.test_stara_plot": "88a1fafc42b22264a68a9b633bfa3bbe22ce3e89cbf8db15920a9d28b62e49f6", 15 | "sunkit_image.tests.test_trace.test_occult2_fig[array]": "e59f4c476b6b27788ab8dc397cb0d2f34f8d6032eced289fd2eabeb324b39558", 16 | 
"sunkit_image.tests.test_trace.test_occult2_fig[map]": "e59f4c476b6b27788ab8dc397cb0d2f34f8d6032eced289fd2eabeb324b39558", 17 | "sunkit_image.tests.test_trace.test_occult2_cutout": "1b18459111342c3bab4a2c8fb08b012eef4a69767e08753d8918fe4987fa165d" 18 | } -------------------------------------------------------------------------------- /sunkit_image/tests/helpers.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from functools import wraps 3 | 4 | import matplotlib as mpl 5 | import matplotlib.pyplot as plt 6 | import pytest 7 | 8 | import astropy 9 | 10 | import sunpy 11 | from sunpy.tests.helpers import skip_windows 12 | 13 | __all__ = ["figure_test", "get_hash_library_name", "skip_windows"] 14 | 15 | 16 | def get_hash_library_name(): 17 | """ 18 | Generate the hash library name for this env. 19 | """ 20 | ft2_version = f"{mpl.ft2font.__freetype_version__.replace('.', '')}" 21 | mpl_version = ( 22 | "dev" if (("dev" in mpl.__version__) or ("rc" in mpl.__version__)) else mpl.__version__.replace(".", "") 23 | ) 24 | astropy_version = ( 25 | "dev" 26 | if (("dev" in astropy.__version__) or ("rc" in astropy.__version__)) 27 | else astropy.__version__.replace(".", "") 28 | ) 29 | sunpy_version = "dev" if "dev" in sunpy.__version__ else sunpy.__version__.replace(".", "") 30 | return f"figure_hashes_mpl_{mpl_version}_ft_{ft2_version}_sunpy_{sunpy_version}_astropy_{astropy_version}.json" 31 | 32 | 33 | def figure_test(test_function): 34 | """ 35 | A decorator which marks the test as comparing the hash of the returned 36 | figure to the hash library in the repository. A `matplotlib.figure.Figure` 37 | object should be returned or ``plt.gcf()`` will be called to get the figure 38 | object to compare to. 39 | 40 | Examples 41 | -------- 42 | .. 
code:: 43 | @figure_test 44 | def test_simple_plot(): 45 | plt.plot([0,1]) 46 | """ 47 | hash_library_name = get_hash_library_name() 48 | hash_library_file = Path(__file__).parent / hash_library_name 49 | 50 | @pytest.mark.remote_data() 51 | @pytest.mark.mpl_image_compare( 52 | hash_library=hash_library_file, 53 | savefig_kwargs={"metadata": {"Software": None}}, 54 | style="default", 55 | ) 56 | @wraps(test_function) 57 | def test_wrapper(*args, **kwargs): 58 | ret = test_function(*args, **kwargs) 59 | if ret is None: 60 | ret = plt.gcf() 61 | return ret 62 | 63 | return test_wrapper 64 | -------------------------------------------------------------------------------- /sunkit_image/tests/test_asda.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from sunkit_image import asda 5 | from sunkit_image.data.test import get_test_filepath 6 | 7 | 8 | def test_asda_artificial(): 9 | """ 10 | Generate an artificial vortex using the Lamb_Oseen class in asda, then 11 | perform the vortex detection. 
12 | """ 13 | # Generate an artificial vortex 14 | vmax = 2.0 # rotating speed 15 | rmax = 50 # radius 16 | ratio = 0.2 # ratio of expanding speed over rotating speed 17 | alpha = 1.256430 18 | rcore = rmax / np.sqrt(alpha) 19 | gamma = 2 * np.pi * vmax * rmax * (1 + 1 / (2 * alpha)) 20 | with pytest.raises(ValueError, match="Shape of velocity field's vx and vy do not match"): 21 | asda.generate_velocity_field(np.zeros((1, 2)), np.zeros((2, 1)), 0, 0) 22 | with pytest.raises(TypeError, match="Keyword 'r' must be an integer"): 23 | asda.generate_velocity_field(np.zeros((1, 2)), np.zeros((1, 2)), 0, 0, 0.8) 24 | with pytest.raises(ValueError, match="Shape of velocity field's vx and vy do not match"): 25 | asda.calculate_gamma_values(np.zeros((1, 2)), np.zeros((2, 1)), 0, 0) 26 | with pytest.raises(TypeError, match="Keyword 'r' must be an integer"): 27 | asda.calculate_gamma_values(np.zeros((1, 2)), np.zeros((1, 2)), 1, 0.8) 28 | with pytest.raises(TypeError, match="Keyword 'factor' must be an integer"): 29 | asda.calculate_gamma_values(np.zeros((1, 2)), np.zeros((1, 2)), 0.8, 3) 30 | with pytest.raises(TypeError, match="Keyword 'factor' must be an integer"): 31 | asda.get_vortex_edges(gamma=np.zeros((1, 2)), factor=0.8) 32 | with pytest.raises(ValueError, match="Shape of velocity field's vx and vy do not match"): 33 | asda.get_vortex_properties(np.zeros((1, 2)), np.zeros((2, 1)), 0) 34 | 35 | # Generate vx and vy 36 | with pytest.warns(UserWarning, match="One of the input parameters is missing, setting both to 'None'"): 37 | vx, vy = asda.get_velocity_field( 38 | gamma=gamma, 39 | rcore=rcore, 40 | ratio_vradial=ratio, 41 | x_range=[-100, 100, 200], 42 | y_range=[-100, 100, 200], 43 | x=np.meshgrid, 44 | ) 45 | 46 | vx, vy = asda.get_velocity_field( 47 | gamma=gamma, 48 | rcore=rcore, 49 | ratio_vradial=ratio, 50 | x_range=[-100, 100, 200], 51 | y_range=[-100, 100, 200], 52 | ) 53 | 54 | # perform vortex detection 55 | gamma = asda.calculate_gamma_values(vx, vy) 56 | 
def test_real_data():
    """
    Run the test on real data and compare with the correct answer.

    Notes
    -----
    Input velocity field and image (if there is any) are all stored in
    default Python order (i.e. [y, x] of the data).

    Output gamma values are in the same order, thus the same shape as
    the velocity field.

    Other outputs are in the order of [x, y], i.e., vc = [vx, vy],
    edge = [[x1, y1], [x2, y2], ...], points = [[x1, y1], [x2, y2], ...]
    in units of pixel.
    """
    # file which stores the velocity field data
    vel_file = get_test_filepath("asda_vxvy.npz")
    # file that stores the correct detection result
    cor_file = get_test_filepath("asda_correct.npz")
    # load velocity field and data
    vxvy = np.load(vel_file, allow_pickle=True)
    vx = vxvy["vx"]
    vy = vxvy["vy"]
    data = vxvy["data"]

    # Perform swirl detection
    factor = 1
    r = 3
    # Gamma1 and Gamma2
    gamma = asda.calculate_gamma_values(vx, vy, factor, r)
    # Determine swirls
    center_edge = asda.get_vortex_edges(gamma)
    # Properties of swirls
    ve, vr, vc, ia = asda.get_vortex_properties(vx, vy, center_edge, data)
    # load correct detection results
    correct = dict(np.load(cor_file, allow_pickle=True))

    # The same number of swirls must be detected
    n = len(ve)
    nc = len(correct["ve"])
    assert n == nc

    # For every detected swirl, find the index of the matching swirl in the
    # reference result by majority vote over coordinate matches.
    pos = []
    for cen in center_edge["center"]:
        cen = [int(cen[0]), int(cen[1])]
        idx = np.where(correct["center"] == cen)
        assert np.size(idx[0]) >= 2
        pos.append(np.bincount(idx[0]).argmax())

    # Percentage differences between detected and expected properties.
    peak_diff = []
    radius_diff = []
    vr_diff = []
    ve_diff = []
    vc_diff = []
    ia_diff = []
    for i in range(n):
        idx = pos[i]
        peak_diff.append((center_edge["peak"][i] - correct["peak"][idx]) / correct["peak"][idx] * 100)
        radius_diff.append((center_edge["radius"][i] - correct["radius"][idx]) / correct["radius"][idx] * 100)
        vr_diff.append((vr[i] - correct["vr"][idx]) / correct["vr"][idx] * 100)
        ve_diff.append((ve[i] - correct["ve"][idx]) / correct["ve"][idx] * 100)
        vc_diff.append((vc[i] - correct["vc"][idx]) / correct["vc"][idx] * 100)
        ia_diff.append((ia[i] - correct["ia"][idx]) / correct["ia"][idx] * 100)

    # Should be no differences
    for diff in [peak_diff, radius_diff, vr_diff, ve_diff, vc_diff, ia_diff]:
        assert np.allclose(np.mean(diff), 0, atol=1e-5)
@figure_test
def test_mgn_cutout(aia_171_cutout):
    # MGN on a small FITS cutout; clip the display range so the figure hash is
    # stable against outlier pixels.
    out = enhance.mgn(aia_171_cutout)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection=out)
    out.plot(axes=ax, clip_interval=(1, 99) * u.percent)
    return fig


@figure_test
@pytest.mark.remote_data()
def test_wow(aia_171):
    # The aia_171 fixture is parametrized over a bare ndarray and a GenericMap;
    # plot via Map.plot when a map comes back, otherwise fall back to imshow.
    out = enhance.wow(aia_171)
    if isinstance(out, sunpy.map.GenericMap):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection=out)
        out.plot(axes=ax)
        return fig
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(out, origin="lower", interpolation="nearest", cmap="sdoaia171")
    return fig


@figure_test
@pytest.mark.remote_data()
def test_wow_submap(aia_171_map):
    # Exercise WOW on an off-center submap to check edge handling.
    top_right = SkyCoord(0 * u.arcsec, -200 * u.arcsec, frame=aia_171_map.coordinate_frame)
    bottom_left = SkyCoord(-900 * u.arcsec, -900 * u.arcsec, frame=aia_171_map.coordinate_frame)
    aia_171_map_submap = aia_171_map.submap(bottom_left, top_right=top_right)
    out = enhance.wow(aia_171_map_submap)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection=out)
    out.plot(axes=ax)
    return fig


@figure_test
def test_wow_cutout(aia_171_cutout):
    # WOW on the local FITS cutout (no remote data needed).
    out = enhance.wow(aia_171_cutout)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection=out)
    out.plot(axes=ax)
    return fig
pytestmark = [pytest.mark.filterwarnings("ignore:Missing metadata for observer")]


def test_segment(granule_map):
    # End-to-end segmentation: output is a map of the same shape with changed values.
    segmented = segment(granule_map, skimage_method="li", mark_dim_centers=True)
    assert isinstance(segmented, sunpy.map.mapbase.GenericMap)
    # Check pixels are not empty.
    initial_pix = all_pixel_indices_from_map(granule_map).value
    seg_pixels = all_pixel_indices_from_map(segmented).value
    assert np.size(seg_pixels) > 0
    assert seg_pixels.shape == initial_pix.shape
    # Check that the values in the array have changed
    assert np.any(np.not_equal(granule_map.data, segmented.data))


def test_segment_errors(granule_map):
    # Non-map input and an unknown thresholding method must both be rejected.
    with pytest.raises(TypeError, match="Input must be an instance of a sunpy.map.GenericMap"):
        segment(np.array([[1, 2, 3], [1, 2, 3]]))
    with pytest.raises(ValueError, match="Method must be one of: li, otsu, yen, mean, minimum, triangle, isodata"):
        segment(granule_map, skimage_method="banana")


def test_get_threshold():
    test_arr1 = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
    threshold1 = _get_threshold(test_arr1, "li")
    assert isinstance(threshold1, np.float64)
    # Check that different arrays return different thresholds.
    test_arr2 = np.array([[2, 3, 4, 5, 6], [2, 3, 4, 5, 6]])
    threshold2 = _get_threshold(test_arr2, "li")
    assert threshold1 != threshold2


def test_get_threshold_range():
    # The threshold must lie strictly inside the data range.
    test_arr1 = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
    threshold1 = _get_threshold(test_arr1, "li")
    assert 0 < threshold1 < np.max(test_arr1)


def test_get_threshold_errors():
    with pytest.raises(TypeError, match="Input data must be an instance of a np.ndarray"):
        _get_threshold([], "li")
    with pytest.raises(ValueError, match="Method must be one of: li, otsu, yen, mean, minimum, triangle, isodata"):
        _get_threshold(np.array([[1, 2], [1, 2]]), "banana")


def test_trim_intergranules(granule_map):
    thresholded = np.uint8(granule_map.data > np.nanmedian(granule_map.data))
    # Check that returned array is not empty.
    assert np.size(thresholded) > 0
    # Check that the correct dimensions are returned.
    assert thresholded.shape == _trim_intergranules(thresholded).shape
    # Check that erroneous zero values are caught and re-assigned
    # e.g. inside of pad region, returned array has fewer 0-valued pixels then input
    middles_removed = _trim_intergranules(thresholded)
    pad = int(np.shape(thresholded)[0] / 200)
    # i.e. the trimmed image must have at least as many nonzero pixels as the input
    assert not np.count_nonzero(middles_removed[pad:-pad, pad:-pad]) < np.count_nonzero(thresholded[pad:-pad, pad:-pad])
    # Check that when mark=True, erroneous 0 values are set to 3
    middles_marked = _trim_intergranules(thresholded, mark=True)
    marked_as_3 = np.count_nonzero(middles_marked[middles_marked == 3])
    assert marked_as_3 != 0
    # Check that when mark=False, erroneous 0 values are "removed" (set to 1), returning NO 3 values
    middles_marked = _trim_intergranules(thresholded, mark=False)
    marked_as_3 = np.count_nonzero(middles_marked[middles_marked == 3])
    assert marked_as_3 == 0


def test_trim_intergranules_errors():
    rng = np.random.default_rng()
    # Check that raises error if passed array is not binary.
    data = rng.integers(low=0, high=10, size=(10, 10))
    with pytest.raises(ValueError, match="segmented_image must only have values of 1 and 0."):
        _trim_intergranules(data)


def test_mark_brightpoint(granule_map, granule_map_he):
    thresholded = np.uint8(granule_map.data > np.nanmedian(granule_map_he))
    brightpoint_marked, _, _ = _mark_brightpoint(
        thresholded,
        granule_map.data,
        granule_map_he,
        resolution=0.016,
        bp_min_flux=None,
    )
    # Check that the correct dimensions are returned.
    assert thresholded.shape == brightpoint_marked.shape
    # Check that returned array is not empty.
    assert np.size(brightpoint_marked) > 0
    # Check that the returned array has some pixels of value 2 (for a dataset that we know has brightpoints by eye).
    assert (brightpoint_marked == 2).sum() > 0


def test_mark_brightpoint_error(granule_map, granule_map_he):
    # Check that errors are raised for incorrect granule_map.
    with pytest.raises(ValueError, match="segmented_image must have only"):
        _mark_brightpoint(granule_map.data, granule_map.data, granule_map_he, resolution=0.016, bp_min_flux=None)


def test_segments_overlap_fraction(granule_minimap1):
    # Check that segments_overlap_fraction is 1 when Maps are equal.
    assert segments_overlap_fraction(granule_minimap1, granule_minimap1) == 1.0


def test_segments_overlap_fraction2(granule_minimap1, granule_minimap2):
    # Check that segments_overlap_fraction is between 0 and 1 when Maps are not equal.
    assert segments_overlap_fraction(granule_minimap1, granule_minimap2) <= 1
    assert segments_overlap_fraction(granule_minimap1, granule_minimap2) >= 0


def test_segments_overlap_fraction_errors(granule_minimap3):
    # Check that error is raised if there are no granules or intergranules in image.
    with pytest.raises(Exception, match="clustering failed"):
        segments_overlap_fraction(granule_minimap3, granule_minimap3)
def test_fnrgf(map_test1, map_test2, radial_bin_edges):
    """
    Check FNRGF output against hand-calculated matrices for two small test
    maps at Fourier orders 1 and 5.
    """

    def run_fnrgf(smap, order):
        # Every case in this test shares all parameters except the input map
        # and the Fourier approximation order.
        return rad.fnrgf(
            smap,
            radial_bin_edges=radial_bin_edges,
            order=order,
            mean_attenuation_range=[1.0, 0.0],
            std_attenuation_range=[1.0, 0.0],
            cutoff=0,
            application_radius=0.001 * u.R_sun,
            number_angular_segments=4,
            fill=0,
        )

    # Hand calculated expected outputs.
    result = [
        [-0.0, 96.0, 128.0, 96.0, -0.0],
        [96.0, 224.0, 288.0, 224.0, 96.0],
        [128.0, 288.0, 0.0, 288.0, 128.0],
        [96.0, 224.0, 288.0, 224.0, 96.0],
        [-0.0, 96.0, 128.0, 96.0, -0.0],
    ]
    result1 = [
        [-0.0, 128.0, 128.0, 96.0, -0.0],
        [128.0, 224.0, 288.0, 224.0, 96.0],
        [128.0, 288.0, 0.0, 288.0, 128.0],
        [128.0, 224.0, 288.0, 224.0, 96.0],
        [-0.0, 128.0, 128.0, 96.0, -0.0],
    ]
    result2 = [
        [-0.0, 90.52799999982116, 126.73137084989847, 90.52799999984676, -0.0],
        [90.52800000024544, 207.2, 285.14558441227155, 207.2, 90.5280000001332],
        [126.73137084983244, 285.1455844119744, 0.0, 280.05441558770406, 124.4686291500961],
        [90.52800000015233, 207.2, 280.05441558772844, 207.2, 90.5280000000401],
        [0.0, 90.52799999986772, 124.46862915010152, 90.52799999989331, -0.0],
    ]
    result3 = [
        [-0.0, 120.55347470594926, 126.73137084989847, 90.67852529365966, -0.0],
        [120.70526403418884, 207.2, 285.14558441227155, 207.2, 90.52673596626707],
        [126.73137084983244, 285.1455844119744, 0.0, 280.05441558770406, 124.4686291500961],
        [120.70526403406846, 207.2, 280.05441558772844, 207.2, 90.52673596617021],
        [0.0, 120.55347470601022, 124.46862915010152, 90.67852529370734, -0.0],
    ]

    cases = [
        (map_test1, 1, result),
        (map_test2, 1, result1),
        (map_test1, 5, result2),
        (map_test2, 5, result3),
    ]
    for smap, order, expected in cases:
        out = run_fnrgf(smap, order)
        assert np.allclose(out.data.shape, smap.data.shape)
        assert np.allclose(out.data, expected)
utils.equally_spaced_bins(0, 2, aia_171_map.data.shape[1]) 200 | radial_bin_edges *= u.R_sun 201 | out = rad.rhef(aia_171_map, radial_bin_edges=radial_bin_edges, upsilon=None, method="scipy") 202 | out.plot() 203 | 204 | 205 | @figure_test 206 | @pytest.mark.remote_data() 207 | def test_multifig_rhef(aia_171_map): 208 | radial_bin_edges = utils.equally_spaced_bins(0, 2, aia_171_map.data.shape[1]) 209 | radial_bin_edges *= u.R_sun 210 | 211 | # Define the list of upsilon pairs where the first number affects dark components and the second number affects bright ones 212 | upsilon_list = [ 213 | 0.35, 214 | None, 215 | (0.1, 0.1), 216 | (0.5, 0.5), 217 | (0.8, 0.8), 218 | ] 219 | 220 | # Crop the figures to see better detail 221 | top_right = SkyCoord(1200 * u.arcsec, 0 * u.arcsec, frame=aia_171_map.coordinate_frame) 222 | bottom_left = SkyCoord(0 * u.arcsec, -1200 * u.arcsec, frame=aia_171_map.coordinate_frame) 223 | aia_map_cropped = aia_171_map.submap(bottom_left, top_right=top_right) 224 | fig, axes = plt.subplots( 225 | 2, 3, figsize=(15, 10), sharex="all", sharey="all", subplot_kw={"projection": aia_map_cropped} 226 | ) 227 | axes = axes.flatten() 228 | 229 | aia_map_cropped.plot(axes=axes[0], clip_interval=(1, 99.99) * u.percent) 230 | axes[0].set_title("Original AIA Map") 231 | 232 | # Loop through the upsilon_list and plot each filtered map 233 | for i, upsilon in enumerate(upsilon_list): 234 | out_map = rad.rhef(aia_171_map, upsilon=upsilon, method="scipy") 235 | out_map_crop = out_map.submap(bottom_left, top_right=top_right) 236 | out_map_crop.plot(axes=axes[i + 1]) 237 | axes[i + 1].set_title(f"Upsilon = {upsilon}") 238 | 239 | fig.tight_layout() 240 | 241 | return fig 242 | 243 | def test_set_attenuation_coefficients(): 244 | order = 1 245 | # Hand calculated 246 | expect1 = [[1, 0.0], [1, 0.0]] 247 | 248 | result1 = rad._set_attenuation_coefficients(order) 249 | assert np.allclose(expect1, result1) 250 | 251 | order = 3 252 | # Hand calculated 253 | 
expect2 = [[1.0, 0.66666667, 0.33333333, 0.0], [1.0, 0.66666667, 0.33333333, 0.0]] 254 | 255 | result2 = rad._set_attenuation_coefficients(order) 256 | assert np.allclose(expect2, result2) 257 | 258 | expect3 = [[1.0, 0.66666667, 0.0, 0.0], [1.0, 0.66666667, 0.0, 0.0]] 259 | 260 | result3 = rad._set_attenuation_coefficients(order, cutoff=2) 261 | assert np.allclose(expect3, result3) 262 | 263 | with pytest.raises(ValueError, match="Cutoff cannot be greater than order \\+ 1"): 264 | rad._set_attenuation_coefficients(order, cutoff=5) 265 | 266 | 267 | def test_fit_polynomial_to_log_radial_intensity(): 268 | radii = (0.001, 0.002) * u.R_sun 269 | intensity = np.asarray([1, 2]) 270 | degree = 1 271 | expected = np.polyfit(radii.to(u.R_sun).value, np.log(intensity), degree) 272 | 273 | assert np.allclose(rad._fit_polynomial_to_log_radial_intensity(radii, intensity, degree), expected) 274 | 275 | 276 | def test_calculate_fit_radial_intensity(): 277 | polynomial = np.asarray([1, 2, 3]) 278 | radii = (0.001, 0.002) * u.R_sun 279 | expected = np.exp(np.poly1d(polynomial)(radii.to(u.R_sun).value)) 280 | 281 | assert np.allclose(rad._calculate_fit_radial_intensity(radii, polynomial), expected) 282 | 283 | 284 | def test_normalize_fit_radial_intensity(): 285 | polynomial = np.asarray([1, 2, 3]) 286 | radii = (0.001, 0.002) * u.R_sun 287 | normalization_radii = (0.003, 0.004) * u.R_sun 288 | expected = rad._calculate_fit_radial_intensity(radii, polynomial) / rad._calculate_fit_radial_intensity( 289 | normalization_radii, 290 | polynomial, 291 | ) 292 | 293 | assert np.allclose(rad._normalize_fit_radial_intensity(radii, polynomial, normalization_radii), expected) 294 | 295 | 296 | @skip_windows 297 | def test_intensity_enhance(map_test1): 298 | degree = 1 299 | fit_range = [1, 1.5] * u.R_sun 300 | normalization_radius = 1 * u.R_sun 301 | summarize_bin_edges = "center" 302 | scale = 1 * map_test1.rsun_obs 303 | radial_bin_edges = u.Quantity(utils.equally_spaced_bins()) * u.R_sun 
304 | 305 | radial_intensity = utils.get_radial_intensity_summary(map_test1, radial_bin_edges, scale=scale) 306 | 307 | map_r = utils.find_pixel_radii(map_test1).to(u.R_sun) 308 | 309 | radial_bin_summary = utils.bin_edge_summary(radial_bin_edges, summarize_bin_edges).to(u.R_sun) 310 | 311 | fit_here = np.logical_and( 312 | fit_range[0].to(u.R_sun).value <= radial_bin_summary.to(u.R_sun).value, 313 | radial_bin_summary.to(u.R_sun).value <= fit_range[1].to(u.R_sun).value, 314 | ) 315 | 316 | polynomial = rad._fit_polynomial_to_log_radial_intensity( 317 | radial_bin_summary[fit_here], 318 | radial_intensity[fit_here], 319 | degree, 320 | ) 321 | 322 | enhancement = 1 / rad._normalize_fit_radial_intensity(map_r, polynomial, normalization_radius) 323 | enhancement[map_r < normalization_radius] = 1 324 | 325 | assert np.allclose( 326 | enhancement * map_test1.data, 327 | rad.intensity_enhance(map_test1, radial_bin_edges=radial_bin_edges, scale=scale).data, 328 | ) 329 | 330 | 331 | @skip_windows 332 | def test_intensity_enhance_errors(map_test1): 333 | fit_range = [1, 1.5] * u.R_sun 334 | scale = 1 * map_test1.rsun_obs 335 | with pytest.raises(ValueError, match="The fit range must be strictly increasing."): 336 | rad.intensity_enhance(map_test1, scale=scale, fit_range=fit_range[::-1]) 337 | -------------------------------------------------------------------------------- /sunkit_image/tests/test_stara.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import astropy.units as u 4 | 5 | from sunkit_image.stara import stara 6 | from sunkit_image.tests.helpers import figure_test 7 | 8 | 9 | def test_stara(hmi_map): 10 | hmi_upscaled = hmi_map.resample((512, 512) * u.pixel) 11 | result = stara(hmi_upscaled) 12 | assert isinstance(result, np.ndarray) 13 | assert result.shape == hmi_upscaled.data.shape 14 | total_true_value_count = sum(result.ravel()) 15 | assert total_true_value_count == 5033 16 | 17 | 18 | def 
test_stara_threshold_adjustment(hmi_map): 19 | hmi_upscaled = hmi_map.resample((512, 512) * u.pixel) 20 | # Apply STARA with a lower threshold, expecting to detect more features 21 | lower_threshold_result = stara(hmi_upscaled, threshold=2000) 22 | higher_threshold_result = stara(hmi_upscaled, threshold=8000) 23 | # A lower limb filter, would detect more features 24 | lower_limb_filtered_result = stara(hmi_upscaled, limb_filter=5 * u.percent) 25 | higher_limb_filtered_result = stara(hmi_upscaled, limb_filter=30 * u.percent) 26 | # Assert that the lower threshold results in more features detected 27 | assert lower_threshold_result.sum() > higher_threshold_result.sum(), "Lower threshold should detect more features" 28 | assert ( 29 | lower_limb_filtered_result.sum() > higher_limb_filtered_result.sum() 30 | ), "Lower Limb filter should detect more features" 31 | 32 | 33 | @figure_test 34 | def test_stara_plot(hmi_map): 35 | import matplotlib.pyplot as plt 36 | 37 | hmi_upscaled = hmi_map.resample((1024, 1024) * u.pixel) 38 | segmentation = stara(hmi_upscaled) 39 | fig = plt.figure() 40 | ax = plt.subplot(projection=hmi_upscaled) 41 | hmi_upscaled.plot(axes=ax, autoalign=True) 42 | ax.contour(segmentation, levels=0) 43 | plt.title("Sunspots identified by STARA") 44 | return fig 45 | -------------------------------------------------------------------------------- /sunkit_image/tests/test_time_lag.py: -------------------------------------------------------------------------------- 1 | import dask.array 2 | import numpy as np 3 | import pytest 4 | 5 | import astropy.units as u 6 | 7 | from sunkit_image.time_lag import cross_correlation, get_lags, max_cross_correlation, time_lag 8 | 9 | 10 | @pytest.mark.parametrize( 11 | ("shape_in", "shape_out"), 12 | [((20, 5, 5), (39, 5, 5)), ((100, 10), (199, 10)), ((1000,), (1999,))], 13 | ) 14 | def test_cross_correlation_array_shapes(shape_in, shape_out): 15 | rng = np.random.default_rng() 16 | s_a = rng.random(shape_in) 17 | s_b = 
rng.random(shape_in) 18 | time = np.linspace(0, 1, shape_in[0]) * u.s 19 | lags = get_lags(time) 20 | cc = cross_correlation(s_a, s_b, lags) 21 | assert cc.shape == shape_out 22 | 23 | 24 | @pytest.mark.parametrize("shape", [((5, 5)), ((10,)), ((1,))]) 25 | def test_max_cc_time_lag_array_shapes(shape): 26 | time = np.linspace(0, 1, 10) * u.s 27 | shape_in = time.shape + shape 28 | rng = np.random.default_rng() 29 | s_a = rng.random(shape_in) 30 | s_b = rng.random(shape_in) 31 | max_cc = max_cross_correlation(s_a, s_b, time) 32 | tl = time_lag(s_a, s_b, time) 33 | assert max_cc.shape == shape 34 | assert tl.shape == shape 35 | 36 | 37 | @pytest.mark.parametrize("shape", [((5, 5)), ((10,)), ((1,))]) 38 | def test_time_lag_calculation(shape): 39 | def gaussian_pulse(x, x0, sigma): 40 | return np.exp(-((x - x0) ** 2) / (2 * sigma**2)) 41 | 42 | time = np.linspace(0, 1, 500) * u.s 43 | s_a = gaussian_pulse(time, 0.4 * u.s, 0.02 * u.s) 44 | s_b = gaussian_pulse(time, 0.6 * u.s, 0.02 * u.s) 45 | s_a = s_a * np.ones(shape + time.shape) 46 | s_b = s_b * np.ones(shape + time.shape) 47 | tl = time_lag(s_a.T, s_b.T, time) 48 | assert u.allclose(tl, 0.2 * u.s, rtol=5e-3) 49 | 50 | 51 | @pytest.mark.parametrize( 52 | "shape_in", 53 | [ 54 | ((20, 5, 5)), 55 | ((100, 10)), 56 | ((1000, 1)), 57 | ], 58 | ) 59 | def test_preserve_array_types(shape_in): 60 | rng = np.random.default_rng() 61 | s_a = rng.random(shape_in) 62 | s_b = rng.random(shape_in) 63 | time = np.linspace(0, 1, shape_in[0]) * u.s 64 | # Numpy arrays 65 | max_cc = max_cross_correlation(s_a, s_b, time) 66 | tl = time_lag(s_a, s_b, time) 67 | assert isinstance(max_cc, np.ndarray) 68 | assert isinstance(tl, u.Quantity) 69 | # Dask arrays 70 | s_a = dask.array.from_array(s_a) 71 | s_b = dask.array.from_array(s_b) 72 | max_cc = max_cross_correlation(s_a, s_b, time) 73 | tl = time_lag(s_a, s_b, time) 74 | assert isinstance(max_cc, dask.array.Array) 75 | assert isinstance(tl, dask.array.Array) 76 | 77 | 78 | 
@pytest.mark.parametrize( 79 | "shape_in", 80 | [ 81 | ((20, 5, 5)), 82 | ((100, 10)), 83 | ((1000, 1)), 84 | ], 85 | ) 86 | def test_dask_numpy_consistent(shape_in): 87 | rng = np.random.default_rng() 88 | s_a = rng.random(shape_in) 89 | s_b = rng.random(shape_in) 90 | time = np.linspace(0, 1, shape_in[0]) * u.s 91 | max_cc = max_cross_correlation(s_a, s_b, time) 92 | tl = time_lag(s_a, s_b, time) 93 | s_a = dask.array.from_array(s_a) 94 | s_b = dask.array.from_array(s_b) 95 | max_cc_dask = max_cross_correlation(s_a, s_b, time) 96 | tl_dask = time_lag(s_a, s_b, time) 97 | assert u.allclose(tl, tl_dask.compute(), rtol=0.0, atol=None) 98 | assert u.allclose(max_cc, max_cc_dask.compute(), rtol=0.0, atol=None) 99 | 100 | 101 | @pytest.mark.parametrize( 102 | "shape_in", 103 | [ 104 | ((20, 5, 5)), 105 | ((100, 10)), 106 | ((1000, 1)), 107 | ], 108 | ) 109 | def test_quantity_numpy_consistent(shape_in): 110 | # Test that Quantities can be used as inputs for the signals and that 111 | # it gives equivalent results to using bare numpy arrays 112 | rng = np.random.default_rng() 113 | s_a = rng.random(shape_in) * u.ct / u.s 114 | s_b = rng.random(shape_in) * u.ct / u.s 115 | time = np.linspace(0, 1, shape_in[0]) * u.s 116 | for func in [time_lag, max_cross_correlation]: 117 | result_numpy = func(s_a.value, s_b.value, time) 118 | result_quantity = func(s_a, s_b, time) 119 | assert u.allclose(result_numpy, result_quantity, rtol=0.0, atol=None) 120 | 121 | 122 | @pytest.mark.parametrize( 123 | ("shape_a", "shape_b", "lags", "exception"), 124 | [ 125 | ((10, 1), (10, 1), np.array([-1, -0.5, 0.1, 1]) * u.s, "Lags must be evenly sampled"), 126 | ((10, 2, 3), (10, 2, 4), np.linspace(-1, 1, 19) * u.s, "Signals must have same shape."), 127 | ( 128 | (20, 5), 129 | (20, 5), 130 | np.linspace(-1, 1, 10) * u.s, 131 | "First dimension of signal must be equal in length to time array.", 132 | ), 133 | ], 134 | ) 135 | def test_exceptions(shape_a, shape_b, lags, exception): 136 | rng = 
np.random.default_rng() 137 | s_a = rng.random(shape_a) 138 | s_b = rng.random(shape_b) 139 | with pytest.raises(ValueError, match=exception): 140 | cross_correlation(s_a, s_b, lags) 141 | 142 | 143 | def test_bounds(): 144 | time = np.linspace(0, 1, 10) * u.s 145 | shape = (*time.shape, 5, 5) 146 | rng = np.random.default_rng() 147 | s_a = rng.random(shape) 148 | s_b = rng.random(shape) 149 | bounds = (-0.5, 0.5) * u.s 150 | max_cc = max_cross_correlation(s_a, s_b, time, lag_bounds=bounds) 151 | tl = time_lag(s_a, s_b, time, lag_bounds=bounds) 152 | assert isinstance(max_cc, np.ndarray) 153 | assert isinstance(tl, u.Quantity) 154 | # Make sure this works with Dask and that these are still Dask arrays 155 | s_a = dask.array.from_array(s_a, chunks=s_a.shape) 156 | s_b = dask.array.from_array(s_b, chunks=s_b.shape) 157 | max_cc = max_cross_correlation(s_a, s_b, time, lag_bounds=bounds) 158 | tl = time_lag(s_a, s_b, time, lag_bounds=bounds) 159 | assert isinstance(max_cc, dask.array.Array) 160 | assert isinstance(tl, dask.array.Array) 161 | -------------------------------------------------------------------------------- /sunkit_image/tests/test_trace.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import pytest 6 | 7 | from astropy.io import fits 8 | 9 | import sunpy.map 10 | 11 | import sunkit_image.data.test as data 12 | from sunkit_image.tests.helpers import figure_test 13 | from sunkit_image.trace import ( 14 | _curvature_radius, 15 | _erase_loop_in_image, 16 | _initial_direction_finding, 17 | _loop_add, 18 | bandpass_filter, 19 | occult2, 20 | smooth, 21 | ) 22 | 23 | 24 | @pytest.fixture(params=["array", "map"]) 25 | def image_remote(request): 26 | with warnings.catch_warnings(): 27 | warnings.simplefilter("ignore", category=fits.verify.VerifyWarning) 28 | data, header = fits.getdata( 29 | 
"http://data.sunpy.org/sunkit-image/trace_1998-05-19T22:21:43.000_171_1024.fits", 30 | header=True, 31 | ) 32 | if request.param == "map": 33 | return sunpy.map.Map((data, header)) 34 | if request.param == "array": 35 | return data 36 | msg = f"Invalid request parameter {request.param}" 37 | raise ValueError(msg) 38 | 39 | 40 | @pytest.fixture() 41 | def filepath_IDL(): 42 | return data.get_test_filepath("IDL.txt") 43 | 44 | 45 | @pytest.mark.remote_data() 46 | def test_occult2_remote(image_remote, filepath_IDL): 47 | # Testing on the same input files as in the IDL tutorial 48 | loops = occult2(image_remote, nsm1=3, rmin=30, lmin=25, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 49 | # Taking all the x and y coordinates in separate lists 50 | x = [] 51 | y = [] 52 | for loop in loops: 53 | for points in loop: 54 | x.append(points[0]) 55 | y.append(points[1]) 56 | # Creating a numpy array of all the loop points for ease of comparison 57 | X = np.array(x) 58 | Y = np.array(y) 59 | coords_py = np.c_[X, Y] 60 | # Now we will test on the IDL output data 61 | # Reading the IDL file 62 | expect = np.loadtxt(filepath_IDL) 63 | # Validating the number of loops 64 | assert np.allclose(expect[-1, 0] + 1, len(loops)) 65 | # Taking all the coords from the IDL form 66 | coords_idl = expect[:, 1:3] 67 | # Checking all the coordinates must be close to each other 68 | assert np.allclose(coords_py, coords_idl, atol=1e-5) 69 | # We devise one more test where we will find the distance between the Python and IDL points 70 | # For the algorithm to work correctly this distance should be very small. 71 | diff = coords_idl - coords_py 72 | square_diff = diff**2 73 | sum_diff = np.sum(square_diff, axis=1) 74 | distance = np.sqrt(sum_diff) 75 | # The maximum distance between the IDL points and the Python points was found to be 0.11 pixels. 
76 | assert all(distance < 0.11) 77 | 78 | 79 | @figure_test 80 | @pytest.mark.remote_data() 81 | def test_occult2_fig(image_remote): 82 | loops = occult2(image_remote, nsm1=3, rmin=30, lmin=25, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 83 | fig = plt.figure() 84 | ax = fig.add_subplot(111) 85 | for loop in loops: 86 | x, y = zip(*loop, strict=False) 87 | ax.plot(x, y, "b") 88 | 89 | 90 | @figure_test 91 | def test_occult2_cutout(aia_171_cutout): 92 | loops = occult2(aia_171_cutout, nsm1=3, rmin=30, lmin=25, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 93 | fig = plt.figure() 94 | ax = fig.add_subplot(111) 95 | for loop in loops: 96 | x, y = zip(*loop, strict=False) 97 | ax.plot(x, y, "b") 98 | return fig 99 | 100 | 101 | @pytest.fixture() 102 | def test_image(): 103 | # An image containing a loop in a straight line 104 | ima = np.zeros((3, 3), dtype=np.float32) 105 | ima[0, 1] = 5 106 | ima[1, 1] = 3 107 | ima[2, 1] = 0 108 | return ima 109 | 110 | 111 | @pytest.fixture() 112 | def image_test(): 113 | # An image containing a loop in a straight line 114 | ima = np.zeros((15, 15), dtype=np.float32) 115 | ima[:, 7] = 1 116 | ima[3:12, 7] = [4, 3, 6, 12, 4, 3, 4, 2, 1] 117 | return ima 118 | 119 | 120 | def test_occult2(test_image, image_test): 121 | # The first test were valid loops are detected 122 | loops = occult2(image_test, nsm1=1, rmin=30, lmin=0, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 123 | for loop in loops: 124 | x, y = zip(*loop, strict=False) 125 | plt.plot(x, y, "b") 126 | # From the input image it is clear that all x coordinate are 7 127 | assert np.allclose(np.round(x), np.ones(8) * 7) 128 | # All the y coords are [11, 10, ..., 4] 129 | assert np.allclose(np.round(y), np.arange(11, 3, -1)) 130 | # This check will return an empty list as no loop is detected 131 | loops = occult2(image_test, nsm1=1, rmin=30, lmin=25, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 132 | assert not loops 133 | # This check is used to verify whether 
the RuntimeError is triggered 134 | with pytest.raises(RuntimeError) as record: 135 | occult2(test_image, nsm1=3, rmin=30, lmin=25, nstruc=1000, ngap=0, qthresh1=0.0, qthresh2=3.0) 136 | assert str(record.value) == ( 137 | "The filter size is very large compared to the size of the image." 138 | " The entire image zeros out while smoothing the image edges after filtering." 139 | ) 140 | 141 | 142 | @pytest.fixture() 143 | def test_map(): 144 | map_test = [[1.0, 1.0, 1.0, 1.0], [1.0, 5.0, 5.0, 1.0], [1.0, 5.0, 5.0, 1.0], [1.0, 1.0, 1.0, 1.0]] 145 | return np.array(map_test) 146 | 147 | 148 | @pytest.fixture() 149 | def test_map_ones(): 150 | return np.ones((4, 4), dtype=np.float32) 151 | 152 | 153 | def test_bandpass_filter_ones(test_map_ones): 154 | expect = np.zeros((4, 4)) 155 | result = bandpass_filter(test_map_ones) 156 | 157 | assert np.allclose(expect, result) 158 | 159 | 160 | def test_bandpass_filter(test_map): 161 | expect = np.array( 162 | [ 163 | [0.0, 0.0, 0.0, 0.0], 164 | [0.0, 2.22222222, 2.22222222, 0.0], 165 | [0.0, 2.22222222, 2.22222222, 0.0], 166 | [0.0, 0.0, 0.0, 0.0], 167 | ], 168 | ) 169 | 170 | result = bandpass_filter(test_map) 171 | assert np.allclose(expect, result) 172 | 173 | 174 | def test_bandpass_filter_error(test_map_ones): 175 | with pytest.raises(ValueError, match="nsm1 should be less than nsm2"): 176 | bandpass_filter(test_map_ones, 5, 1) 177 | 178 | 179 | def test_smooth_ones(test_map_ones): 180 | filtered = smooth(test_map_ones, 1) 181 | assert np.allclose(filtered, test_map_ones) 182 | 183 | filtered = smooth(test_map_ones, 4) 184 | assert np.allclose(filtered, test_map_ones) 185 | 186 | 187 | def test_smooth(test_map): 188 | filtered = smooth(test_map, 1) 189 | assert np.allclose(filtered, test_map) 190 | filtered = smooth(test_map, 3) 191 | expect = np.array( 192 | [ 193 | [1.0, 1.0, 1.0, 1.0], 194 | [1.0, 2.77777777, 2.77777777, 1.0], 195 | [1.0, 2.77777777, 2.77777777, 1.0], 196 | [1.0, 1.0, 1.0, 1.0], 197 | ], 198 | ) 199 | 
assert np.allclose(filtered, expect) 200 | 201 | 202 | def test_erase_loop_in_image(test_map_ones, test_map): 203 | # The starting point of a dummy loop 204 | istart = 0 205 | jstart = 1 206 | width = 1 207 | # The coordinates of the dummy loop 208 | xloop = [1, 2, 3] 209 | yloop = [1, 1, 1] 210 | result = _erase_loop_in_image(test_map_ones, istart, jstart, width, xloop, yloop) 211 | expect = np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]) 212 | assert np.allclose(expect, result) 213 | result = _erase_loop_in_image(test_map, istart, jstart, width, xloop, yloop) 214 | expect = np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]) 215 | assert np.allclose(expect, result) 216 | 217 | 218 | def test_initial_direction_finding(test_image): 219 | # The starting point of the loop i.e. the maximumflux position 220 | xstart = 0 221 | ystart = 1 222 | nlen = 30 223 | # The angle returned is with respect to the ``x`` axis. 224 | al = _initial_direction_finding(test_image, xstart, ystart, nlen) 225 | # The angle returned is zero because the image has loop in the ``y`` direction but the function 226 | # assumes the image is transposed so it takes the straight line in the ``x`` direction. 227 | assert np.allclose(al, 0.0) 228 | 229 | 230 | def test_curvature_radius(test_image): 231 | xl = np.zeros((3), dtype=np.float32) 232 | yl = np.zeros((3), dtype=np.float32) 233 | zl = np.zeros((3), dtype=np.float32) 234 | al = np.zeros((3), dtype=np.float32) 235 | ir = np.zeros((3), dtype=np.float32) 236 | xl[0] = 0 237 | yl[0] = 1 238 | zl[0] = 5 239 | al[0] = 0.0 240 | # Using the similar settings in as in the IDL tutorial. 241 | # This is forward tracing where the first point is after the starting point is being traced. 
242 | xl, yl, zl, al = _curvature_radius(test_image, 30, xl, yl, zl, al, ir, 0, 30, 0) 243 | assert np.allclose(np.ceil(xl[1]), 1) 244 | assert np.allclose(np.ceil(yl[1]), 1) 245 | assert np.allclose(zl[1], 3) 246 | # This is forward tracing where the second point is after the starting point is being traced. 247 | xl, yl, zl, al = _curvature_radius(test_image, 30, xl, yl, zl, al, ir, 1, 30, 0) 248 | assert np.allclose(np.ceil(xl[2]), 2) 249 | assert np.allclose(np.ceil(yl[2]), 1) 250 | assert np.allclose(zl[2], 0) 251 | 252 | 253 | @pytest.fixture() 254 | def parameters_add_loop(): 255 | # Here we are creating dummy coordinates and flux for a loop 256 | xloop = np.ones(8, dtype=np.float32) * 7 257 | yloop = np.arange(11, 3, -1, dtype=np.float32) 258 | iloop = 0 259 | np1 = len(xloop) 260 | # Calculate the length of each point 261 | lengths = np.zeros((np1), dtype=np.float32) 262 | for ip in range(1, np1): 263 | lengths[ip] = lengths[ip - 1] + np.sqrt((xloop[ip] - xloop[ip - 1]) ** 2 + (yloop[ip] - yloop[ip - 1]) ** 2) 264 | # The empty structures in which the first loop is stored 265 | loops = [] 266 | return (lengths, xloop, yloop, iloop, loops) 267 | 268 | 269 | def test_add_loop(parameters_add_loop): 270 | # We call the add_loop function and the values should be placed in the structures 271 | loops, iloop = _loop_add(*parameters_add_loop) 272 | expect_loops = [[[7.0, 11.0], [7.0, 10.0], [7.0, 9.0], [7.0, 8.0], [7.0, 7.0], [7.0, 6.0], [7.0, 5.0]]] 273 | assert np.allclose(loops, expect_loops) 274 | assert np.allclose(iloop, 1) 275 | 276 | 277 | def test_parameters_add_loop(parameters_add_loop): 278 | lengths, xloop, yloop, iloop, loops = parameters_add_loop 279 | assert np.allclose(lengths, np.arange(0, 8)) 280 | assert np.allclose(xloop, np.ones(8) * 7) 281 | assert np.allclose(yloop, np.arange(11, 3, -1)) 282 | assert np.allclose(iloop, 0) 283 | assert not loops 284 | -------------------------------------------------------------------------------- 
/sunkit_image/time_lag.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains functions for calculating the cross-correlation and time 3 | lag between intensity cubes. 4 | 5 | Useful for understanding time variability in EUV light curves. 6 | """ 7 | 8 | import numpy as np 9 | 10 | import astropy.units as u 11 | 12 | DASK_INSTALLED = False 13 | try: 14 | import dask.array # do this here so that Dask is not a hard requirement 15 | 16 | DASK_INSTALLED = True 17 | except ImportError: 18 | pass 19 | 20 | __all__ = [ 21 | "cross_correlation", 22 | "get_lags", 23 | "max_cross_correlation", 24 | "time_lag", 25 | ] 26 | 27 | 28 | @u.quantity_input 29 | def get_lags(time: u.s): 30 | """ 31 | Convert an array of evenly spaced times to an array of time lags evenly 32 | spaced between ``-max(time)`` and ``max(time)``. 33 | """ 34 | delta_t = np.diff(time) 35 | if not np.allclose(delta_t, delta_t[0]): 36 | msg = "Times must be evenly sampled" 37 | raise ValueError(msg) 38 | delta_t = delta_t.cumsum(axis=0) 39 | return np.hstack([-delta_t[::-1], np.array([0]), delta_t]) 40 | 41 | 42 | @u.quantity_input 43 | def cross_correlation(signal_a, signal_b, lags: u.s): 44 | r""" 45 | Compute cross-correlation between two signals, as a function of lag. 46 | 47 | By the convolution theorem the cross-correlation between two signals 48 | can be computed as, 49 | 50 | .. math:: 51 | 52 | \mathcal{C}_{AB}(\tau) &= \mathcal{I}_A(t)\star\mathcal{I}_B(t) \\ 53 | &= \mathcal{I}_A(-t)\ast\mathcal{I}_B(t) \\ 54 | &= \mathscr{F}^{-1}\{\mathscr{F}\{\mathcal{I}_A(-t)\}\mathscr{F}\{\mathcal{I}_B(t)\}\} 55 | 56 | where each signal has been centered and scaled by its mean and standard 57 | deviation, 58 | 59 | .. math:: 60 | 61 | \mathcal{I}_c(t)=\frac{I_c(t)-\bar{I}_c}{\sigma_{c}} 62 | 63 | Additionally, :math:`\mathcal{C}_{AB}` is normalized by the length of 64 | the time series. 
65 | 66 | Parameters 67 | ----------- 68 | signal_a : array-like 69 | The first dimension should correspond to the time dimension 70 | and must have length ``(len(lags) + 1)/2``. 71 | signal_b : array-like 72 | Must have the same dimensions as ``signal_a``. 73 | lags : `~astropy.units.Quantity` 74 | Evenly spaced time lags corresponding to the time dimension of 75 | ``signal_a`` and ``signal_b`` running from ``-max(time)`` to 76 | ``max(time)``. This is easily constructed using :func:`get_lags` 77 | 78 | Returns 79 | ------- 80 | array-like 81 | Cross-correlation as a function of ``lags``. The first dimension will be 82 | the same as that of ``lags`` and the subsequent dimensions will be 83 | consistent with dimensions of ``signal_a`` and ``signal_b``. 84 | 85 | See Also 86 | --------- 87 | get_lags 88 | time_lag 89 | max_cross_correlation 90 | 91 | References 92 | ----------- 93 | * https://en.wikipedia.org/wiki/Convolution_theorem 94 | * Viall, N.M. and Klimchuk, J.A. 95 | Evidence for Widespread Cooling in an Active Region Observed with the SDO Atmospheric Imaging Assembly 96 | ApJ, 753, 35, 2012 97 | (https://doi.org/10.1088/0004-637X/753/1/35) 98 | * Appendix C in Barnes, W.T., Bradshaw, S.J., Viall, N.M. 99 | Understanding Heating in Active Region Cores through Machine Learning. I. Numerical Modeling and Predicted Observables 100 | ApJ, 880, 56, 2019 101 | (https://doi.org/10.3847/1538-4357/ab290c) 102 | """ 103 | # NOTE: it is assumed that the arrays have already been appropriately 104 | # interpolated and chunked (if using Dask) 105 | delta_lags = np.diff(lags) 106 | if not u.allclose(delta_lags, delta_lags[0]): 107 | msg = "Lags must be evenly sampled" 108 | raise ValueError(msg) 109 | n_time = (lags.shape[0] + 1) // 2 110 | if signal_a.shape != signal_b.shape: 111 | msg = "Signals must have same shape." 112 | raise ValueError(msg) 113 | if signal_a.shape[0] != n_time: 114 | msg = "First dimension of signal must be equal in length to time array." 
115 | raise ValueError(msg) 116 | # Reverse the first timeseries 117 | signal_a = signal_a[::-1] 118 | # Normalize by mean and standard deviation 119 | fill_value = signal_a.max() 120 | std_a = signal_a.std(axis=0) 121 | # Avoid dividing by zero by replacing with some non-zero dummy value. Note that 122 | # what this value is does not matter as it will be multiplied by zero anyway 123 | # since std_dev == 0 any place that signal - signal_mean == 0. We use the max 124 | # of the signal as the fill_value in order to support Quantities. 125 | std_a = np.where(std_a == 0, fill_value, std_a) 126 | v_a = (signal_a - signal_a.mean(axis=0)[np.newaxis]) / std_a[np.newaxis] 127 | std_b = signal_b.std(axis=0) 128 | std_b = np.where(std_b == 0, fill_value, std_b) 129 | v_b = (signal_b - signal_b.mean(axis=0)[np.newaxis]) / std_b[np.newaxis] 130 | # Cross-correlation is inverse of product of FFTS (by convolution theorem) 131 | fft_a = np.fft.rfft(v_a, axis=0, n=lags.shape[0]) 132 | fft_b = np.fft.rfft(v_b, axis=0, n=lags.shape[0]) 133 | cc = np.fft.irfft(fft_a * fft_b, axis=0, n=lags.shape[0]) 134 | # Normalize by the length of the timeseries 135 | return cc / signal_a.shape[0] 136 | 137 | 138 | def _get_bounds_indices(lags, bounds): 139 | # The start and stop indices are computed in this way 140 | # because Dask does not like "fancy" multidimensional indexing 141 | start = 0 142 | stop = lags.shape[0] + 1 143 | if bounds is not None: 144 | (indices,) = np.where(np.logical_and(lags >= bounds[0], lags <= bounds[1])) 145 | start = indices[0] 146 | stop = indices[-1] + 1 147 | return start, stop 148 | 149 | 150 | def _dask_check(lags, indices): 151 | # In order for the time lag to be returned as a Dask array, the lags array, 152 | # which is, in general, a Quantity, must also be a Dask array. 153 | # This function is needed for two reasons: 154 | # 1. 
astropy.units.Quantity do not play nice with each other and their behavior 155 | # seems to vary from one numpy version to the next. To avoid this ill-defined 156 | # behavior, we will do all of our Dask-ing on a Dask array created from the 157 | # bare numpy array and re-attach the units at the end. 158 | # 2. Dask arrays do not like "fancy" multidimensional indexing. Therefore, we must 159 | # flatten the indices first and then reshape the time lag array in order to 160 | # preserve the laziness of the array evaluation. 161 | if DASK_INSTALLED and isinstance(indices, dask.array.Array): 162 | lags_lazy = dask.array.from_array(lags.value, chunks=lags.shape) 163 | lags_select = lags_lazy[indices.flatten()].reshape(indices.shape) 164 | # NOTE: Reset array priority to force multiplication to defer to Dask 165 | # rather than Quantity. See https://github.com/sunpy/sunkit-image/issues/260 166 | # for more information. 167 | old_priority = lags_select.__array_priority__ 168 | lags_select.__array_priority__ = lags.unit.__array_priority__ + 1 169 | lags_select = lags_select * lags.unit 170 | lags_select.__array_priority__ = old_priority 171 | return lags_select 172 | return lags[indices] 173 | 174 | 175 | @u.quantity_input 176 | def time_lag(signal_a, signal_b, time: u.s, lag_bounds: u.s = None, **kwargs): 177 | r""" 178 | Compute the time lag that maximizes the cross-correlation between 179 | ``signal_a`` and ``signal_b``. 180 | 181 | For a pair of signals :math:`A,B`, e.g. time series from two EUV channels 182 | on AIA, the time lag is the lag which maximizes the cross-correlation, 183 | 184 | .. math:: 185 | 186 | \tau_{AB} = \mathop{\mathrm{arg\,max}}_{\tau}\mathcal{C}_{AB}, 187 | 188 | where :math:`\mathcal{C}_{AB}` is the cross-correlation as a function of 189 | lag (computed in :func:`cross_correlation`). Qualitatively, this can be 190 | thought of as how much ``signal_a`` needs to be shifted in time to best 191 | "match" ``signal_b``. 
Note that the sign of :math:`\tau_{AB}` is determined 192 | by the ordering of the two signals such that, 193 | 194 | .. math:: 195 | 196 | \tau_{AB} = -\tau_{BA}. 197 | 198 | Parameters 199 | ---------- 200 | signal_a : array-like 201 | The first dimension must be the same length as ``time``. 202 | signal_b : array-like 203 | Must have the same dimensions as ``signal_a``. 204 | time : `~astropy.units.Quantity` 205 | Time array corresponding to the intensity time series 206 | ``signal_a`` and ``signal_b``. 207 | lag_bounds : `~astropy.units.Quantity`, optional 208 | Minimum and maximum lag to consider when finding the time 209 | lag that maximizes the cross-correlation. This is useful 210 | for minimizing boundary effects. 211 | 212 | Other Parameters 213 | ---------------- 214 | array_check_hook : function 215 | Function to apply to the resulting time lag. This should take in the 216 | ``lags`` array and the indices that specify the location of the maximum of the 217 | cross-correlation and return an array that has used those indices to select 218 | the ``lags`` which maximize the cross-correlation. As an example, if ``lags`` 219 | and ``indices`` are both `~numpy.ndarray` objects, this would just return 220 | ``lags[indices]``. It is probably only necessary to specify this if you 221 | are working with arrays that are something other than a `~numpy.ndarray` 222 | or `~dask.array.Array` object. 223 | 224 | Returns 225 | ------- 226 | array-like 227 | Lag which maximizes the cross-correlation. The dimensions will be 228 | consistent with those of ``signal_a`` and ``signal_b``, i.e. if the 229 | input arrays are of dimension ``(K,M,N)``, the resulting array 230 | will have dimensions ``(M,N)``. Similarly, if the input signals 231 | are one-dimensional time series ``(K,)``, the result will have 232 | dimension ``(1,)``. 233 | 234 | References 235 | ---------- 236 | * Viall, N.M. and Klimchuk, J.A.
237 | Evidence for Widespread Cooling in an Active Region Observed with the SDO Atmospheric Imaging Assembly 238 | ApJ, 753, 35, 2012 239 | (https://doi.org/10.1088/0004-637X/753/1/35) 240 | """ 241 | array_check = kwargs.get("array_check_hook", _dask_check) 242 | lags = get_lags(time) 243 | cc = cross_correlation(signal_a, signal_b, lags) 244 | start, stop = _get_bounds_indices(lags, lag_bounds) 245 | i_max_cc = cc[start:stop].argmax(axis=0) 246 | return array_check(lags[start:stop], i_max_cc) 247 | 248 | 249 | @u.quantity_input 250 | def max_cross_correlation(signal_a, signal_b, time: u.s, lag_bounds: u.s = None): 251 | """ 252 | Compute the maximum value of the cross-correlation between ``signal_a`` and 253 | ``signal_b``. 254 | 255 | This is the maximum value of the cross-correlation as a function of 256 | lag (computed in :func:`cross_correlation`). This will always be between 257 | -1 (perfectly anti-correlated) and +1 (perfectly correlated) though 258 | in practice is nearly always between 0 and +1. 259 | 260 | Parameters 261 | ---------- 262 | signal_a : array-like 263 | The first dimension must be the same length as ``time``. 264 | signal_b : array-like 265 | Must have the same dimensions as ``signal_a``. 266 | time : `~astropy.units.Quantity` 267 | Time array corresponding to the intensity time series 268 | ``signal_a`` and ``signal_b``. 269 | lag_bounds : `~astropy.units.Quantity`, optional 270 | Minimum and maximum lag to consider when finding the time 271 | lag that maximizes the cross-correlation. This is useful 272 | for minimizing boundary effects. 273 | 274 | Returns 275 | ------- 276 | array-like 277 | Maximum value of the cross-correlation. The dimensions will be 278 | consistent with those of ``signal_a`` and ``signal_b``, i.e. if the 279 | input arrays are of dimension ``(K,M,N)``, the resulting array 280 | will have dimensions ``(M,N)``.
Similarly, if the input signals 281 | are one-dimensional time series ``(K,)``, the result will have 282 | dimension ``(1,)``. 283 | 284 | References 285 | ---------- 286 | * Viall, N.M. and Klimchuk, J.A. 287 | Evidence for Widespread Cooling in an Active Region Observed with the SDO Atmospheric Imaging Assembly 288 | ApJ, 753, 35, 2012 289 | (https://doi.org/10.1088/0004-637X/753/1/35) 290 | """ 291 | lags = get_lags(time) 292 | cc = cross_correlation(signal_a, signal_b, lags) 293 | start, stop = _get_bounds_indices(lags, lag_bounds) 294 | return cc[start:stop].max(axis=0) 295 | -------------------------------------------------------------------------------- /sunkit_image/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .decorators import * 2 | from .noise import * 3 | from .utils import * 4 | -------------------------------------------------------------------------------- /sunkit_image/utils/decorators.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | from functools import wraps 3 | from collections.abc import Callable 4 | 5 | import numpy as np 6 | 7 | from sunpy.map import GenericMap, Map 8 | 9 | __all__ = ["accept_array_or_map"] 10 | 11 | 12 | def accept_array_or_map(*, arg_name: str, output_to_map=True) -> Callable[[Callable], Callable]: 13 | """ 14 | Decorator that allows a function to accept an array or a 15 | `sunpy.map.GenericMap` as an argument. 16 | 17 | This can be applied to functions that: 18 | 19 | - Take a single array or map as input 20 | - Return a single array that has the same pixel coordinates 21 | as the input array. 22 | 23 | Parameters 24 | ---------- 25 | arg_name : `str` 26 | Name of data/map argument in function signature. 27 | output_to_map : `bool`, optional 28 | If `True` (the default), convert the function return to a map if a map 29 | is given as input. 
For this to work the decorated function must return 30 | an array where pixels have the same coordinates as the input map data. 31 | """ 32 | 33 | def decorate(f: Callable) -> Callable: 34 | sig = inspect.signature(f) 35 | if arg_name not in sig.parameters: 36 | msg = f"Could not find '{arg_name}' in function signature" 37 | raise RuntimeError(msg) 38 | 39 | @wraps(f) 40 | def inner(*args, **kwargs) -> np.ndarray | GenericMap: 41 | sig_bound = sig.bind(*args, **kwargs) 42 | map_arg = sig_bound.arguments[arg_name] 43 | if isinstance(map_arg, GenericMap): 44 | map_in = True 45 | sig_bound.arguments[arg_name] = map_arg.data 46 | elif isinstance(map_arg, np.ndarray): 47 | map_in = False 48 | else: 49 | msg = f"'{arg_name}' argument must be a sunpy map or numpy array (got type {type(map_arg)})" 50 | raise TypeError(msg) 51 | # Run decorated function 52 | array_out = f(*sig_bound.args, **sig_bound.kwargs) 53 | return Map(array_out, map_arg.meta) if map_in and output_to_map else array_out 54 | 55 | return inner 56 | 57 | return decorate 58 | -------------------------------------------------------------------------------- /sunkit_image/utils/noise.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module implements a series of functions for noise level estimation. 3 | """ 4 | 5 | import numpy as np 6 | from scipy.ndimage import correlate 7 | from scipy.stats import gamma 8 | from skimage.util import view_as_windows 9 | 10 | __all__ = ["conv2d_matrix", "noise_estimation", "noiselevel", "weak_texture_mask"] 11 | 12 | 13 | def noise_estimation(img, patchsize=7, decim=0, confidence=1 - 1e-6, iterations=3): 14 | """ 15 | Estimates the noise level of an image. 16 | 17 | Additive white Gaussian noise (AWGN) is a basic noise model used in Information Theory 18 | to mimic the effect of many random processes that occur in nature. 19 | 20 | Parameters 21 | ---------- 22 | img: `numpy.ndarray` 23 | Single Numpy image array. 
24 | patchsize : `int`, optional 25 | Patch size, defaults to 7. 26 | decim : `int`, optional 27 | Decimation factor, defaults to 0. 28 | If you use a large number, the calculation will be accelerated. 29 | confidence : `float`, optional 30 | Confidence interval to determine the threshold for the weak texture. 31 | In this algorithm, this value is usually set very close to one. 32 | Defaults to ``1 - 1e-6``. 33 | iterations : `int`, optional 34 | Number of iterations, defaults to 3. 35 | 36 | Returns 37 | ------- 38 | `dict` 39 | A dictionary containing the estimated noise levels, ``nlevel``; threshold to extract weak texture 40 | patches at the last iteration, ``thresh``; number of extracted weak texture patches, ``num``; and the 41 | weak texture mask, ``mask``. 42 | 43 | Examples 44 | -------- 45 | >>> import numpy as np 46 | >>> rng = np.random.default_rng(0) 47 | >>> noisy_image_array = rng.standard_normal((100, 100)) 48 | >>> estimate = noise_estimation(noisy_image_array, patchsize=11, iterations=10) 49 | >>> estimate["mask"] # doctest: +SKIP 50 | array([[1., 1., 1., ..., 1., 1., 0.], 51 | [1., 1., 1., ..., 1., 1., 0.], 52 | [1., 1., 1., ..., 1., 1., 0.], 53 | ..., 54 | [1., 1., 1., ..., 1., 1., 0.], 55 | [1., 1., 1., ..., 1., 1., 0.], 56 | [0., 0., 0., ..., 0., 0., 0.]]) 57 | >>> estimate["nlevel"] # doctest: +SKIP 58 | array([0.97398633]) 59 | >>> estimate["thresh"] # doctest: +SKIP 60 | array([164.21965135]) 61 | >>> estimate["num"] # doctest: +SKIP 62 | array([8100.]) 63 | 64 | References 65 | ---------- 66 | * Xinhao Liu, Masayuki Tanaka and Masatoshi Okutomi 67 | Noise Level Estimation Using Weak Textured Patches of a Single Noisy Image 68 | IEEE International Conference on Image Processing (ICIP), 2012.
69 | DOI: 10.1109/ICIP.2012.6466947 70 | 71 | * Xinhao Liu, Masayuki Tanaka and Masatoshi Okutomi 72 | Single-Image Noise Level Estimation for Blind Denoising Noisy Image 73 | IEEE Transactions on Image Processing, Vol.22, No.12, pp.5226-5237, December, 2013. 74 | DOI: 10.1109/TIP.2013.2283400 75 | """ 76 | try: 77 | img = np.array(img) 78 | except ValueError as e: 79 | msg = "Input image should be a numpy ndarray, or ndarray-compatible" 80 | raise TypeError(msg) from e 81 | 82 | try: 83 | patchsize = int(patchsize) 84 | except ValueError as e: 85 | msg = "patchsize must be an integer, or int-compatible" 86 | raise TypeError(msg) from e 87 | 88 | try: 89 | decim = int(decim) 90 | except ValueError as e: 91 | msg = "decim must be an integer, or int-compatible" 92 | raise TypeError(msg) from e 93 | 94 | try: 95 | confidence = float(confidence) 96 | except ValueError as e: 97 | msg = "confidence must be a float, or float-compatible, value between 0 and 1" 98 | raise TypeError(msg) from e 99 | 100 | if confidence < 0 or confidence > 1: 101 | msg = "confidence must be defined in the interval 0 <= confidence <= 1" 102 | raise ValueError(msg) 103 | 104 | try: 105 | iterations = int(iterations) 106 | except ValueError as e: 107 | msg = "iterations must be an integer, or int-compatible." 108 | raise TypeError(msg) from e 109 | 110 | nlevel, thresh, num = noiselevel(img, patchsize, decim, confidence, iterations) 111 | mask = weak_texture_mask(img, patchsize, thresh) 112 | 113 | return {"nlevel": nlevel, "thresh": thresh, "num": num, "mask": mask} 114 | 115 | 116 | def noiselevel(img, patchsize, decim, confidence, iterations): 117 | """ 118 | Calculates the noise level of the input array. 119 | 120 | Parameters 121 | ---------- 122 | img: `numpy.ndarray` 123 | Single Numpy image array. 124 | patchsize : `int`, optional 125 | Patch size, defaults to 7. 126 | decim : `int`, optional 127 | Decimation factor, defaults to 0. 
128 | If you use a large number, the calculation will be accelerated. 129 | confidence : `float`, optional 130 | Confidence interval to determine the threshold for the weak texture. 131 | In this algorithm, this value is usually set very close to one. 132 | In `noise_estimation` this defaults to ``1 - 1e-6``. 133 | iterations : `int`, optional 134 | Number of iterations, defaults to 3. 135 | 136 | Returns 137 | ------- 138 | `tuple` 139 | A tuple containing the estimated noise levels, threshold to extract weak texture 140 | patches at the last iteration, and number of extracted weak texture patches. 141 | """ 142 | if len(img.shape) < 3: 143 | img = np.expand_dims(img, 2) 144 | 145 | nlevel = np.ndarray(img.shape[2]) 146 | thresh = np.ndarray(img.shape[2]) 147 | num = np.ndarray(img.shape[2]) 148 | 149 | kh = np.expand_dims(np.expand_dims(np.array([-0.5, 0, 0.5]), 0), 2) 150 | imgh = correlate(img, kh, mode="nearest") 151 | imgh = imgh[:, 1 : imgh.shape[1] - 1, :] 152 | imgh = imgh * imgh 153 | 154 | kv = np.expand_dims(np.vstack(np.array([-0.5, 0, 0.5])), 2) 155 | imgv = correlate(img, kv, mode="nearest") 156 | imgv = imgv[1 : imgv.shape[0] - 1, :, :] 157 | imgv = imgv * imgv 158 | 159 | Dh = conv2d_matrix(np.squeeze(kh, 2), patchsize, patchsize) 160 | Dv = conv2d_matrix(np.squeeze(kv, 2), patchsize, patchsize) 161 | 162 | DD = np.transpose(Dh) @ Dh + np.transpose(Dv) @ Dv 163 | 164 | r = np.double(np.linalg.matrix_rank(DD)) 165 | Dtr = np.trace(DD) 166 | 167 | tau0 = gamma.ppf(confidence, r / 2, scale=(2 * Dtr / r)) 168 | 169 | for cha in range(img.shape[2]): 170 | X = view_as_windows(img[:, :, cha], (patchsize, patchsize)) 171 | X = X.reshape(int(X.size / patchsize**2), patchsize**2, order="F").transpose() 172 | 173 | Xh = view_as_windows(imgh[:, :, cha], (patchsize, patchsize - 2)) 174 | Xh = Xh.reshape( 175 | int(Xh.size / ((patchsize - 2) * patchsize)), 176 | ((patchsize - 2) * patchsize), 177 | order="F", 178 | ).transpose() 179 | 180 | Xv = view_as_windows(imgv[:, :, cha], (patchsize
- 2, patchsize)) 181 | Xv = Xv.reshape( 182 | int(Xv.size / ((patchsize - 2) * patchsize)), 183 | ((patchsize - 2) * patchsize), 184 | order="F", 185 | ).transpose() 186 | 187 | Xtr = np.expand_dims(np.sum(np.concatenate((Xh, Xv), axis=0), axis=0), 0) 188 | 189 | if decim > 0: 190 | XtrX = np.transpose(np.concatenate((Xtr, X), axis=0)) 191 | XtrX = np.transpose(XtrX[XtrX[:, 0].argsort(),]) 192 | p = np.floor(XtrX.shape[1] / (decim + 1)) 193 | p = np.expand_dims(np.arange(0, p) * (decim + 1), 0) 194 | Xtr = XtrX[0, p.astype("int")] 195 | X = np.squeeze(XtrX[1 : XtrX.shape[1], p.astype("int")]) 196 | 197 | # noise level estimation 198 | tau = np.inf 199 | 200 | if X.shape[1] < X.shape[0]: 201 | sig2 = 0 202 | else: 203 | cov = (X @ np.transpose(X)) / (X.shape[1] - 1) 204 | d = np.flip(np.linalg.eig(cov)[0], axis=0) 205 | sig2 = d[0] 206 | 207 | for _ in range(1, iterations): 208 | # weak texture selection 209 | tau = sig2 * tau0 210 | p = Xtr < tau 211 | Xtr = Xtr[p] 212 | X = X[:, np.squeeze(p)] 213 | 214 | # noise level estimation 215 | if X.shape[1] < X.shape[0]: 216 | break 217 | 218 | cov = (X @ np.transpose(X)) / (X.shape[1] - 1) 219 | d = np.flip(np.linalg.eig(cov)[0], axis=0) 220 | sig2 = d[0] 221 | 222 | nlevel[cha] = np.sqrt(sig2) 223 | thresh[cha] = tau 224 | num[cha] = X.shape[1] 225 | 226 | # clean up 227 | img = np.squeeze(img) 228 | 229 | return nlevel, thresh, num 230 | 231 | 232 | def conv2d_matrix(H, rows, columns): 233 | """ 234 | Specialized 2D convolution matrix generation. 235 | 236 | Parameters 237 | ---------- 238 | H : `numpy.ndarray` 239 | Input matrix. 240 | rows : `numpy.ndarray` 241 | Rows in convolution matrix. 242 | columns : `numpy.ndarray` 243 | Columns in convolution matrix. 244 | 245 | Returns 246 | ------- 247 | T : `numpy.ndarray` 248 | The new convoluted matrix. 
249 | """ 250 | s = np.shape(H) 251 | rows = int(rows) 252 | columns = int(columns) 253 | 254 | matr_row = rows - s[0] + 1 255 | matr_column = columns - s[1] + 1 256 | 257 | T = np.zeros([matr_row * matr_column, rows * columns]) 258 | 259 | k = 0 260 | for i in range(matr_row): 261 | for j in range(matr_column): 262 | for p in range(s[0]): 263 | start = (i + p) * columns + j 264 | T[k, start : start + s[1]] = H[p, :] 265 | 266 | k += 1 267 | return T 268 | 269 | 270 | def weak_texture_mask(img, patchsize, thresh): 271 | """ 272 | Calculates the weak texture mask. 273 | 274 | Parameters 275 | ---------- 276 | img: `numpy.ndarray` 277 | Single Numpy image array. 278 | patchsize : `int`, optional 279 | Patch size, defaults to 7. 280 | thresh: `numpy.ndarray` 281 | Threshold to extract weak texture patches at the last iteration. 282 | 283 | Returns 284 | ------- 285 | mask: `numpy.ndarray` 286 | Weak-texture mask. 0 and 1 represent non-weak-texture and weak-texture regions, respectively. 287 | """ 288 | if img.ndim < 3: 289 | img = np.expand_dims(img, 2) 290 | 291 | kh = np.expand_dims(np.transpose(np.vstack(np.array([-0.5, 0, 0.5]))), 2) 292 | imgh = correlate(img, kh, mode="nearest") 293 | imgh = imgh[:, 1 : imgh.shape[1] - 1, :] 294 | imgh = imgh * imgh 295 | 296 | kv = np.expand_dims(np.vstack(np.array([-0.5, 0, 0.5])), 1) 297 | imgv = correlate(img, kv, mode="nearest") 298 | imgv = imgv[1 : imgv.shape[0] - 1, :, :] 299 | imgv = imgv * imgv 300 | 301 | s = img.shape 302 | msk = np.zeros_like(img) 303 | 304 | for cha in range(s[2]): 305 | m = view_as_windows(img[:, :, cha], (patchsize, patchsize)) 306 | m = np.zeros_like(m.reshape(int(m.size / patchsize**2), patchsize**2, order="F").transpose()) 307 | 308 | Xh = view_as_windows(imgh[:, :, cha], (patchsize, patchsize - 2)) 309 | Xh = Xh.reshape( 310 | int(Xh.size / ((patchsize - 2) * patchsize)), 311 | ((patchsize - 2) * patchsize), 312 | order="F", 313 | ).transpose() 314 | 315 | Xv = view_as_windows(imgv[:, :, 
cha], (patchsize - 2, patchsize)) 316 | Xv = Xv.reshape( 317 | int(Xv.size / ((patchsize - 2) * patchsize)), 318 | ((patchsize - 2) * patchsize), 319 | order="F", 320 | ).transpose() 321 | 322 | Xtr = np.expand_dims(np.sum(np.concatenate((Xh, Xv), axis=0), axis=0), 0) 323 | 324 | p = Xtr < thresh[cha] 325 | ind = 0 326 | 327 | for col in range(s[1] - patchsize + 1): 328 | for row in range(s[0] - patchsize + 1): 329 | if p[:, ind]: 330 | msk[row : row + patchsize - 1, col : col + patchsize - 1, cha] = 1 331 | ind = ind + 1 332 | 333 | # clean up 334 | img = np.squeeze(img) 335 | 336 | return np.squeeze(msk) 337 | -------------------------------------------------------------------------------- /sunkit_image/utils/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunpy/sunkit-image/daf283f0745437bdf07a39037e0d37f1003b8b86/sunkit_image/utils/tests/__init__.py -------------------------------------------------------------------------------- /sunkit_image/utils/tests/test_noise.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | from skimage import data 4 | 5 | import sunkit_image.utils.noise as nf 6 | 7 | 8 | @pytest.fixture() 9 | def img(): 10 | return data.camera() 11 | 12 | 13 | def test_conv2d_matrix_size(img): 14 | tt = nf.conv2d_matrix(img, 11.0, 7.0) 15 | assert tt.shape == ((11 - img.shape[0] + 1) * (7 - img.shape[1] + 1), 11.0 * 7.0) 16 | 17 | 18 | def test_noiselevel(img): 19 | noise_levels = np.array([5.0, 10.0, 20.0, 42.0]) 20 | n_levels = np.zeros_like(noise_levels) 21 | n_patches = np.zeros_like(noise_levels) 22 | 23 | rng = np.random.default_rng() 24 | for n in range(noise_levels.size): 25 | noise = img + rng.standard_normal(img.shape) * noise_levels[n] 26 | output = nf.noise_estimation(noise, patchsize=11, iterations=5) 27 | n_levels[n] = output["nlevel"][0] 28 | n_patches[n] = output["num"][0] 
29 | 30 | assert np.abs(1 - n_levels.all() / noise_levels.all()) < 0.1 31 | assert all(n_patches > 10000.0) 32 | 33 | 34 | def test_weak_texture_mask(img): 35 | noise_levels = 5 36 | rng = np.random.default_rng() 37 | noise = img + rng.standard_normal(img.shape) * noise_levels 38 | output = nf.noise_estimation(noise, patchsize=11, iterations=5) 39 | 40 | assert np.sum(output["mask"]) / output["mask"].size < 1.0 41 | -------------------------------------------------------------------------------- /sunkit_image/utils/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | import astropy.units as u 7 | from astropy.tests.helper import assert_quantity_allclose 8 | 9 | import sunkit_image.utils as utils 10 | from sunkit_image import asda 11 | from sunkit_image.data.test import get_test_filepath 12 | 13 | 14 | def test_equally_spaced_bins(): 15 | # test the default 16 | esb = utils.equally_spaced_bins() 17 | assert esb.shape == (2, 100) 18 | assert esb[0, 0] == 1.0 19 | assert esb[1, 0] == 1.01 20 | assert esb[0, 99] == 1.99 21 | assert esb[1, 99] == 2.00 22 | # Bins are 0.015 wide 23 | esb2 = utils.equally_spaced_bins(inner_value=0.5) 24 | assert esb2.shape == (2, 100) 25 | assert esb2[0, 0] == 0.5 26 | assert esb2[1, 0] == 0.515 27 | assert esb2[0, 99] == 1.985 28 | assert esb2[1, 99] == 2.00 29 | # Bins are 0.2 wide 30 | esb2 = utils.equally_spaced_bins(outer_value=3.0) 31 | assert esb2.shape == (2, 100) 32 | assert esb2[0, 0] == 1.0 33 | assert esb2[1, 0] == 1.02 34 | assert esb2[0, 99] == 2.98 35 | assert esb2[1, 99] == 3.00 36 | # Bins are 0.01 wide 37 | esb2 = utils.equally_spaced_bins(nbins=1000) 38 | assert esb2.shape == (2, 1000) 39 | assert esb2[0, 0] == 1.0 40 | assert esb2[1, 0] == 1.001 41 | assert esb2[0, 999] == 1.999 42 | assert esb2[1, 999] == 2.000 43 | # The radii have the correct relative sizes 44 | with pytest.raises(ValueError, 
match="The inner value must be strictly less than the outer value."): 45 | utils.equally_spaced_bins(inner_value=1.0, outer_value=1.0) 46 | with pytest.raises(ValueError, match="The inner value must be strictly less than the outer value."): 47 | utils.equally_spaced_bins(inner_value=1.5, outer_value=1.0) 48 | # The number of bins is strictly greater than 0 49 | with pytest.raises(ValueError, match="The number of bins must be strictly greater than 0."): 50 | utils.equally_spaced_bins(nbins=0) 51 | 52 | 53 | def test_bin_edge_summary(): 54 | esb = utils.equally_spaced_bins() 55 | center = utils.bin_edge_summary(esb, "center") 56 | assert center.shape == (100,) 57 | assert center[0] == 1.005 58 | assert center[99] == 1.995 59 | left = utils.bin_edge_summary(esb, "left") 60 | assert left.shape == (100,) 61 | assert left[0] == 1.0 62 | assert left[99] == 1.99 63 | right = utils.bin_edge_summary(esb, "right") 64 | assert right.shape == (100,) 65 | assert right[0] == 1.01 66 | assert right[99] == 2.0 67 | # Correct selection of summary type 68 | with pytest.raises(ValueError, match='Keyword "binfit" must have value "center", "left" or "right"'): 69 | utils.bin_edge_summary(esb, "should raise the error") 70 | # The correct shape of bin edges are passed in 71 | with pytest.raises(ValueError, match="The bin edges must be two-dimensional with shape \\(2, nbins\\)"): 72 | utils.bin_edge_summary(np.arange(0, 10), "center") 73 | with pytest.raises(ValueError, match="The bin edges must be two-dimensional with shape \\(2, nbins\\)"): 74 | utils.bin_edge_summary(np.zeros((3, 4)), "center") 75 | 76 | 77 | @pytest.mark.remote_data() 78 | def test_find_pixel_radii(aia_171): 79 | if isinstance(aia_171, np.ndarray): 80 | pytest.skip("This test is not compatible with numpy arrays") 81 | # The known maximum radius 82 | known_maximum_pixel_radius = 1.84183121 83 | # Calculate the pixel radii 84 | pixel_radii = utils.find_pixel_radii(aia_171) 85 | # The shape of the pixel radii is the same 
as the input map 86 | assert pixel_radii.shape[0] == int(aia_171.dimensions[0].value) 87 | assert pixel_radii.shape[1] == int(aia_171.dimensions[1].value) 88 | # Make sure the unit is solar radii 89 | assert pixel_radii.unit == u.R_sun 90 | # Make sure the maximum 91 | assert_quantity_allclose((np.max(pixel_radii)).value, known_maximum_pixel_radius) 92 | # Test that the new scale is used 93 | pixel_radii = utils.find_pixel_radii(aia_171, scale=2 * aia_171.rsun_obs) 94 | assert_quantity_allclose(np.max(pixel_radii).value, known_maximum_pixel_radius / 2) 95 | 96 | 97 | @pytest.mark.remote_data() 98 | def test_get_radial_intensity_summary(aia_171): 99 | if isinstance(aia_171, np.ndarray): 100 | pytest.skip("This test is not compatible with numpy arrays") 101 | radial_bin_edges = u.Quantity(utils.equally_spaced_bins(inner_value=1, outer_value=1.5)) * u.R_sun 102 | summary = np.mean 103 | map_r = utils.find_pixel_radii(aia_171, scale=aia_171.rsun_obs).to(u.R_sun) 104 | nbins = radial_bin_edges.shape[1] 105 | lower_edge = [map_r > radial_bin_edges[0, i].to(u.R_sun) for i in range(nbins)] 106 | upper_edge = [map_r < radial_bin_edges[1, i].to(u.R_sun) for i in range(nbins)] 107 | with warnings.catch_warnings(): 108 | # We want to ignore RuntimeWarning: Mean of empty slice 109 | warnings.simplefilter("ignore", category=RuntimeWarning) 110 | expected = np.asarray([summary(aia_171.data[lower_edge[i] * upper_edge[i]]) for i in range(nbins)]) 111 | assert np.allclose(utils.get_radial_intensity_summary(aia_171, radial_bin_edges=radial_bin_edges), expected) 112 | 113 | 114 | def test_calculate_gamma(): 115 | vel_file = get_test_filepath("asda_vxvy.npz") 116 | get_test_filepath("asda_correct.npz") 117 | vxvy = np.load(vel_file, allow_pickle=True) 118 | vx = vxvy["vx"] 119 | vy = vxvy["vy"] 120 | vxvy["data"] 121 | shape = vx.shape 122 | r = 3 123 | index = np.array([[i, j] for i in np.arange(r, shape[0] - r) for j in np.arange(r, shape[1] - r)]) 124 | vel = 
asda.generate_velocity_field(vx, vy, index[1], index[0], r) 125 | pm = np.array( 126 | [[i, j] for i in np.arange(-r, r + 1) for j in np.arange(-r, r + 1)], 127 | dtype=float, 128 | ) 129 | N = (2 * r + 1) ** 2 130 | pnorm = np.linalg.norm(pm, axis=1) 131 | cross = utils.utils._cross2d(pm, vel[..., 0]) 132 | vel_norm = np.linalg.norm(vel[..., 0], axis=2) 133 | sint = cross / (pnorm * vel_norm + 1e-10) 134 | expected = np.nansum(sint, axis=1) / N 135 | assert np.allclose(expected, utils.calculate_gamma(pm, vel[..., 0], pnorm, N)) 136 | 137 | 138 | def test_remove_duplicate(): 139 | rng = np.random.default_rng() 140 | test_data = rng.random(size=(5, 2)) 141 | data_ = np.append(test_data, [test_data[0]], axis=0) 142 | expected = np.delete(data_, -1, 0) 143 | with pytest.raises(ValueError, match="Polygon must be defined as a n x 2 array!"): 144 | utils.remove_duplicate(data_.T) 145 | assert (utils.remove_duplicate(data_) == expected).all() 146 | 147 | 148 | def test_points_in_poly(): 149 | test_data = np.asarray([[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 0]]) 150 | with pytest.raises(ValueError, match="Polygon must be defined as a n x 2 array!"): 151 | utils.points_in_poly(test_data.T) 152 | expected = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]] 153 | assert expected == utils.points_in_poly(test_data) 154 | 155 | 156 | def test_reform_2d(): 157 | test_data = np.asarray([[0, 0], [1, 2], [3, 4]]) 158 | with pytest.raises(TypeError, match="Parameter 'factor' must be an integer!"): 159 | utils.reform2d(test_data, 2.2) 160 | with pytest.raises(ValueError, match="Input array must be 2d!"): 161 | utils.reform2d(test_data[0], 2) 162 | expected = np.asarray( 163 | [ 164 | [0.0, 0.0, 0.0, 0.0], 165 | [0.5, 0.75, 1.0, 1.0], 166 | [1.0, 1.5, 2.0, 2.0], 167 | [2.0, 2.5, 3.0, 3.0], 168 | [3.0, 3.5, 4.0, 4.0], 169 | [3.0, 3.5, 4.0, 4.0], 170 | ], 171 | ) 172 | assert np.allclose(utils.reform2d(test_data, 2), expected) 173 | 
-------------------------------------------------------------------------------- /sunkit_image/version.py: -------------------------------------------------------------------------------- 1 | # NOTE: First try _dev.scm_version if it exists and setuptools_scm is installed 2 | # This file is not included in wheels/tarballs, so otherwise it will 3 | # fall back on the generated _version module. 4 | try: 5 | try: 6 | from ._dev.scm_version import version 7 | except ImportError: 8 | from ._version import version # type: ignore[no-redef] 9 | except Exception: # NOQA: BLE001 10 | import warnings 11 | 12 | warnings.warn( 13 | f'could not determine {__name__.split(".")[0]} package version; this indicates a broken installation' 14 | ) 15 | del warnings 16 | 17 | version = '0.0.0' 18 | 19 | __all__ = ["version"] 20 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | min_version = 4.0 3 | envlist = 4 | py{310,311,312}{,-figure} 5 | py312-devdeps 6 | py310-oldestdeps 7 | codestyle 8 | build_docs 9 | requires = 10 | setuptools 11 | pip 12 | tox-pypi-filter>=0.14 13 | 14 | [testenv] 15 | pypi_filter = https://raw.githubusercontent.com/sunpy/sunpy/main/.test_package_pins.txt 16 | # Run the tests in a temporary directory to make sure that we don't import 17 | # the package from the source tree 18 | allowlist_externals = 19 | /bin/bash 20 | /usr/bin/bash 21 | change_dir = .tmp/{envname} 22 | description = 23 | run tests 24 | oldestdeps: with the oldest supported version of key dependencies 25 | devdeps: with the latest developer version of key dependencies 26 | online: that require remote data 27 | pass_env = 28 | # A variable to tell tests we are on a CI system 29 | CI 30 | # Custom compiler locations (such as ccache) 31 | CC 32 | # Location of locales (needed by sphinx on some systems) 33 | LOCALE_ARCHIVE 34 | # If the user has set a LC override 
we should follow it 35 | LC_ALL 36 | set_env = 37 | MPLBACKEND = agg 38 | devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple https://pypi.anaconda.org/scientific-python-nightly-wheels/simple 39 | COLUMNS = 180 40 | SUNPY_SAMPLEDIR = {env:SUNPY_SAMPLEDIR:{toxinidir}/.tox/{envname}/sample_data/} 41 | PARFIVE_HIDE_PROGRESS = True 42 | deps = 43 | # For packages which publish nightly wheels this will pull the latest nightly 44 | devdeps: numpy>=0.0.dev0 45 | devdeps: sunpy>=0.0.dev0 46 | devdeps: scikit_image>=0.0.dev0 47 | # Packages without nightly wheels will be built from source like this 48 | # devdeps: git+https://github.com/scikit-image/scikit-image.git 49 | # Handle minimum dependencies via minimum_dependencies 50 | oldestdeps: minimum_dependencies 51 | # Figure tests need a tightly controlled environment 52 | figure-!devdeps: astropy==6.1.0 53 | figure-!devdeps: matplotlib==3.9.0 54 | figure-!devdeps: sunpy==6.0.0 55 | # The following indicates which extras_require will be installed 56 | extras = 57 | all 58 | tests 59 | commands_pre = 60 | oldestdeps: minimum_dependencies sunkit_image --filename requirements-min.txt 61 | oldestdeps: pip install -r requirements-min.txt 62 | oldestdeps: python -c "import astropy.time; astropy.time.update_leap_seconds()" 63 | pip freeze --all --no-input 64 | commands = 65 | # To amend the pytest command for different factors you can add a line 66 | # which starts with a factor like `online: --remote-data=any \` 67 | # If you have no factors which require different commands this is all you need: 68 | !figure: pip freeze --all --no-input 69 | # We have online figure tests we don't want to run. 
70 | figure: /bin/bash -c "mkdir -p ./figure_test_images; python -c 'import matplotlib as mpl; print(mpl.ft2font.__file__, mpl.ft2font.__freetype_version__, mpl.ft2font.__freetype_build_type__)' > ./figure_test_images/figure_version_info.txt" 71 | figure: /bin/bash -c "pip freeze --all --no-input >> ./figure_test_images/figure_version_info.txt" 72 | figure: /bin/bash -c "cat ./figure_test_images/figure_version_info.txt" 73 | figure: python -c "import sunkit_image.tests.helpers as h; print(h.get_hash_library_name())" 74 | pytest \ 75 | -vvv \ 76 | -r fEs \ 77 | --pyargs sunkit_image \ 78 | --cov-report=xml \ 79 | --cov=sunkit_image \ 80 | --cov-config={toxinidir}/.coveragerc \ 81 | online: --remote-data=any \ 82 | {toxinidir}/docs \ 83 | figure: -m "mpl_image_compare" \ 84 | figure: --mpl \ 85 | figure: --remote-data=any \ 86 | figure: --mpl-generate-summary=html \ 87 | figure: --mpl-baseline-path=https://raw.githubusercontent.com/sunpy/sunpy-figure-tests/sunkit-image-main/figures/{envname} \ 88 | {posargs} 89 | 90 | [testenv:codestyle] 91 | pypi_filter = 92 | skip_install = true 93 | description = Run all style and file checks with pre-commit 94 | deps = 95 | pre-commit 96 | commands = 97 | pre-commit install-hooks 98 | pre-commit run --color always --all-files --show-diff-on-failure 99 | 100 | [testenv:build_docs] 101 | change_dir = docs 102 | description = Invoke sphinx-build to build the HTML docs 103 | extras = 104 | all 105 | docs 106 | commands = 107 | pip freeze --all --no-input 108 | sphinx-build --color -W --keep-going -b html -d _build/.doctrees . _build/html {posargs} 109 | python -c 'import pathlib; print("Documentation available under file://\{0\}".format(pathlib.Path(r"{toxinidir}") / "docs" / "_build" / "index.html"))' 110 | --------------------------------------------------------------------------------