├── .codespellignore
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── documentation.md
│ └── feature_request.md
├── dependabot.yml
├── pull_request_template.md
└── workflows
│ ├── ci.yml
│ ├── ci_fixed.yaml
│ └── python-publish.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── CHANGELOG.md
├── CITATION.cff
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── docs
├── Makefile
├── make.bat
└── source
│ ├── cite.rst
│ ├── concepts.rst
│ ├── conf.py
│ ├── development.rst
│ ├── example.ipynb
│ ├── help.rst
│ ├── images
│ ├── dash.png
│ ├── estimated_corrected_psfs.png
│ ├── estimated_psfs.png
│ ├── model.png
│ ├── star_distribution.png
│ └── transfer_kernels.png
│ └── index.rst
├── model_example.png
├── pyproject.toml
├── regularizepsf
├── __init__.py
├── builder.py
├── exceptions.py
├── psf.py
├── transform.py
├── util.py
└── visualize.py
├── requirements.txt
└── tests
├── __init__.py
├── data
└── compressed_dash.fits
├── helper.py
├── test_builder.py
├── test_psf.py
├── test_transform.py
└── test_util.py
/.codespellignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/.codespellignore
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: jmbhughes
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Additional context**
32 | Add any other context about the problem here.
33 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Documentation
3 | about: Improve documentation
4 | title: ''
5 | labels: documentation
6 | assignees: jmbhughes
7 | ---
8 |
9 | **How could we improve the documentation?**
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: jmbhughes
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "pip" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## PR summary
2 |
3 | *What does this PR introduce?*
4 |
5 | ## Todos
6 |
7 | *Please list tasks to complete this PR.*
8 |
9 | - [ ] implement new feature or fix bug
10 | - [ ] include tests
11 | - [ ] update associated documentation website entry
12 |
13 | ## Test plan
14 |
15 | *How does this PR verify the code works correctly?*
16 |
17 | ## Breaking changes
18 |
19 | *Document any breaking changes from this PR.*
20 |
21 | ## Related Issues
22 |
23 | *Link the issues related to this PR.*
24 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: CI
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | pull_request:
11 | branches:
12 | - main
13 | schedule:
14 | - cron: '0 0 * * MON'
15 |
16 | jobs:
17 | build:
18 |
19 | runs-on: ubuntu-latest
20 | strategy:
21 | fail-fast: false
22 | matrix:
23 | python-version: ["3.10", "3.11", "3.12", "3.13"]
24 |
25 | steps:
26 | - uses: actions/checkout@v3
27 | - name: Set up Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v3
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | - name: Install dependencies
32 | run: |
33 | python -m pip install --upgrade pip
34 | pip install -e ".[test]"
35 | - name: Lint with ruff
36 | run: |
37 | ruff check .
38 | - name: Run codespell on source code
39 | uses: codespell-project/actions-codespell@v2
40 | with:
41 | skip: '*.fits,*.ipynb'
42 | ignore_words_file: .codespellignore
43 | path: regularizepsf
44 | - name: Run codespell on documentation
45 | uses: codespell-project/actions-codespell@v2
46 | with:
47 | skip: '*.fits,*.ipynb'
48 | ignore_words_file: .codespellignore
49 | path: docs/source
50 | - name: Test with pytest
51 | run: |
52 | pytest --cov=regularizepsf/
53 | - name: Upload coverage to Codecov
54 | uses: codecov/codecov-action@v4
55 | env:
56 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
57 | with:
58 | fail_ci_if_error: true
59 | verbose: true
60 |
--------------------------------------------------------------------------------
/.github/workflows/ci_fixed.yaml:
--------------------------------------------------------------------------------
1 | name: CI pinned server environment
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 | schedule:
11 | - cron: '0 0 * * MON'
12 | workflow_dispatch:
13 |
14 | jobs:
15 | build:
16 |
17 | runs-on: ubuntu-latest
18 | strategy:
19 | fail-fast: false
20 | matrix:
21 | python-version: ["3.11"]
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python ${{ matrix.python-version }}
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: ${{ matrix.python-version }}
29 | - name: Grab requirements.txt
30 | run: |
31 | wget https://raw.githubusercontent.com/punch-mission/punch-mission/refs/heads/main/requirements.txt
32 | - name: Install dependencies
33 | run: |
34 | python -m pip install --upgrade pip
35 | pip install -r requirements.txt
36 | pip install -e ".[test]"
37 | - name: Test with pytest
38 | run: |
39 | pytest --cov=regularizepsf/
40 | - name: Upload coverage to Codecov
41 | uses: codecov/codecov-action@v4
42 | env:
43 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
44 | with:
45 | fail_ci_if_error: true
46 | verbose: true
47 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 | runs-on: ubuntu-latest
21 | permissions:
22 | id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
23 | steps:
24 | - uses: actions/checkout@v4
25 | - name: Set up Python
26 | uses: actions/setup-python@v5
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build -s
35 | - name: Publish package distributions to PyPI
36 | uses: pypa/gh-action-pypi-publish@release/v1
37 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # Custom
132 | .DS_Store
133 | .vscode
134 | .idea
135 | regularizepsf/helper.html
136 | *.c
137 | /venv-docs/
138 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | exclude: "docs/"
2 | repos:
3 | # This should be before any formatting hooks like isort
4 | - repo: https://github.com/astral-sh/ruff-pre-commit
5 | rev: "v0.11.4"
6 | hooks:
7 | - id: ruff
8 | args: ["--fix"]
9 | - repo: https://github.com/PyCQA/isort
10 | rev: 6.0.1
11 | hooks:
12 | - id: isort
13 | exclude: ".*(.fits|.fts|.fit|.header|.txt|tca.*|extern.*|sunpy/extern)$"
14 | - repo: https://github.com/codespell-project/codespell
15 | rev: v2.4.1
16 | hooks:
17 | - id: codespell
18 | files: ^.*\.(py|c|h|md|rst|yml)$
19 | args: [ "--ignore-words", ".codespellignore" ]
20 | - repo: https://github.com/pre-commit/pre-commit-hooks
21 | rev: v5.0.0
22 | hooks:
23 | - id: check-ast
24 | - id: check-case-conflict
25 | - id: trailing-whitespace
26 | exclude: ".*(.fits|.fts|.fit|.header|.txt)$"
27 | - id: check-yaml
28 | - id: debug-statements
29 | - id: check-added-large-files
30 | args: ['--enforce-all','--maxkb=15000']
31 | - id: end-of-file-fixer
32 | exclude: ".*(.fits|.fts|.fit|.header|.txt|tca.*|.json)$|^CITATION.rst$"
33 | - id: mixed-line-ending
34 | exclude: ".*(.fits|.fts|.fit|.header|.txt|tca.*)$"
35 | ci:
36 | autofix_prs: false
37 | autoupdate_schedule: "quarterly"
38 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Set the OS, Python version and other tools you might need
9 | build:
10 | os: ubuntu-22.04
11 | tools:
12 | python: "3.12"
13 |
14 |
15 | # Build documentation in the "docs/" directory with Sphinx
16 | sphinx:
17 | configuration: docs/source/conf.py
18 |
19 | # Optionally build your docs in additional formats such as PDF and ePub
20 | formats:
21 | - pdf
22 | - epub
23 |
24 | # Optional but recommended, declare the Python requirements required
25 | # to build your documentation
26 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
27 | python:
28 | install:
29 | - method: pip
30 | extra_requirements:
31 | - all
32 | - docs
33 | path: .
34 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | [This is available in GitHub](https://github.com/punch-mission/regularizepsf/releases?page=1)
4 |
5 | ## Latest: unreleased
6 |
7 | * specify codecov path by @jmbhughes in https://github.com/punch-mission/regularizepsf/pull/234
8 | * allow a single star mask by @svank in https://github.com/punch-mission/regularizepsf/pull/236
9 |
10 | ## Version 1.0.2: Nov 16, 2024
11 |
12 | - fix broken doc links by @jmbhughes in #228
13 | - Convert str to Path if needed by @jmbhughes in #230
14 |
15 | ## Version 1.0.1: Nov 2, 2024
16 |
17 | - add FITS saving and loading by @jmbhughes in #224
18 | - add visualization for kernels by @jmbhughes in #225
19 |
20 | ## Version 1.0.0: Nov 2, 2024
21 |
22 | - This version moves away from Cython in favor of batching FFTs in SciPy. It's much faster! Plus, you can configure it to run on the GPU (more on that soon). The interface has been completely reworked to a simpler and more elegant solution.
23 | - fix version in docs by @jmbhughes in #221
24 | - Full rewrite for speed and logical simplicity by @jmbhughes in #209
25 |
26 | ## Version 0.4.2: Nov 1, 2024
27 |
28 | - Create .readthedocs.yaml by @jmbhughes in #220
29 |
30 | ## Version 0.4.1: Oct 30, 2024
31 |
32 | - Creates a pinned environment CI by @jmbhughes in #216
33 | - add py3.13 to ci by @jmbhughes in #218
34 | - add py3.13 support with new release by @jmbhughes in #219
35 |
36 | ## Version 0.4.0: Aug 18, 2024
37 |
38 | - Create CITATION.cff by @jmbhughes in #202
39 | - Add variable PSF to variable PSF transforms by @jmbhughes in #203
40 | - bump version by @jmbhughes in #205
41 |
42 | ## Version 0.3.6: Aug 8, 2024
43 |
44 | - This release fixes a mistake in 0.3.5 where the package was not registered properly.
45 |
46 | ## Version 0.3.5: Aug 7, 2024
47 |
48 | - replace requirements_dev.txt in development guide by @jmbhughes in #188
49 | - fix class docstrings and resolve #189 by @jmbhughes in #194
50 | - fix python 3.12 CI failure, enable numpy 2.0 by @jmbhughes in #200
51 |
52 | ## Version 0.3.4: Jun 18, 2024
53 |
54 | - pin versions of numpy by @jmbhughes in #185
55 |
56 | ## Version 0.3.3: Jun 3, 2024
57 |
58 | - Bumps Python version to 3.10 by @jmbhughes in #177
59 |
60 | ## Version 0.3.2: Apr 11, 2024
61 |
62 | - Reverts required Python version to 3.9 instead of 3.10.
63 | - revert-python-bump by @jmbhughes in #165
64 |
65 | ## Version 0.3.1: Apr 2, 2024
66 |
67 | - switching docs to Sphinx by @jmbhughes in #110
68 | - Update docs and readme by @github-actions in #111
69 | - Fix docs build by @jmbhughes in #112
70 | - Fix docs build by @jmbhughes in #113
71 | - Update astropy requirement from ~=5.3 to ~=6.0 by @dependabot in #114
72 | - Revert "Update astropy requirement from ~=5.3 to ~=6.0 (#114)" by @jmbhughes in #117
73 | - Dependabot/pip/astropy approx eq 6.0 by @jmbhughes in #118
74 | - Update astropy requirement from ~=5.3 to ~=6.0 by @dependabot in #119
75 | - Adds pre-commit, updates requirements by @jmbhughes in #121
76 | - Update citations by @jmbhughes in #122
77 | - Finalize pre-commit by @jmbhughes in #123
78 | - Adds and repairs pre-commit, removes pickling of FunctionalCorrector by @github-actions in #124
79 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #126
80 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #128
81 | - Update pre-commit by @github-actions in #127
82 | - Add paper link by @jmbhughes in #129
83 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #130
84 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #132
85 | - Updates paper link, updates pre-commit by @github-actions in #131
86 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #133
87 | - Update scipy requirement from ~=1.11 to ~=1.12 by @dependabot in #134
88 | - Weekly merge to main by @github-actions in #135
89 | - links to Zenodo for software citation by @jmbhughes in #137
90 | - Updates citation by @github-actions in #138
91 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #139
92 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #140
93 | - Switch to gnu lgplv3 license by @jmbhughes in #142
94 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #143
95 | - update license, create PR template by @jmbhughes in #144
96 | - Update README.md by @jmbhughes in #145
97 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #146
98 | - Delete .github/workflows/monthly.yaml by @jmbhughes in #147
99 | - Update ci.yml by @jmbhughes in #148
100 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #150
101 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #151
102 | - fix link in readme by @jmbhughes in #152
103 | - adds notes about development guide by @jmbhughes in #153
104 | - [pre-commit.ci] pre-commit autoupdate by @pre-commit-ci in #154
105 | - updates release mechanism by @jmbhughes in #155
106 | - updates release mechanism by @jmbhughes in #157
107 |
108 |
109 | ## Version 0.2.3: Nov 2, 2023
110 |
111 | ### Summary
112 |
113 | - Dependency version updates
114 | - Weekly PR automation created
115 | - Matplotlib tests now run properly
116 | - Citation updated
117 |
118 | ### What's Changed
119 |
120 | - Bump cython from 3.0.0 to 3.0.2 by @dependabot in #67
121 | - Bump scikit-image from 0.19.3 to 0.21.0 by @dependabot in #66
122 | - Bump astropy from 5.3.1 to 5.3.3 by @dependabot in #65
123 | - relax version pins to ~= instead of == by @jmbhughes in #77
124 | - adds pytest-mpl to requirements_dev.txt by @jmbhughes in #79
125 | - Update ci.yml by @jmbhughes in #81
126 | - relax version pins, fix mpl tests by @jmbhughes in #78
127 | - Update matplotlib requirement from ~=3.0 to ~=3.8 by @dependabot in #85
128 | - Update scipy requirement from ~=1.10 to ~=1.11 by @dependabot in #84
129 | - Update h5py requirement from ~=3.9 to ~=3.10 by @dependabot in #82
130 | - Update numpy requirement from ~=1.25 to ~=1.26 by @dependabot in #83
131 | - Updates dependency versions by @jmbhughes in #90
132 | - adds code of conduct link by @jmbhughes in #92
133 | - Create weeklypr.yaml by @jmbhughes in #95
134 | - Adds weekly PR, Updates Code of Conduct by @jmbhughes in #96
135 | - Weekly pr fix by @jmbhughes in #97
136 | - Add weekly PR by @jmbhughes in #98
137 | - updates citation, schedules weekly PR by @github-actions in #107
138 | - increment version by @jmbhughes in #108
139 |
140 | ## Version 0.2.2: Sep 22, 2023
141 |
142 | - Updates citation by @jmbhughes in #50
143 | - Create dependabot.yml by @jmbhughes in #51
144 | - Bump lmfit from 1.0.3 to 1.2.2 by @dependabot in #56
145 | - Bump cython from 0.29.32 to 3.0.0 by @dependabot in #55
146 | - Bump astropy from 5.1.1 to 5.3.1 by @dependabot in #54
147 | - Drop deepdish for h5py by @jmbhughes in #59
148 | - Normalize patches by the star-center value rather than the patch maximum by @svank in #60
149 | - increment version by @jmbhughes in #64
150 |
151 | ## Version 0.2.1: Jul 16, 2023
152 |
153 | - Update cite.md by @taniavsn in #33
154 | - Update ci by @jmbhughes in #36
155 | - resolves #39 by @jmbhughes in #40
156 | - docs: fix typo by @sumanchapai in #42
157 | - Update requirements.txt by @jmbhughes in #43
158 | - Create python-publish.yml by @jmbhughes in #44
159 | - Update python-publish.yml by @jmbhughes in #45
160 |
161 | ## Version 0.2.0: Apr 21, 2023
162 |
163 | - This release provides new visualization utilities by @svank. It also fixes some small bugs and improves the speed of model calculation.
164 |
165 | - added example code of conduct by @jmbhughes in #7
166 | - Allow passing a custom data loader to find_stars_and_average by @svank in #9
167 | - Wait to pad star cutouts until after averaging by @svank in #15
168 | - Round star coordinates before converting to array coords by @svank in #18
169 | - Avoid numpy dtype deprecation warnings by @svank in #22
170 | - Align interpolation points to data points by @svank in #23
171 | - Add percentile averaging mode by @svank in #20
172 | - Use tmp_path for temp file in test by @svank in #24
173 | - Automatically normalize all PSFs when creating ArrayCorrector by @svank in #28
174 | - Support providing masks for star-finding by @svank in #29
175 | - Take stellar cutouts from the BG-subtracted image by @svank in #19
176 | - updates citation by @jmbhughes in #31
177 | - Visualization utilities by @svank in #17
178 |
179 | ## Version 0.1.0: Feb 5, 2023
180 |
181 | - removes gpu option, adds simulate_observation by @jmbhughes in #4
182 | - fixes major bug when extracting stars and building a model
183 |
184 | ## Version 0.0.3: Dec 28, 2022
185 |
186 | Adds significantly more tests and documentation.
187 |
188 | ## Version 0.0.2: Dec 2, 2022
189 |
190 | Prerelease
191 |
192 | ## Version 0.0.1: Dec 2, 2022
193 |
194 | Prerelease
195 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Hughes"
5 | given-names: "J. Marcus"
6 | orcid: "https://orcid.org/0000-0003-3410-7650"
7 | - family-names: "Van Kooten"
8 | given-names: "Sam"
9 | orcid: "https://orcid.org/0000-0002-4472-8517"
10 | - family-names: "Varesano"
11 | given-names: "Tania"
12 | orcid: "https://orcid.org/0000-0003-0256-9295"
13 | - family-names: "Chapai"
14 | given-names: "Suman"
15 | - family-names: "DeForest"
16 | given-names: "Craig"
17 | orcid: "https://orcid.org/0000-0002-7164-2786"
18 | - family-names: "Seaton"
19 | given-names: "Daniel"
20 | orcid: "https://orcid.org/0000-0002-0494-2025"
21 | title: "regularizepsf"
22 | version: 1.0.2
23 | doi: 10.5281/zenodo.7392170
24 | date-released: 2024-11-02
25 | url: "https://github.com/punch-mission/regularizepsf"
26 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | [Please see our mission-wide code of conduct here.](https://github.com/punch-mission/punch-mission/blob/main/CODE_OF_CONDUCT.md)
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2024 PUNCH Science Operations Center
2 |
3 | This software may be used, modified, and distributed under the terms
4 | of the GNU Lesser General Public License v3 (LGPL-v3); both the
5 | LGPL-v3 and GNU General Public License v3 (GPL-v3) are reproduced
6 | below.
7 |
8 | There is NO WARRANTY associated with this software.
9 |
10 | GNU LESSER GENERAL PUBLIC LICENSE
11 | Version 3, 29 June 2007
12 |
13 | Copyright (C) 2007 Free Software Foundation, Inc.
14 | Everyone is permitted to copy and distribute verbatim copies
15 | of this license document, but changing it is not allowed.
16 |
17 |
18 | This version of the GNU Lesser General Public License incorporates
19 | the terms and conditions of version 3 of the GNU General Public
20 | License, supplemented by the additional permissions listed below.
21 |
22 | 0. Additional Definitions.
23 |
24 | As used herein, "this License" refers to version 3 of the GNU Lesser
25 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
26 | General Public License.
27 |
28 | "The Library" refers to a covered work governed by this License,
29 | other than an Application or a Combined Work as defined below.
30 |
31 | An "Application" is any work that makes use of an interface provided
32 | by the Library, but which is not otherwise based on the Library.
33 | Defining a subclass of a class defined by the Library is deemed a mode
34 | of using an interface provided by the Library.
35 |
36 | A "Combined Work" is a work produced by combining or linking an
37 | Application with the Library. The particular version of the Library
38 | with which the Combined Work was made is also called the "Linked
39 | Version".
40 |
41 | The "Minimal Corresponding Source" for a Combined Work means the
42 | Corresponding Source for the Combined Work, excluding any source code
43 | for portions of the Combined Work that, considered in isolation, are
44 | based on the Application, and not on the Linked Version.
45 |
46 | The "Corresponding Application Code" for a Combined Work means the
47 | object code and/or source code for the Application, including any data
48 | and utility programs needed for reproducing the Combined Work from the
49 | Application, but excluding the System Libraries of the Combined Work.
50 |
51 | 1. Exception to Section 3 of the GNU GPL.
52 |
53 | You may convey a covered work under sections 3 and 4 of this License
54 | without being bound by section 3 of the GNU GPL.
55 |
56 | 2. Conveying Modified Versions.
57 |
58 | If you modify a copy of the Library, and, in your modifications, a
59 | facility refers to a function or data to be supplied by an Application
60 | that uses the facility (other than as an argument passed when the
61 | facility is invoked), then you may convey a copy of the modified
62 | version:
63 |
64 | a) under this License, provided that you make a good faith effort to
65 | ensure that, in the event an Application does not supply the
66 | function or data, the facility still operates, and performs
67 | whatever part of its purpose remains meaningful, or
68 |
69 | b) under the GNU GPL, with none of the additional permissions of
70 | this License applicable to that copy.
71 |
72 | 3. Object Code Incorporating Material from Library Header Files.
73 |
74 | The object code form of an Application may incorporate material from
75 | a header file that is part of the Library. You may convey such object
76 | code under terms of your choice, provided that, if the incorporated
77 | material is not limited to numerical parameters, data structure
78 | layouts and accessors, or small macros, inline functions and templates
79 | (ten or fewer lines in length), you do both of the following:
80 |
81 | a) Give prominent notice with each copy of the object code that the
82 | Library is used in it and that the Library and its use are
83 | covered by this License.
84 |
85 | b) Accompany the object code with a copy of the GNU GPL and this license
86 | document.
87 |
88 | 4. Combined Works.
89 |
90 | You may convey a Combined Work under terms of your choice that,
91 | taken together, effectively do not restrict modification of the
92 | portions of the Library contained in the Combined Work and reverse
93 | engineering for debugging such modifications, if you also do each of
94 | the following:
95 |
96 | a) Give prominent notice with each copy of the Combined Work that
97 | the Library is used in it and that the Library and its use are
98 | covered by this License.
99 |
100 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
101 | document.
102 |
103 | c) For a Combined Work that displays copyright notices during
104 | execution, include the copyright notice for the Library among
105 | these notices, as well as a reference directing the user to the
106 | copies of the GNU GPL and this license document.
107 |
108 | d) Do one of the following:
109 |
110 | 0) Convey the Minimal Corresponding Source under the terms of this
111 | License, and the Corresponding Application Code in a form
112 | suitable for, and under terms that permit, the user to
113 | recombine or relink the Application with a modified version of
114 | the Linked Version to produce a modified Combined Work, in the
115 | manner specified by section 6 of the GNU GPL for conveying
116 | Corresponding Source.
117 |
118 | 1) Use a suitable shared library mechanism for linking with the
119 | Library. A suitable mechanism is one that (a) uses at run time
120 | a copy of the Library already present on the user's computer
121 | system, and (b) will operate properly with a modified version
122 | of the Library that is interface-compatible with the Linked
123 | Version.
124 |
125 | e) Provide Installation Information, but only if you would otherwise
126 | be required to provide such information under section 6 of the
127 | GNU GPL, and only to the extent that such information is
128 | necessary to install and execute a modified version of the
129 | Combined Work produced by recombining or relinking the
130 | Application with a modified version of the Linked Version. (If
131 | you use option 4d0, the Installation Information must accompany
132 | the Minimal Corresponding Source and Corresponding Application
133 | Code. If you use option 4d1, you must provide the Installation
134 | Information in the manner specified by section 6 of the GNU GPL
135 | for conveying Corresponding Source.)
136 |
137 | 5. Combined Libraries.
138 |
139 | You may place library facilities that are a work based on the
140 | Library side by side in a single library together with other library
141 | facilities that are not Applications and are not covered by this
142 | License, and convey such a combined library under terms of your
143 | choice, if you do both of the following:
144 |
145 | a) Accompany the combined library with a copy of the same work based
146 | on the Library, uncombined with any other library facilities,
147 | conveyed under the terms of this License.
148 |
149 | b) Give prominent notice with the combined library that part of it
150 | is a work based on the Library, and explaining where to find the
151 | accompanying uncombined form of the same work.
152 |
153 | 6. Revised Versions of the GNU Lesser General Public License.
154 |
155 | The Free Software Foundation may publish revised and/or new versions
156 | of the GNU Lesser General Public License from time to time. Such new
157 | versions will be similar in spirit to the present version, but may
158 | differ in detail to address new problems or concerns.
159 |
160 | Each version is given a distinguishing version number. If the
161 | Library as you received it specifies that a certain numbered version
162 | of the GNU Lesser General Public License "or any later version"
163 | applies to it, you have the option of following the terms and
164 | conditions either of that published version or of any later version
165 | published by the Free Software Foundation. If the Library as you
166 | received it does not specify a version number of the GNU Lesser
167 | General Public License, you may choose any version of the GNU Lesser
168 | General Public License ever published by the Free Software Foundation.
169 |
170 | If the Library as you received it specifies that a proxy can decide
171 | whether future versions of the GNU Lesser General Public License shall
172 | apply, that proxy's public statement of acceptance of any version is
173 | permanent authorization for you to choose that version for the
174 | Library.
175 |
176 |
177 | GNU GENERAL PUBLIC LICENSE
178 | Version 3, 29 June 2007
179 |
180 | Copyright (C) 2007 Free Software Foundation, Inc.
181 | Everyone is permitted to copy and distribute verbatim copies
182 | of this license document, but changing it is not allowed.
183 |
184 | Preamble
185 |
186 | The GNU General Public License is a free, copyleft license for
187 | software and other kinds of works.
188 |
189 | The licenses for most software and other practical works are designed
190 | to take away your freedom to share and change the works. By contrast,
191 | the GNU General Public License is intended to guarantee your freedom to
192 | share and change all versions of a program--to make sure it remains free
193 | software for all its users. We, the Free Software Foundation, use the
194 | GNU General Public License for most of our software; it applies also to
195 | any other work released this way by its authors. You can apply it to
196 | your programs, too.
197 |
198 | When we speak of free software, we are referring to freedom, not
199 | price. Our General Public Licenses are designed to make sure that you
200 | have the freedom to distribute copies of free software (and charge for
201 | them if you wish), that you receive source code or can get it if you
202 | want it, that you can change the software or use pieces of it in new
203 | free programs, and that you know you can do these things.
204 |
205 | To protect your rights, we need to prevent others from denying you
206 | these rights or asking you to surrender the rights. Therefore, you have
207 | certain responsibilities if you distribute copies of the software, or if
208 | you modify it: responsibilities to respect the freedom of others.
209 |
210 | For example, if you distribute copies of such a program, whether
211 | gratis or for a fee, you must pass on to the recipients the same
212 | freedoms that you received. You must make sure that they, too, receive
213 | or can get the source code. And you must show them these terms so they
214 | know their rights.
215 |
216 | Developers that use the GNU GPL protect your rights with two steps:
217 | (1) assert copyright on the software, and (2) offer you this License
218 | giving you legal permission to copy, distribute and/or modify it.
219 |
220 | For the developers' and authors' protection, the GPL clearly explains
221 | that there is no warranty for this free software. For both users' and
222 | authors' sake, the GPL requires that modified versions be marked as
223 | changed, so that their problems will not be attributed erroneously to
224 | authors of previous versions.
225 |
226 | Some devices are designed to deny users access to install or run
227 | modified versions of the software inside them, although the manufacturer
228 | can do so. This is fundamentally incompatible with the aim of
229 | protecting users' freedom to change the software. The systematic
230 | pattern of such abuse occurs in the area of products for individuals to
231 | use, which is precisely where it is most unacceptable. Therefore, we
232 | have designed this version of the GPL to prohibit the practice for those
233 | products. If such problems arise substantially in other domains, we
234 | stand ready to extend this provision to those domains in future versions
235 | of the GPL, as needed to protect the freedom of users.
236 |
237 | Finally, every program is threatened constantly by software patents.
238 | States should not allow patents to restrict development and use of
239 | software on general-purpose computers, but in those that do, we wish to
240 | avoid the special danger that patents applied to a free program could
241 | make it effectively proprietary. To prevent this, the GPL assures that
242 | patents cannot be used to render the program non-free.
243 |
244 | The precise terms and conditions for copying, distribution and
245 | modification follow.
246 |
247 | TERMS AND CONDITIONS
248 |
249 | 0. Definitions.
250 |
251 | "This License" refers to version 3 of the GNU General Public License.
252 |
253 | "Copyright" also means copyright-like laws that apply to other kinds of
254 | works, such as semiconductor masks.
255 |
256 | "The Program" refers to any copyrightable work licensed under this
257 | License. Each licensee is addressed as "you". "Licensees" and
258 | "recipients" may be individuals or organizations.
259 |
260 | To "modify" a work means to copy from or adapt all or part of the work
261 | in a fashion requiring copyright permission, other than the making of an
262 | exact copy. The resulting work is called a "modified version" of the
263 | earlier work or a work "based on" the earlier work.
264 |
265 | A "covered work" means either the unmodified Program or a work based
266 | on the Program.
267 |
268 | To "propagate" a work means to do anything with it that, without
269 | permission, would make you directly or secondarily liable for
270 | infringement under applicable copyright law, except executing it on a
271 | computer or modifying a private copy. Propagation includes copying,
272 | distribution (with or without modification), making available to the
273 | public, and in some countries other activities as well.
274 |
275 | To "convey" a work means any kind of propagation that enables other
276 | parties to make or receive copies. Mere interaction with a user through
277 | a computer network, with no transfer of a copy, is not conveying.
278 |
279 | An interactive user interface displays "Appropriate Legal Notices"
280 | to the extent that it includes a convenient and prominently visible
281 | feature that (1) displays an appropriate copyright notice, and (2)
282 | tells the user that there is no warranty for the work (except to the
283 | extent that warranties are provided), that licensees may convey the
284 | work under this License, and how to view a copy of this License. If
285 | the interface presents a list of user commands or options, such as a
286 | menu, a prominent item in the list meets this criterion.
287 |
288 | 1. Source Code.
289 |
290 | The "source code" for a work means the preferred form of the work
291 | for making modifications to it. "Object code" means any non-source
292 | form of a work.
293 |
294 | A "Standard Interface" means an interface that either is an official
295 | standard defined by a recognized standards body, or, in the case of
296 | interfaces specified for a particular programming language, one that
297 | is widely used among developers working in that language.
298 |
299 | The "System Libraries" of an executable work include anything, other
300 | than the work as a whole, that (a) is included in the normal form of
301 | packaging a Major Component, but which is not part of that Major
302 | Component, and (b) serves only to enable use of the work with that
303 | Major Component, or to implement a Standard Interface for which an
304 | implementation is available to the public in source code form. A
305 | "Major Component", in this context, means a major essential component
306 | (kernel, window system, and so on) of the specific operating system
307 | (if any) on which the executable work runs, or a compiler used to
308 | produce the work, or an object code interpreter used to run it.
309 |
310 | The "Corresponding Source" for a work in object code form means all
311 | the source code needed to generate, install, and (for an executable
312 | work) run the object code and to modify the work, including scripts to
313 | control those activities. However, it does not include the work's
314 | System Libraries, or general-purpose tools or generally available free
315 | programs which are used unmodified in performing those activities but
316 | which are not part of the work. For example, Corresponding Source
317 | includes interface definition files associated with source files for
318 | the work, and the source code for shared libraries and dynamically
319 | linked subprograms that the work is specifically designed to require,
320 | such as by intimate data communication or control flow between those
321 | subprograms and other parts of the work.
322 |
323 | The Corresponding Source need not include anything that users
324 | can regenerate automatically from other parts of the Corresponding
325 | Source.
326 |
327 | The Corresponding Source for a work in source code form is that
328 | same work.
329 |
330 | 2. Basic Permissions.
331 |
332 | All rights granted under this License are granted for the term of
333 | copyright on the Program, and are irrevocable provided the stated
334 | conditions are met. This License explicitly affirms your unlimited
335 | permission to run the unmodified Program. The output from running a
336 | covered work is covered by this License only if the output, given its
337 | content, constitutes a covered work. This License acknowledges your
338 | rights of fair use or other equivalent, as provided by copyright law.
339 |
340 | You may make, run and propagate covered works that you do not
341 | convey, without conditions so long as your license otherwise remains
342 | in force. You may convey covered works to others for the sole purpose
343 | of having them make modifications exclusively for you, or provide you
344 | with facilities for running those works, provided that you comply with
345 | the terms of this License in conveying all material for which you do
346 | not control copyright. Those thus making or running the covered works
347 | for you must do so exclusively on your behalf, under your direction
348 | and control, on terms that prohibit them from making any copies of
349 | your copyrighted material outside their relationship with you.
350 |
351 | Conveying under any other circumstances is permitted solely under
352 | the conditions stated below. Sublicensing is not allowed; section 10
353 | makes it unnecessary.
354 |
355 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
356 |
357 | No covered work shall be deemed part of an effective technological
358 | measure under any applicable law fulfilling obligations under article
359 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
360 | similar laws prohibiting or restricting circumvention of such
361 | measures.
362 |
363 | When you convey a covered work, you waive any legal power to forbid
364 | circumvention of technological measures to the extent such circumvention
365 | is effected by exercising rights under this License with respect to
366 | the covered work, and you disclaim any intention to limit operation or
367 | modification of the work as a means of enforcing, against the work's
368 | users, your or third parties' legal rights to forbid circumvention of
369 | technological measures.
370 |
371 | 4. Conveying Verbatim Copies.
372 |
373 | You may convey verbatim copies of the Program's source code as you
374 | receive it, in any medium, provided that you conspicuously and
375 | appropriately publish on each copy an appropriate copyright notice;
376 | keep intact all notices stating that this License and any
377 | non-permissive terms added in accord with section 7 apply to the code;
378 | keep intact all notices of the absence of any warranty; and give all
379 | recipients a copy of this License along with the Program.
380 |
381 | You may charge any price or no price for each copy that you convey,
382 | and you may offer support or warranty protection for a fee.
383 |
384 | 5. Conveying Modified Source Versions.
385 |
386 | You may convey a work based on the Program, or the modifications to
387 | produce it from the Program, in the form of source code under the
388 | terms of section 4, provided that you also meet all of these conditions:
389 |
390 | a) The work must carry prominent notices stating that you modified
391 | it, and giving a relevant date.
392 |
393 | b) The work must carry prominent notices stating that it is
394 | released under this License and any conditions added under section
395 | 7. This requirement modifies the requirement in section 4 to
396 | "keep intact all notices".
397 |
398 | c) You must license the entire work, as a whole, under this
399 | License to anyone who comes into possession of a copy. This
400 | License will therefore apply, along with any applicable section 7
401 | additional terms, to the whole of the work, and all its parts,
402 | regardless of how they are packaged. This License gives no
403 | permission to license the work in any other way, but it does not
404 | invalidate such permission if you have separately received it.
405 |
406 | d) If the work has interactive user interfaces, each must display
407 | Appropriate Legal Notices; however, if the Program has interactive
408 | interfaces that do not display Appropriate Legal Notices, your
409 | work need not make them do so.
410 |
411 | A compilation of a covered work with other separate and independent
412 | works, which are not by their nature extensions of the covered work,
413 | and which are not combined with it such as to form a larger program,
414 | in or on a volume of a storage or distribution medium, is called an
415 | "aggregate" if the compilation and its resulting copyright are not
416 | used to limit the access or legal rights of the compilation's users
417 | beyond what the individual works permit. Inclusion of a covered work
418 | in an aggregate does not cause this License to apply to the other
419 | parts of the aggregate.
420 |
421 | 6. Conveying Non-Source Forms.
422 |
423 | You may convey a covered work in object code form under the terms
424 | of sections 4 and 5, provided that you also convey the
425 | machine-readable Corresponding Source under the terms of this License,
426 | in one of these ways:
427 |
428 | a) Convey the object code in, or embodied in, a physical product
429 | (including a physical distribution medium), accompanied by the
430 | Corresponding Source fixed on a durable physical medium
431 | customarily used for software interchange.
432 |
433 | b) Convey the object code in, or embodied in, a physical product
434 | (including a physical distribution medium), accompanied by a
435 | written offer, valid for at least three years and valid for as
436 | long as you offer spare parts or customer support for that product
437 | model, to give anyone who possesses the object code either (1) a
438 | copy of the Corresponding Source for all the software in the
439 | product that is covered by this License, on a durable physical
440 | medium customarily used for software interchange, for a price no
441 | more than your reasonable cost of physically performing this
442 | conveying of source, or (2) access to copy the
443 | Corresponding Source from a network server at no charge.
444 |
445 | c) Convey individual copies of the object code with a copy of the
446 | written offer to provide the Corresponding Source. This
447 | alternative is allowed only occasionally and noncommercially, and
448 | only if you received the object code with such an offer, in accord
449 | with subsection 6b.
450 |
451 | d) Convey the object code by offering access from a designated
452 | place (gratis or for a charge), and offer equivalent access to the
453 | Corresponding Source in the same way through the same place at no
454 | further charge. You need not require recipients to copy the
455 | Corresponding Source along with the object code. If the place to
456 | copy the object code is a network server, the Corresponding Source
457 | may be on a different server (operated by you or a third party)
458 | that supports equivalent copying facilities, provided you maintain
459 | clear directions next to the object code saying where to find the
460 | Corresponding Source. Regardless of what server hosts the
461 | Corresponding Source, you remain obligated to ensure that it is
462 | available for as long as needed to satisfy these requirements.
463 |
464 | e) Convey the object code using peer-to-peer transmission, provided
465 | you inform other peers where the object code and Corresponding
466 | Source of the work are being offered to the general public at no
467 | charge under subsection 6d.
468 |
469 | A separable portion of the object code, whose source code is excluded
470 | from the Corresponding Source as a System Library, need not be
471 | included in conveying the object code work.
472 |
473 | A "User Product" is either (1) a "consumer product", which means any
474 | tangible personal property which is normally used for personal, family,
475 | or household purposes, or (2) anything designed or sold for incorporation
476 | into a dwelling. In determining whether a product is a consumer product,
477 | doubtful cases shall be resolved in favor of coverage. For a particular
478 | product received by a particular user, "normally used" refers to a
479 | typical or common use of that class of product, regardless of the status
480 | of the particular user or of the way in which the particular user
481 | actually uses, or expects or is expected to use, the product. A product
482 | is a consumer product regardless of whether the product has substantial
483 | commercial, industrial or non-consumer uses, unless such uses represent
484 | the only significant mode of use of the product.
485 |
486 | "Installation Information" for a User Product means any methods,
487 | procedures, authorization keys, or other information required to install
488 | and execute modified versions of a covered work in that User Product from
489 | a modified version of its Corresponding Source. The information must
490 | suffice to ensure that the continued functioning of the modified object
491 | code is in no case prevented or interfered with solely because
492 | modification has been made.
493 |
494 | If you convey an object code work under this section in, or with, or
495 | specifically for use in, a User Product, and the conveying occurs as
496 | part of a transaction in which the right of possession and use of the
497 | User Product is transferred to the recipient in perpetuity or for a
498 | fixed term (regardless of how the transaction is characterized), the
499 | Corresponding Source conveyed under this section must be accompanied
500 | by the Installation Information. But this requirement does not apply
501 | if neither you nor any third party retains the ability to install
502 | modified object code on the User Product (for example, the work has
503 | been installed in ROM).
504 |
505 | The requirement to provide Installation Information does not include a
506 | requirement to continue to provide support service, warranty, or updates
507 | for a work that has been modified or installed by the recipient, or for
508 | the User Product in which it has been modified or installed. Access to a
509 | network may be denied when the modification itself materially and
510 | adversely affects the operation of the network or violates the rules and
511 | protocols for communication across the network.
512 |
513 | Corresponding Source conveyed, and Installation Information provided,
514 | in accord with this section must be in a format that is publicly
515 | documented (and with an implementation available to the public in
516 | source code form), and must require no special password or key for
517 | unpacking, reading or copying.
518 |
519 | 7. Additional Terms.
520 |
521 | "Additional permissions" are terms that supplement the terms of this
522 | License by making exceptions from one or more of its conditions.
523 | Additional permissions that are applicable to the entire Program shall
524 | be treated as though they were included in this License, to the extent
525 | that they are valid under applicable law. If additional permissions
526 | apply only to part of the Program, that part may be used separately
527 | under those permissions, but the entire Program remains governed by
528 | this License without regard to the additional permissions.
529 |
530 | When you convey a copy of a covered work, you may at your option
531 | remove any additional permissions from that copy, or from any part of
532 | it. (Additional permissions may be written to require their own
533 | removal in certain cases when you modify the work.) You may place
534 | additional permissions on material, added by you to a covered work,
535 | for which you have or can give appropriate copyright permission.
536 |
537 | Notwithstanding any other provision of this License, for material you
538 | add to a covered work, you may (if authorized by the copyright holders of
539 | that material) supplement the terms of this License with terms:
540 |
541 | a) Disclaiming warranty or limiting liability differently from the
542 | terms of sections 15 and 16 of this License; or
543 |
544 | b) Requiring preservation of specified reasonable legal notices or
545 | author attributions in that material or in the Appropriate Legal
546 | Notices displayed by works containing it; or
547 |
548 | c) Prohibiting misrepresentation of the origin of that material, or
549 | requiring that modified versions of such material be marked in
550 | reasonable ways as different from the original version; or
551 |
552 | d) Limiting the use for publicity purposes of names of licensors or
553 | authors of the material; or
554 |
555 | e) Declining to grant rights under trademark law for use of some
556 | trade names, trademarks, or service marks; or
557 |
558 | f) Requiring indemnification of licensors and authors of that
559 | material by anyone who conveys the material (or modified versions of
560 | it) with contractual assumptions of liability to the recipient, for
561 | any liability that these contractual assumptions directly impose on
562 | those licensors and authors.
563 |
564 | All other non-permissive additional terms are considered "further
565 | restrictions" within the meaning of section 10. If the Program as you
566 | received it, or any part of it, contains a notice stating that it is
567 | governed by this License along with a term that is a further
568 | restriction, you may remove that term. If a license document contains
569 | a further restriction but permits relicensing or conveying under this
570 | License, you may add to a covered work material governed by the terms
571 | of that license document, provided that the further restriction does
572 | not survive such relicensing or conveying.
573 |
574 | If you add terms to a covered work in accord with this section, you
575 | must place, in the relevant source files, a statement of the
576 | additional terms that apply to those files, or a notice indicating
577 | where to find the applicable terms.
578 |
579 | Additional terms, permissive or non-permissive, may be stated in the
580 | form of a separately written license, or stated as exceptions;
581 | the above requirements apply either way.
582 |
583 | 8. Termination.
584 |
585 | You may not propagate or modify a covered work except as expressly
586 | provided under this License. Any attempt otherwise to propagate or
587 | modify it is void, and will automatically terminate your rights under
588 | this License (including any patent licenses granted under the third
589 | paragraph of section 11).
590 |
591 | However, if you cease all violation of this License, then your
592 | license from a particular copyright holder is reinstated (a)
593 | provisionally, unless and until the copyright holder explicitly and
594 | finally terminates your license, and (b) permanently, if the copyright
595 | holder fails to notify you of the violation by some reasonable means
596 | prior to 60 days after the cessation.
597 |
598 | Moreover, your license from a particular copyright holder is
599 | reinstated permanently if the copyright holder notifies you of the
600 | violation by some reasonable means, this is the first time you have
601 | received notice of violation of this License (for any work) from that
602 | copyright holder, and you cure the violation prior to 30 days after
603 | your receipt of the notice.
604 |
605 | Termination of your rights under this section does not terminate the
606 | licenses of parties who have received copies or rights from you under
607 | this License. If your rights have been terminated and not permanently
608 | reinstated, you do not qualify to receive new licenses for the same
609 | material under section 10.
610 |
611 | 9. Acceptance Not Required for Having Copies.
612 |
613 | You are not required to accept this License in order to receive or
614 | run a copy of the Program. Ancillary propagation of a covered work
615 | occurring solely as a consequence of using peer-to-peer transmission
616 | to receive a copy likewise does not require acceptance. However,
617 | nothing other than this License grants you permission to propagate or
618 | modify any covered work. These actions infringe copyright if you do
619 | not accept this License. Therefore, by modifying or propagating a
620 | covered work, you indicate your acceptance of this License to do so.
621 |
622 | 10. Automatic Licensing of Downstream Recipients.
623 |
624 | Each time you convey a covered work, the recipient automatically
625 | receives a license from the original licensors, to run, modify and
626 | propagate that work, subject to this License. You are not responsible
627 | for enforcing compliance by third parties with this License.
628 |
629 | An "entity transaction" is a transaction transferring control of an
630 | organization, or substantially all assets of one, or subdividing an
631 | organization, or merging organizations. If propagation of a covered
632 | work results from an entity transaction, each party to that
633 | transaction who receives a copy of the work also receives whatever
634 | licenses to the work the party's predecessor in interest had or could
635 | give under the previous paragraph, plus a right to possession of the
636 | Corresponding Source of the work from the predecessor in interest, if
637 | the predecessor has it or can get it with reasonable efforts.
638 |
639 | You may not impose any further restrictions on the exercise of the
640 | rights granted or affirmed under this License. For example, you may
641 | not impose a license fee, royalty, or other charge for exercise of
642 | rights granted under this License, and you may not initiate litigation
643 | (including a cross-claim or counterclaim in a lawsuit) alleging that
644 | any patent claim is infringed by making, using, selling, offering for
645 | sale, or importing the Program or any portion of it.
646 |
647 | 11. Patents.
648 |
649 | A "contributor" is a copyright holder who authorizes use under this
650 | License of the Program or a work on which the Program is based. The
651 | work thus licensed is called the contributor's "contributor version".
652 |
653 | A contributor's "essential patent claims" are all patent claims
654 | owned or controlled by the contributor, whether already acquired or
655 | hereafter acquired, that would be infringed by some manner, permitted
656 | by this License, of making, using, or selling its contributor version,
657 | but do not include claims that would be infringed only as a
658 | consequence of further modification of the contributor version. For
659 | purposes of this definition, "control" includes the right to grant
660 | patent sublicenses in a manner consistent with the requirements of
661 | this License.
662 |
663 | Each contributor grants you a non-exclusive, worldwide, royalty-free
664 | patent license under the contributor's essential patent claims, to
665 | make, use, sell, offer for sale, import and otherwise run, modify and
666 | propagate the contents of its contributor version.
667 |
668 | In the following three paragraphs, a "patent license" is any express
669 | agreement or commitment, however denominated, not to enforce a patent
670 | (such as an express permission to practice a patent or covenant not to
671 | sue for patent infringement). To "grant" such a patent license to a
672 | party means to make such an agreement or commitment not to enforce a
673 | patent against the party.
674 |
675 | If you convey a covered work, knowingly relying on a patent license,
676 | and the Corresponding Source of the work is not available for anyone
677 | to copy, free of charge and under the terms of this License, through a
678 | publicly available network server or other readily accessible means,
679 | then you must either (1) cause the Corresponding Source to be so
680 | available, or (2) arrange to deprive yourself of the benefit of the
681 | patent license for this particular work, or (3) arrange, in a manner
682 | consistent with the requirements of this License, to extend the patent
683 | license to downstream recipients. "Knowingly relying" means you have
684 | actual knowledge that, but for the patent license, your conveying the
685 | covered work in a country, or your recipient's use of the covered work
686 | in a country, would infringe one or more identifiable patents in that
687 | country that you have reason to believe are valid.
688 |
689 | If, pursuant to or in connection with a single transaction or
690 | arrangement, you convey, or propagate by procuring conveyance of, a
691 | covered work, and grant a patent license to some of the parties
692 | receiving the covered work authorizing them to use, propagate, modify
693 | or convey a specific copy of the covered work, then the patent license
694 | you grant is automatically extended to all recipients of the covered
695 | work and works based on it.
696 |
697 | A patent license is "discriminatory" if it does not include within
698 | the scope of its coverage, prohibits the exercise of, or is
699 | conditioned on the non-exercise of one or more of the rights that are
700 | specifically granted under this License. You may not convey a covered
701 | work if you are a party to an arrangement with a third party that is
702 | in the business of distributing software, under which you make payment
703 | to the third party based on the extent of your activity of conveying
704 | the work, and under which the third party grants, to any of the
705 | parties who would receive the covered work from you, a discriminatory
706 | patent license (a) in connection with copies of the covered work
707 | conveyed by you (or copies made from those copies), or (b) primarily
708 | for and in connection with specific products or compilations that
709 | contain the covered work, unless you entered into that arrangement,
710 | or that patent license was granted, prior to 28 March 2007.
711 |
712 | Nothing in this License shall be construed as excluding or limiting
713 | any implied license or other defenses to infringement that may
714 | otherwise be available to you under applicable patent law.
715 |
716 | 12. No Surrender of Others' Freedom.
717 |
718 | If conditions are imposed on you (whether by court order, agreement or
719 | otherwise) that contradict the conditions of this License, they do not
720 | excuse you from the conditions of this License. If you cannot convey a
721 | covered work so as to satisfy simultaneously your obligations under this
722 | License and any other pertinent obligations, then as a consequence you may
723 | not convey it at all. For example, if you agree to terms that obligate you
724 | to collect a royalty for further conveying from those to whom you convey
725 | the Program, the only way you could satisfy both those terms and this
726 | License would be to refrain entirely from conveying the Program.
727 |
728 | 13. Use with the GNU Affero General Public License.
729 |
730 | Notwithstanding any other provision of this License, you have
731 | permission to link or combine any covered work with a work licensed
732 | under version 3 of the GNU Affero General Public License into a single
733 | combined work, and to convey the resulting work. The terms of this
734 | License will continue to apply to the part which is the covered work,
735 | but the special requirements of the GNU Affero General Public License,
736 | section 13, concerning interaction through a network will apply to the
737 | combination as such.
738 |
739 | 14. Revised Versions of this License.
740 |
741 | The Free Software Foundation may publish revised and/or new versions of
742 | the GNU General Public License from time to time. Such new versions will
743 | be similar in spirit to the present version, but may differ in detail to
744 | address new problems or concerns.
745 |
746 | Each version is given a distinguishing version number. If the
747 | Program specifies that a certain numbered version of the GNU General
748 | Public License "or any later version" applies to it, you have the
749 | option of following the terms and conditions either of that numbered
750 | version or of any later version published by the Free Software
751 | Foundation. If the Program does not specify a version number of the
752 | GNU General Public License, you may choose any version ever published
753 | by the Free Software Foundation.
754 |
755 | If the Program specifies that a proxy can decide which future
756 | versions of the GNU General Public License can be used, that proxy's
757 | public statement of acceptance of a version permanently authorizes you
758 | to choose that version for the Program.
759 |
760 | Later license versions may give you additional or different
761 | permissions. However, no additional obligations are imposed on any
762 | author or copyright holder as a result of your choosing to follow a
763 | later version.
764 |
765 | 15. Disclaimer of Warranty.
766 |
767 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
768 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
769 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
770 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
771 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
772 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
773 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
774 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
775 |
776 | 16. Limitation of Liability.
777 |
778 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
779 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
780 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
781 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
782 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
783 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
784 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
785 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
786 | SUCH DAMAGES.
787 |
788 | 17. Interpretation of Sections 15 and 16.
789 |
790 | If the disclaimer of warranty and limitation of liability provided
791 | above cannot be given local legal effect according to their terms,
792 | reviewing courts shall apply local law that most closely approximates
793 | an absolute waiver of all civil liability in connection with the
794 | Program, unless a warranty or assumption of liability accompanies a
795 | copy of the Program in return for a fee.
796 |
797 | END OF TERMS AND CONDITIONS
798 |
799 | How to Apply These Terms to Your New Programs
800 |
801 | If you develop a new program, and you want it to be of the greatest
802 | possible use to the public, the best way to achieve this is to make it
803 | free software which everyone can redistribute and change under these terms.
804 |
805 | To do so, attach the following notices to the program. It is safest
806 | to attach them to the start of each source file to most effectively
807 | state the exclusion of warranty; and each file should have at least
808 | the "copyright" line and a pointer to where the full notice is found.
809 |
810 |
811 | Copyright (C)
812 |
813 | This program is free software: you can redistribute it and/or modify
814 | it under the terms of the GNU General Public License as published by
815 | the Free Software Foundation, either version 3 of the License, or
816 | (at your option) any later version.
817 |
818 | This program is distributed in the hope that it will be useful,
819 | but WITHOUT ANY WARRANTY; without even the implied warranty of
820 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
821 | GNU General Public License for more details.
822 |
823 | You should have received a copy of the GNU General Public License
824 | along with this program. If not, see .
825 |
826 | Also add information on how to contact you by electronic and paper mail.
827 |
828 | If the program does terminal interaction, make it output a short
829 | notice like this when it starts in an interactive mode:
830 |
831 | Copyright (C)
832 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
833 | This is free software, and you are welcome to redistribute it
834 | under certain conditions; type `show c' for details.
835 |
836 | The hypothetical commands `show w' and `show c' should show the appropriate
837 | parts of the General Public License. Of course, your program's commands
838 | might be different; for a GUI interface, you would use an "about box".
839 |
840 | You should also get your employer (if you work as a programmer) or school,
841 | if any, to sign a "copyright disclaimer" for the program, if necessary.
842 | For more information on this, and how to apply and follow the GNU GPL, see
843 | .
844 |
845 | The GNU General Public License does not permit incorporating your program
846 | into proprietary programs. If your program is a subroutine library, you
847 | may consider it more useful to permit linking proprietary applications with
848 | the library. If this is what you want to do, use the GNU Lesser General
849 | Public License instead of this License. But first, please read
850 | .
851 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # regularizepsf
2 | [](https://codecov.io/gh/punch-mission/regularizepsf)
3 | [](https://zenodo.org/badge/latestdoi/555583385)
4 | [](https://badge.fury.io/py/regularizepsf)
5 | [](https://github.com/punch-mission/regularizepsf/actions/workflows/ci.yml)
6 |
7 | A package for manipulating and correcting variable point spread functions.
8 |
9 | Below is an example of correcting model data using the package. An initial image of a simplified starfield (a) is synthetically observed with a slowly
10 | varying PSF (b), then regularized with this technique (c). The final image visually matches a direct convolution of
11 | the initial image with the target PSF (d). The panels are gamma-corrected to highlight the periphery of the model PSFs.
12 | 
13 |
14 | ## Getting started
15 |
16 | `pip install regularizepsf` and then follow along with the [documentation](https://regularizepsf.readthedocs.io/en/latest/index.html).
17 |
18 | ## Contributing
19 | We encourage all contributions. If you have a problem with the code or would like to see a new feature, please open an issue. Or you can submit a pull request.
20 |
21 | If you're contributing code please see [this package's development guide](https://regularizepsf.readthedocs.io/en/latest/development.html).
22 |
23 | ## License
24 | See [LICENSE file](LICENSE)
25 |
26 | ## Need help?
27 | Please ask a question in our [discussions](https://github.com/punch-mission/regularizepsf/discussions)
28 |
29 | ## Citation
30 | Please cite [the associated paper](https://iopscience.iop.org/article/10.3847/1538-3881/acc578) if you use this technique:
31 |
32 | ```
33 | @article{Hughes_2023,
34 | doi = {10.3847/1538-3881/acc578},
35 | url = {https://dx.doi.org/10.3847/1538-3881/acc578},
36 | year = {2023},
37 | month = {apr},
38 | publisher = {The American Astronomical Society},
39 | volume = {165},
40 | number = {5},
41 | pages = {204},
42 | author = {J. Marcus Hughes and Craig E. DeForest and Daniel B. Seaton},
43 | title = {Coma Off It: Regularizing Variable Point-spread Functions},
44 | journal = {The Astronomical Journal}
45 | }
46 | ```
47 |
48 | If you use this software, please also cite the package with the specific version used. [Zenodo always has the most up-to-date citation](https://zenodo.org/records/10066960).
49 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/cite.rst:
--------------------------------------------------------------------------------
1 | Citation
2 | ======================
3 |
4 | To cite `the associated paper `::
5 |
6 | @article{Hughes_2023,
7 | doi = {10.3847/1538-3881/acc578},
8 | url = {https://dx.doi.org/10.3847/1538-3881/acc578},
9 | year = {2023},
10 | month = {apr},
11 | publisher = {The American Astronomical Society},
12 | volume = {165},
13 | number = {5},
14 | pages = {204},
15 | author = {J. Marcus Hughes and Craig E. DeForest and Daniel B. Seaton},
16 | title = {Coma Off It: Regularizing Variable Point-spread Functions},
17 | journal = {The Astronomical Journal}
18 | }
19 |
20 |
21 | If you use this software, please also cite the package with the specific version used.
22 | `Zenodo always has the most up-to-date citation. `_
23 |
--------------------------------------------------------------------------------
/docs/source/concepts.rst:
--------------------------------------------------------------------------------
1 | Concepts
2 | ==========
3 |
4 | For a very thorough and mathematical treatment of the technique see `our Astronomical Journal paper `_.
5 |
6 | Overview of the technique
7 | -------------------------
8 | A point spread function (PSF) describes how the optical system spreads light from sources.
9 | The basic premise of this technique is to model the point spread function of an imager using stars as our point sources.
10 | Then, we calculate the inverse PSF and apply it. We could directly convolve the inverse PSF but convolutions are slow.
11 | A convolution in the image is the same as multiplying in Fourier space, a much faster operation, so we do that instead.
12 | This package supports defining a PSF transformation from any input PSF to any target PSF.
13 |
14 | Since the PSF can vary across the image, we create many local models that apply only in smaller regions of the image.
15 | These regions overlap so the correction is smooth without hard transition edges.
16 |
17 | Overview of the package
18 | ------------------------
19 | The package has a few main components:
20 |
21 | 1. Representations of PSFs in ``regularizepsf.psf``
22 | 2. A method of transforming a PSF from one to another in ``regularizepsf.transform``
23 | 3. Routines to model a PSF from data in ``regularizepsf.builder``
24 | 4. Extra visualization tools in ``regularizepsf.visualize``
25 |
26 | PSFs can be represented in three ways:
27 |
28 | 1. `simple_functional_psf`: the PSF is described as a mathematical function that doesn't vary across an image
29 | 2. `varied_functional_psf`: the PSF is described as a mathematical function that varies across the image
30 | 3. `ArrayPSF`: the PSF is described using many small arrays, avoiding the need to find an expressive functional model
31 |
32 | Using a set of images, we can extract `ArrayPSF`s directly and quickly correct an image.
33 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
from importlib.metadata import version as get_version
from packaging.version import Version

# Make the repository root importable so autoapi/autodoc can find the package.
sys.path.insert(0, os.path.abspath("../.."))


project = "regularizepsf"
copyright = "2024, J. Marcus Hughes and the PUNCH Science Operations Center"
author = "J. Marcus Hughes and the PUNCH Science Operations Center"

# Derive the documented version from the installed package's metadata.
release: str = get_version("regularizepsf")
version: str = release
_version = Version(release)
if _version.is_devrelease:
    # Collapse dev releases to "<base>.devN" so docs show a stable label.
    version = release = f"{_version.base_version}.dev{_version.dev}"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = ["autoapi.extension",
              "sphinx.ext.autodoc",
              "sphinx.ext.napoleon",
              "nbsphinx",
              "IPython.sphinxext.ipython_console_highlighting"]

templates_path = ["_templates"]
exclude_patterns = []



# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "pydata_sphinx_theme"
html_show_sourcelink = False
html_static_path = ["_static"]
html_theme_options = {
    "use_edit_page_button": True,
    "icon_links": [
        {
            "name": "GitHub",
            "url": "https://github.com/punch-mission/regularizepsf",
            "icon": "fa-brands fa-github",
            "type": "fontawesome",
        },
    ],
    "show_nav_level": 1,
    "show_toc_level": 3,
}
# Context used by the pydata theme's "edit this page" button.
html_context = {
    # "github_url": "https://github.com", # or your GitHub Enterprise site
    "github_user": "punch-mission",
    "github_repo": "regularizepsf",
    "github_version": "main",
    "doc_path": "docs/source/",
}


# Directories scanned by sphinx-autoapi for API documentation.
autoapi_dirs = ["../../regularizepsf"]
autoapi_python_class_content = "both"
--------------------------------------------------------------------------------
/docs/source/development.rst:
--------------------------------------------------------------------------------
1 | Development
2 | ============
3 | We encourage all contributions. Please see our `contribution guide first `_. If you're contributing code, we recommend reading `our project-wide development guide `_.
4 |
5 | We recommend working in a virtual environment.
6 | This can be created by running ``python -m venv venv``. Then, activate the environment with ``source venv/bin/activate``.
7 | You can then install the required packages with ``pip install ".[dev]"``.
8 |
9 | If at any time you run into issues, please contact us by :doc:`following the guidelines here `.
10 |
11 | Setting up pre-commit
12 | ----------------------
13 |
14 | The first time you develop code, you'll need to install the pre-commit. This checks that our style is consistent.
15 | It gets installed when you do ``pip install ".[dev]"`` but then requires you to activate them by
16 | running ``pre-commit install``. Now every time you commit, our checks will run first.
17 |
18 | Building the docs
19 | ------------------
20 | The docs are built using ``sphinx``. First, you must install it and the other documentation requirements with ::
21 |
22 | pip install ".[docs]"
23 |
24 | Then, navigate to the ``docs`` directory and run ``make html`` to build the docs.
25 |
26 | We use ReadTheDocs, so a preview of the docs is built with each PR.
27 | That makes it easier to check updates without manually building.
28 |
29 | Running tests
30 | -------------
31 | To run the tests for this package, run ``pytest`` in the repository base directory.
32 |
33 | Tests are automatically run for pull requests.
34 |
--------------------------------------------------------------------------------
/docs/source/help.rst:
--------------------------------------------------------------------------------
1 | Help
2 | ==============
3 |
4 | First of all, thank you for your support and use of the package!
5 |
6 | If you notice a bug, please `open an issue on GitHub `_.
7 |
8 | If you need help using this code, please `start a discussion on GitHub `_.
9 | We encourage using GitHub rather than email so that others can benefit from your inquiry too. We want to make this code
10 | as user-friendly as possible. If you're encountering an issue, it's likely someone else is too; you can help
11 | everyone by speaking up.
12 |
--------------------------------------------------------------------------------
/docs/source/images/dash.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/docs/source/images/dash.png
--------------------------------------------------------------------------------
/docs/source/images/estimated_corrected_psfs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/docs/source/images/estimated_corrected_psfs.png
--------------------------------------------------------------------------------
/docs/source/images/estimated_psfs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/docs/source/images/estimated_psfs.png
--------------------------------------------------------------------------------
/docs/source/images/model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/docs/source/images/model.png
--------------------------------------------------------------------------------
/docs/source/images/star_distribution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/docs/source/images/star_distribution.png
--------------------------------------------------------------------------------
/docs/source/images/transfer_kernels.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/docs/source/images/transfer_kernels.png
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to regularizepsf!
2 | =========================================
3 |
4 | **regularizepsf** is a Python package (with Cython speed improvements) for determining and correcting
5 | point spread functions in astronomical images. It was originally developed for the `PUNCH`_ mission
6 | and is documented in `an Astronomical Journal paper`_.
7 |
8 | Below is an example of correcting model data using the package.
9 | An initial image of a simplified starfield (a) is synthetically observed with a slowly varying PSF (b),
10 | then regularized with this technique (c). The final image visually matches a direct convolution of the initial image
11 | with the target PSF (d). The panels are gamma-corrected to highlight the periphery of the model PSFs.
12 |
13 | .. image:: images/model.png
14 | :width: 800
15 | :alt: example of correction
16 |
17 | .. toctree::
18 | :maxdepth: 2
19 | :caption: Contents:
20 |
21 | concepts
22 | example.ipynb
23 | help
24 | cite
25 | development
26 |
27 | Indices and tables
28 | ==================
29 |
30 | * :ref:`genindex`
31 | * :ref:`modindex`
32 | * :ref:`search`
33 |
34 | .. _PUNCH: https://punch.space.swri.edu/
35 | .. _an Astronomical Journal paper: https://iopscience.iop.org/article/10.3847/1538-3881/acc578
36 |
--------------------------------------------------------------------------------
/model_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/model_example.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel", "numpy", "setuptools-scm>=8"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [tool.setuptools]
6 | packages = ["regularizepsf"]
7 |
8 | [project]
9 | name = "regularizepsf"
10 | dynamic = ["version"]
11 | requires-python = ">3.10"
12 | description = "Point spread function modeling and regularization"
13 | dependencies = [
14 | "numpy",
15 | "h5py",
16 | "sep-pjw",
17 | "astropy",
18 | "scipy",
19 | "scikit-image",
20 | "matplotlib",
21 | ]
22 | readme = "README.md"
23 | license = {file = "LICENSE"}
24 |
25 | [project.optional-dependencies]
26 | test = [
27 | "pytest",
28 | "pytest-cov",
29 | "hypothesis",
30 | "coverage",
31 | "ruff",
32 | "pytest-mpl",
33 | ]
34 | docs = [
35 | "packaging",
36 | "sphinx",
37 | "pydata-sphinx-theme",
38 | "sphinx-autoapi",
39 | "nbsphinx",
40 | "ipython",
41 | ]
42 | dev = ["regularizepsf[test, docs]", "pre-commit"]
43 |
44 | [tool.setuptools_scm]
45 |
46 | [tool.ruff]
47 | exclude = ['tests/*']
48 | line-length = 120
49 | # lint.select = ["ALL"]
50 | lint.ignore = [ "FBT001", "FBT002", "ANN401", "E731"]
51 |
52 | #[tool.ruff.lint]
53 | #select = ["NPY201"]
54 |
55 | [tool.isort]
56 | balanced_wrapping = true
57 | default_section = "THIRDPARTY"
58 | include_trailing_comma = true
59 | known_compatibility = "future"
60 | known_first_party = "regularizepsf"
61 | length_sort = false
62 | length_sort_sections = "stdlib"
63 | line_length = 120
64 | multi_line_output = 3
65 | no_lines_before = "LOCALFOLDER"
66 | sections = "FUTURE, COMPATIBILITY, STDLIB, THIRDPARTY, FIRSTPARTY, LOCALFOLDER"
67 |
68 | [tool.cibuildwheel]
69 | # Disable building PyPy wheels on all platforms
70 | skip = "pp*"
71 |
--------------------------------------------------------------------------------
/regularizepsf/__init__.py:
--------------------------------------------------------------------------------
"""Global init."""

import importlib.metadata

from .builder import ArrayPSFBuilder
from .psf import ArrayPSF, simple_functional_psf, varied_functional_psf
from .transform import ArrayPSFTransform

# Version is resolved from the installed distribution's metadata (setuptools-scm).
__version__ = importlib.metadata.version("regularizepsf")

# Explicit public API of the package.
__all__ = ["simple_functional_psf",
           "varied_functional_psf",
           "ArrayPSF",
           "ArrayPSFBuilder",
           "ArrayPSFTransform",
           "__version__"]
17 |
--------------------------------------------------------------------------------
/regularizepsf/builder.py:
--------------------------------------------------------------------------------
1 | """Functions for building PSF models from images."""
2 |
3 | import pathlib
4 | from collections.abc import Generator
5 |
6 | import numpy as np
7 | import sep_pjw as sep
8 | from astropy.io import fits
9 | from scipy.interpolate import RectBivariateSpline
10 | from skimage.transform import downscale_local_mean
11 |
12 | from regularizepsf.exceptions import IncorrectShapeError, PSFBuilderError
13 | from regularizepsf.psf import ArrayPSF
14 | from regularizepsf.util import IndexedCube, calculate_covering
15 |
16 |
17 | def _convert_to_generator(images: list[pathlib.Path] | np.ndarray | Generator,
18 | hdu_choice: int | None = None) -> Generator:
19 | if isinstance(images, Generator):
20 | data_iterator = images
21 | elif isinstance(images, np.ndarray):
22 | if len(images.shape) == 3:
23 | def generator() -> np.ndarray:
24 | yield from images
25 | data_iterator = generator()
26 | elif len(images.shape) == 2:
27 | def generator() -> np.ndarray:
28 | while True:
29 | yield images
30 | data_iterator = generator()
31 | else:
32 | msg = "Image data array must be 3D"
33 | raise IncorrectShapeError(msg)
34 | elif isinstance(images, list) and (isinstance(images[0], str) or isinstance(images[0], pathlib.Path)):
35 | def generator() -> np.ndarray:
36 | for image_path in images:
37 | with fits.open(image_path) as hdul:
38 | yield hdul[hdu_choice].data.astype(float)
39 | data_iterator = generator()
40 | else:
41 | msg = "Unsupported type for `images`"
42 | raise TypeError(msg)
43 |
44 | return data_iterator
45 |
46 | def _scale_image(image, interpolation_scale):
47 | interpolator = RectBivariateSpline(np.arange(image.shape[0]),
48 | np.arange(image.shape[1]),
49 | image)
50 | image = interpolator(np.linspace(0,
51 | image.shape[0] - 1,
52 | 1 + (image.shape[0] - 1) * interpolation_scale),
53 | np.linspace(0,
54 | image.shape[1] - 1,
55 | 1 + (image.shape[1] - 1) * interpolation_scale))
56 | return image
57 |
def _find_patches(image, star_threshold, star_mask, interpolation_scale, psf_size, i):
    """Detect stars in `image` and cut out a patch of pixels around each one.

    Stars are located with `sep` after background subtraction. Each patch is a
    square of side `psf_size * interpolation_scale` taken from the
    background-removed image, keyed by a coordinate tuple
    (image index `i`, row corner, column corner).
    """
    background = sep.Background(image)
    image_background_removed = image - background
    image_star_coords = sep.extract(image_background_removed,
                                    star_threshold,
                                    err=background.globalrms,
                                    mask=star_mask)

    # NOTE(review): pairing ("y", "x") maps sep's image (x, y) output to
    # (row, col) array order — confirm against callers. The half-patch
    # offset places each detection at its patch's center.
    coordinates = [(i,
                    int(round(x - psf_size * interpolation_scale / 2)),
                    int(round(y - psf_size * interpolation_scale / 2)))
                   for x, y in zip(image_star_coords["y"], image_star_coords["x"], strict=True)]

    # pad in case someone selects a region on the edge of the image
    padding_shape = ((psf_size * interpolation_scale, psf_size * interpolation_scale),
                     (psf_size * interpolation_scale, psf_size * interpolation_scale))
    padded_image = np.pad(image_background_removed,
                          padding_shape,
                          mode="reflect")

    patches = {}
    for coordinate in coordinates:
        # Indices shift by one pad width (psf_size * interpolation_scale)
        # because padding moved the image origin by exactly that amount.
        patch = padded_image[coordinate[1] + interpolation_scale * psf_size:
                             coordinate[1] + 2 * interpolation_scale * psf_size,
                             coordinate[2] + interpolation_scale * psf_size:
                             coordinate[2] + 2 * interpolation_scale * psf_size]
        patches[coordinate] = patch

    return patches
87 |
88 | def _find_matches(coordinate, x_bounds, y_bounds, psf_size):
89 | center_x = coordinate[1] + psf_size // 2
90 | center_y = coordinate[2] + psf_size // 2
91 | x_matches = (x_bounds[:, 0] <= center_x) * (center_x < x_bounds[:, 1])
92 | y_matches = (y_bounds[:, 0] <= center_y) * (center_y < y_bounds[:, 1])
93 | match_indices = np.where(x_matches * y_matches)[0]
94 | return match_indices
95 |
96 | def _average_patches_by_mean(patches, corners, x_bounds, y_bounds, psf_size):
97 | accumulator = {tuple(corner): np.zeros((psf_size, psf_size))
98 | for corner in corners}
99 | accumulator_counts = {tuple(corner): np.zeros((psf_size, psf_size))
100 | for corner in corners}
101 | counts = {tuple(corner): 0 for corner in corners}
102 |
103 | for coordinate, patch in patches.items():
104 | patch = patch / patch[psf_size // 2, psf_size // 2] # normalize so the star brightness is always 1
105 | match_indices = _find_matches(coordinate, x_bounds, y_bounds, psf_size)
106 |
107 | for match_index in match_indices:
108 | match_corner = tuple(corners[match_index])
109 | accumulator[match_corner] = np.nansum([accumulator[match_corner], patch], axis=0)
110 | accumulator_counts[match_corner] += np.isfinite(patch)
111 | counts[match_corner] += 1
112 |
113 | averages = {(corner[0], corner[1]):
114 | accumulator[corner] / accumulator_counts[corner]
115 | for corner in accumulator}
116 |
117 | return averages, counts
118 |
def _average_patches_by_percentile(patches, corners, x_bounds, y_bounds, psf_size, percentile: float=50):
    """Combine the star patches overlapping each region with a NaN-aware percentile.

    Returns a dict mapping each corner to its reduced patch and a dict of
    how many stars contributed to each corner.
    """
    if percentile == 50:
        # the median has a dedicated, faster implementation
        reduce_stack = lambda d: np.nanmedian(d, axis=0)
    else:
        reduce_stack = lambda d: np.nanpercentile(d, percentile, axis=0)

    stack = {tuple(corner): [] for corner in corners}
    counts = {tuple(corner): 0 for corner in corners}

    for coordinate, patch in patches.items():
        # normalize so the star brightness is always 1
        normalized = patch / patch[psf_size // 2, psf_size // 2]

        for match_index in _find_matches(coordinate, x_bounds, y_bounds, psf_size):
            key = tuple(corners[match_index])
            stack[key].append(normalized)
            counts[key] += 1

    averages = {(key[0], key[1]): reduce_stack(stack[key]) for key in stack}
    return averages, counts
139 |
def _average_patches(patches, corners, method='mean', percentile: float = None):
    """Dispatch patch averaging to the requested combination method.

    method may be 'mean', 'median', or 'percentile' (the latter using the
    supplied percentile value). Raises PSFBuilderError for anything else.
    """
    psf_size = next(iter(patches.values())).shape[0]
    corner_x = corners[:, 0]
    corner_y = corners[:, 1]
    x_bounds = np.stack([corner_x, corner_x + psf_size], axis=-1)
    y_bounds = np.stack([corner_y, corner_y + psf_size], axis=-1)

    if method == 'mean':
        return _average_patches_by_mean(patches, corners, x_bounds, y_bounds, psf_size)
    if method == 'percentile':
        return _average_patches_by_percentile(patches, corners, x_bounds, y_bounds, psf_size, percentile)
    if method == "median":
        return _average_patches_by_percentile(patches, corners, x_bounds, y_bounds, psf_size, 50)
    raise PSFBuilderError(f"Unknown method {method}.")
156 |
class ArrayPSFBuilder:
    """A builder that will take a series of images and construct an ArrayPSF to represent their implicit PSF."""

    def __init__(self, psf_size: int) -> None:
        """Initialize an ArrayPSFBuilder.

        Parameters
        ----------
        psf_size : int
            side length (in pixels) of the square patches used to model the PSF

        """
        self._psf_size = psf_size

    @property
    def psf_size(self):
        """Side length (in pixels) of the square PSF patches."""
        return self._psf_size

    def build(self,
              images: list[str] | list[pathlib.Path] | np.ndarray | Generator,
              star_masks: list[str] | list[pathlib.Path] | np.ndarray | Generator | None = None,
              hdu_choice: int | None = 0,
              interpolation_scale: int = 1,
              star_threshold: int = 3,
              average_method: str = 'median',
              percentile: float = 50) -> tuple[ArrayPSF, dict]:
        """Build the PSF model.

        Parameters
        ----------
        images : list[str] | list[pathlib.Path] | np.ndarray | Generator
            images to use
        star_masks : list[str] | list[pathlib.Path] | np.ndarray | Generator | None
            optional masks forwarded to star finding; when None, no masking is applied
        hdu_choice : int | None
            which HDU to read when images are provided as file paths
        interpolation_scale : int
            upsampling factor applied to each image before patches are extracted
        star_threshold : int
            detection threshold forwarded to the star finder
        average_method : str
            how overlapping patches are combined: 'mean', 'median', or 'percentile'
        percentile : float
            percentile used when average_method == 'percentile'

        Returns
        -------
        (ArrayPSF, dict)
            an array PSF and the counts of stars in each component

        """
        data_iterator = _convert_to_generator(images, hdu_choice=hdu_choice)

        if star_masks is None:
            # Yield None forever so zip() below pairs every image with "no mask".
            def generator() -> Generator:
                while True:
                    yield None
            mask_iterator = generator()
        else:
            mask_iterator = _convert_to_generator(star_masks, hdu_choice=hdu_choice)

        # We'll store the first image's shape, and then make sure the others match.
        image_shape = None
        patches = {}
        for i, (image, star_mask) in enumerate(zip(data_iterator, mask_iterator, strict=False)):
            if image_shape is None:
                image_shape = image.shape
            elif image.shape != image_shape:
                msg = ("Images must all be the same shape. "
                       f"Found both {image_shape} and {image.shape}.")
                raise PSFBuilderError(msg)

            # if the image should be scaled then, do the scaling before anything else
            if interpolation_scale != 1:
                # BUG FIX: previously passed interpolation_scale=1 here, which
                # made this branch a no-op and silently skipped upsampling.
                image = _scale_image(image, interpolation_scale=interpolation_scale)

            # find stars using SEP
            patches.update(_find_patches(image, star_threshold, star_mask, interpolation_scale, self.psf_size, i))

        # Covering grid is computed in the (possibly upscaled) pixel space.
        corners = calculate_covering((image_shape[0] * interpolation_scale,
                                      image_shape[1] * interpolation_scale),
                                     self.psf_size * interpolation_scale)
        averaged_patches, counts = _average_patches(patches, corners,
                                                    method=average_method, percentile=percentile)

        values_coords = []
        values_array = np.zeros((len(averaged_patches), self.psf_size, self.psf_size))
        for i, (coordinate, this_patch) in enumerate(averaged_patches.items()):
            if interpolation_scale != 1:
                # bring the averaged patch back down to the native resolution
                this_patch = downscale_local_mean(this_patch, (interpolation_scale, interpolation_scale))
            values_coords.append(coordinate)
            values_array[i, :, :] = this_patch

        return ArrayPSF(IndexedCube(values_coords, values_array)), counts
232 |
--------------------------------------------------------------------------------
/regularizepsf/exceptions.py:
--------------------------------------------------------------------------------
1 | """Errors and warnings for regularizepsf."""
2 |
3 |
class RegularizePSFError(Exception):
    """Base class for all regularizepsf exceptions; catch this to handle any error raised by the package."""
6 |
7 |
class InvalidCoordinateError(RegularizePSFError):
    """Raised when the key for a requested coordinate does not exist in the model."""
10 |
11 |
class IncorrectShapeError(RegularizePSFError):
    """Raised when the shapes of the model and the value do not match."""
14 |
15 |
class InvalidFunctionError(RegularizePSFError):
    """Raised when a function supplied for a functional model has invalid parameters."""
18 |
19 |
class FunctionParameterMismatchError(RegularizePSFError):
    """Raised when a function is evaluated with kwargs that are not in its signature."""
22 |
class PSFBuilderError(RegularizePSFError):
    """Raised when something goes wrong while building the PSF model."""
25 |
--------------------------------------------------------------------------------
/regularizepsf/psf.py:
--------------------------------------------------------------------------------
1 | """Representations of point spread functions."""
2 |
3 | from __future__ import annotations
4 |
5 | import inspect
6 | import pathlib
7 | from typing import TYPE_CHECKING, Any, cast
8 | from functools import partial
9 |
10 | import h5py
11 | import matplotlib as mpl
12 | import numpy as np
13 | import scipy.fft
14 | from astropy.io import fits
15 |
16 | from regularizepsf.exceptions import IncorrectShapeError, InvalidCoordinateError, InvalidFunctionError
17 | from regularizepsf.util import IndexedCube
18 | from regularizepsf.visualize import KERNEL_IMSHOW_ARGS_DEFAULT, PSF_IMSHOW_ARGS_DEFAULT, visualize_grid
19 |
20 | if TYPE_CHECKING:
21 | from numbers import Real
22 | from collections.abc import Callable
23 |
24 |
class SimpleFunctionalPSF:
    """Model for a simple PSF."""

    def __init__(self, function: Callable) -> None:
        """Create a PSF object.

        Parameters
        ----------
        function
            Python function representing the PSF,
            first two parameters must be x and y and must return an numpy array

        """
        self._f: Callable = function
        self._signature: inspect.Signature = inspect.signature(function)

        parameter_names = list(self._signature.parameters)
        if len(parameter_names) < 2:  # noqa: PLR2004
            msg = "row and col must be the first two arguments in your model equation."
            raise InvalidFunctionError(msg)
        if parameter_names[0] != "row":
            msg = "row must be the first arguments in your model equation."
            raise InvalidFunctionError(msg)
        if parameter_names[1] != "col":
            msg = "col must be the second arguments in your model equation"
            raise InvalidFunctionError(msg)

        # Every argument after (row, col) is a tunable parameter of the model.
        self._parameters: set[str] = set(parameter_names[2:])

    def __call__(self, row: Real | np.ndarray, col: Real | np.ndarray, **kwargs: dict[str, Any]) -> Real | np.ndarray:
        """Get the PSF value at (row, col)."""
        return self._f(row, col, **kwargs)

    @property
    def parameters(self) -> set[str]:
        """Get the parameters of this PSF."""
        return self._parameters

    def as_array_psf(self, coordinates: list[tuple[int, int]], size: int, **kwargs) -> ArrayPSF:  # noqa: ANN003
        """Convert FunctionalPSF to an ArrayPSF."""
        rr, cc = np.meshgrid(np.arange(size), np.arange(size))
        patch = self(rr, cc, **kwargs)
        # the same evaluation is reused at every requested coordinate
        stacked = np.stack([patch for _ in coordinates])
        return ArrayPSF(IndexedCube(coordinates, stacked))

    @property
    def f(self) -> Callable:
        """Retrieve the PSF functional form for calling."""
        return self._f
76 |
77 |
def simple_functional_psf(arg: Any = None) -> SimpleFunctionalPSF:
    """Decorate a SimpleFunctionalPSF."""
    # used as a bare decorator: the decorated function is passed directly
    if not callable(arg):
        msg = "psf decorator must have no arguments."
        raise TypeError(msg)
    return SimpleFunctionalPSF(arg)
84 |
85 |
class VariedFunctionalPSF:
    """Model for a PSF that varies over the field of view."""

    def __init__(self, vary_function: Callable, base_psf: SimpleFunctionalPSF, validate_at_call: bool = True) -> None:
        """Create a VariedFunctionalPSF object.

        Parameters
        ----------
        vary_function : Callable
            function used to vary the parameters of the base_psf; it must take
            exactly `row` and `col` and return a dict of parameter values
        base_psf : Callable
            base form of the PSF
        validate_at_call : bool
            whether to check if parameters are valid at each call, turning off may be faster but is risky

        """
        self._vary_function = vary_function
        self._base_psf = base_psf
        self.validate_at_call = validate_at_call

        self.parameterization_signature = inspect.signature(vary_function)
        n_parameters = len(self.parameterization_signature.parameters)
        # BUG FIX: the original built this message by implicit string
        # concatenation without spaces, producing e.g.
        # "Found function requiring3arguments." — and the too-few case said
        # only "Found 1" with no explanation.
        if n_parameters != 2:  # noqa: PLR2004
            msg = (f"Found function requiring {n_parameters} arguments. "
                   "Expected exactly 2: `row` and `col`.")
            raise InvalidFunctionError(msg)

        for i, variable in enumerate(self.parameterization_signature.parameters):
            if i == 0 and variable != "row":
                msg = "row must be the first argument in your parameterization equation."
                raise InvalidFunctionError(msg)
            if i == 1 and variable != "col":
                msg = "col must be the second argument in your parameterization equation"
                raise InvalidFunctionError(msg)

        # check the parameters at the origin
        origin_evaluation: dict[str, Any] = vary_function(0, 0)
        self._origin_parameters: set[str] = set(origin_evaluation.keys())
        if self._base_psf.parameters != self._origin_parameters:
            # BUG FIX: added the missing space before "at the origin".
            msg = (
                f"The base PSF model has parameters {self._base_psf.parameters} "
                f"while the varied psf supplies {self._origin_parameters} "
                "at the origin. These must match."
            )
            raise InvalidFunctionError(msg)

    def __call__(self, row: Real | np.ndarray, col: Real | np.ndarray) -> Real | np.ndarray:
        """Get the PSF value at (row, col)."""
        variance = self._vary_function(row, col)
        if self.validate_at_call and set(variance.keys()) != self.parameters:
            msg = (
                f"At (row, col) the varying parameters were {set(variance.keys())}"
                f" when the parameters were expected as {self.parameters}."
            )
            raise InvalidFunctionError(msg)
        return self._base_psf(row, col, **variance)

    @property
    def parameters(self) -> set[str]:
        """Get the parameters of this PSF."""
        return self._base_psf.parameters

    def simplify(self, row: int, col: int) -> SimpleFunctionalPSF:
        """Simplify this VariedFunctionalPSF to a SimpleFunctionalPSF by evaluating at (row, col)."""
        variance = self._vary_function(row, col)
        return simple_functional_psf(partial(self._base_psf.f, **variance))

    def as_array_psf(self, coordinates: list[tuple[int, int]], size: int, **kwargs) -> ArrayPSF:  # noqa: ANN003
        """Convert FunctionalPSF to an ArrayPSF."""
        values = []
        rr, cc = np.meshgrid(np.arange(size), np.arange(size))
        for row, col in coordinates:
            values.append(self.simplify(row, col)(rr, cc, **kwargs))
        return ArrayPSF(IndexedCube(coordinates, np.stack(values)))
166 |
167 |
def _varied_functional_psf(base_psf: SimpleFunctionalPSF) -> VariedFunctionalPSF:
    """Build the decorator used by varied_functional_psf for a given base PSF."""
    if base_psf is None:
        msg = "A base_psf must be provided to the varied_psf decorator."
        raise TypeError(msg)

    def inner(__fn: Callable = None, *, check_at_call: bool = True) -> Callable:  # noqa: RUF013
        # Called without a function (decorator used with arguments): return a
        # partially-applied decorator that remembers check_at_call.
        if __fn is None:
            return partial(inner, check_at_call=check_at_call)
        return VariedFunctionalPSF(__fn, base_psf, validate_at_call=check_at_call)

    return inner
179 |
180 |
def varied_functional_psf(base_psf: SimpleFunctionalPSF = None) -> VariedFunctionalPSF:
    """Decorate to create a VariedFunctionalPSF.

    Must be called with a SimpleFunctionalPSF argument, i.e.
    ``@varied_functional_psf(base)``, not used as a bare decorator.
    """
    if isinstance(base_psf, SimpleFunctionalPSF):
        return cast(VariedFunctionalPSF, _varied_functional_psf(base_psf))
    if callable(base_psf):
        # the decorated function itself arrived here: the decorator was used bare
        # BUG FIX: message previously read "calledwith" (missing space).
        msg = "varied_psf decorator must be called with an argument for the base_psf."
        raise TypeError(msg)
    # BUG FIX: message previously read "exactlyone" (missing space).
    msg = "varied_psf decorator expects exactly one argument of type PSF."
    raise TypeError(msg)
190 |
191 |
class ArrayPSF:
    """A PSF represented as a set of arrays.

    The model pairs two IndexedCubes indexed on the same coordinates:
    the patch values and their FFTs.
    """

    def __init__(
        self, values_cube: IndexedCube, fft_cube: IndexedCube | None = None, workers: int | None = None,
    ) -> None:
        """Initialize an ArrayPSF model.

        Parameters
        ----------
        values_cube : IndexedCube
            PSF model where keys are upper left coordinates of array patches in the image
        fft_cube : IndexedCube
            fft of the model
        workers: int | None
            Maximum number of workers to use for parallel computation of FFT.
            If negative, the value wraps around from os.cpu_count(). See scipy.fft.fft for more details.
            Only used if fft_cube is None.

        Raises
        ------
        IncorrectShapeError
            if the two cubes disagree in sample shape or sample count
        InvalidCoordinateError
            if the two cubes are indexed on different coordinates

        """
        self._values_cube = values_cube
        self._fft_cube = fft_cube
        self._workers = workers

        # Compute the FFT ourselves when the caller did not supply a precomputed one.
        if self._fft_cube is None:
            self._fft_cube = IndexedCube(
                values_cube.coordinates, scipy.fft.fft2(values_cube.values, workers=self._workers),
            )

        # The two cubes must describe the same patches: shape, count, and coordinates.
        if self._fft_cube.sample_shape != self._values_cube.sample_shape:
            msg = (
                f"Values cube and FFT cube have different sample shapes: "
                f"{self._values_cube.sample_shape} != {self._fft_cube.sample_shape}."
            )
            raise IncorrectShapeError(msg)

        if len(self._fft_cube) != len(self._values_cube):
            msg = (
                f"Values cube and FFT cube have different sample counts: "
                f"{len(self._values_cube)} != {len(self._fft_cube)}."
            )
            raise IncorrectShapeError(msg)

        if np.any(np.array(self._values_cube.coordinates) != np.array(self._fft_cube.coordinates)):
            msg = "Values cube and FFT cube have different coordinates"
            raise InvalidCoordinateError(msg)

    @property
    def coordinates(self) -> list[tuple[int, int]]:
        """Get the keys of the PSF model, i.e., where it is evaluated as an array."""
        return self._values_cube.coordinates

    @property
    def values(self) -> np.ndarray:
        """Get the model values."""
        return self._values_cube.values

    @property
    def fft_evaluations(self) -> np.ndarray:
        """Get the FFT evaluations of the model."""
        return self._fft_cube.values

    def __getitem__(self, coord: tuple[int, int]) -> np.ndarray:
        """Evaluate the PSF model at specific coordinates."""
        return self._values_cube[coord]

    def fft_at(self, coord: tuple[int, int]) -> np.ndarray:
        """Retrieve the FFT evaluation at a coordinate."""
        return self._fft_cube[coord]

    def save(self, path: pathlib.Path) -> None:
        """Save the PSF model to a file. Supports h5 and FITS.

        Parameters
        ----------
        path : pathlib.Path
            where to save the PSF model

        Returns
        -------
        None

        """
        path = pathlib.Path(path)
        if path.suffix == ".h5":
            with h5py.File(path, "w") as f:
                f.create_dataset("coordinates", data=self.coordinates)
                f.create_dataset("values", data=self.values)
                f.create_dataset("fft_evaluations", data=self.fft_evaluations)
        elif path.suffix == ".fits":
            # FITS cannot hold complex data directly, so the FFT is stored as
            # separate real and imaginary planes; quantize_level=32 requests a
            # finer quantization for those planes — presumably to limit
            # compression loss; see astropy CompImageHDU for details.
            fits.HDUList([fits.PrimaryHDU(),
                          fits.CompImageHDU(np.array(self.coordinates), name="coordinates"),
                          fits.CompImageHDU(self.values, name="values"),
                          fits.CompImageHDU(self.fft_evaluations.real, name="fft_real", quantize_level=32),
                          fits.CompImageHDU(self.fft_evaluations.imag, name="fft_imag", quantize_level=32),
                          ]).writeto(path)
        else:
            raise NotImplementedError(f"Unsupported file type {path.suffix}. Change to .h5 or .fits.")

    @classmethod
    def load(cls, path: pathlib.Path) -> ArrayPSF:
        """Load the PSF model from a file. Supports h5 and FITS.

        Parameters
        ----------
        path : pathlib.Path
            where to load the PSF model from

        Returns
        -------
        ArrayPSF
            loaded model

        """
        path = pathlib.Path(path)
        if path.suffix == ".h5":
            with h5py.File(path, "r") as f:
                coordinates = [tuple(c) for c in f["coordinates"][:]]
                values = f["values"][:]
                fft_evaluations = f["fft_evaluations"][:]
                values_cube = IndexedCube(coordinates, values)
                fft_cube = IndexedCube(coordinates, fft_evaluations)
        elif path.suffix == ".fits":
            with fits.open(path) as hdul:
                coordinates_index = hdul.index_of("coordinates")
                coordinates = [tuple(c) for c in hdul[coordinates_index].data]

                values_index = hdul.index_of("values")
                values = hdul[values_index].data
                values_cube = IndexedCube(coordinates, values)

                # recombine the real/imaginary planes written by save()
                fft_real_index = hdul.index_of("fft_real")
                fft_real = hdul[fft_real_index].data
                fft_imag_index = hdul.index_of("fft_imag")
                fft_imag = hdul[fft_imag_index].data
                fft_cube = IndexedCube(coordinates, fft_real + fft_imag*1j)
        else:
            raise NotImplementedError(f"Unsupported file type {path.suffix}. Change to .h5 or .fits.")
        return cls(values_cube, fft_cube)

    def visualize_psfs(self,
                       fig: mpl.figure.Figure | None = None,
                       fig_scale: int = 1,
                       all_patches: bool = False, imshow_args: dict | None = None) -> None:  # noqa: ANN002, ANN003
        """Visualize the PSF model."""
        imshow_args = PSF_IMSHOW_ARGS_DEFAULT if imshow_args is None else imshow_args
        visualize_grid(self._values_cube, fig=fig, fig_scale=fig_scale, all_patches=all_patches,
                       colorbar_label="Normalized brightness",
                       imshow_args=imshow_args)

    def visualize_ffts(self,
                       fig: mpl.figure.Figure | None = None,
                       fig_scale: int = 1,
                       all_patches: bool = False, imshow_args: dict | None = None) -> None:  # noqa: ANN002, ANN003
        """Visualize the fft of the PSF."""
        imshow_args = KERNEL_IMSHOW_ARGS_DEFAULT if imshow_args is None else imshow_args

        # back-transform to the spatial domain and display magnitudes
        arr = np.abs(np.fft.fftshift(np.fft.ifft2(self._fft_cube.values)))
        extent = np.max(np.abs(arr))
        # symmetric color limits around zero, unless the caller overrode them
        if 'vmin' not in imshow_args:
            imshow_args['vmin'] = -extent
        if 'vmax' not in imshow_args:
            imshow_args['vmax'] = extent

        # NOTE(review): annotated -> None but returns visualize_grid's result —
        # confirm which is intended.
        return visualize_grid(
            IndexedCube(self._fft_cube.coordinates, arr),
            all_patches=all_patches, fig=fig,
            fig_scale=fig_scale, colorbar_label="Transfer kernel amplitude",
            imshow_args=imshow_args)

    def __eq__(self, other: ArrayPSF) -> bool:
        """Check equality between two ArrayPSFs."""
        if not isinstance(other, ArrayPSF):
            msg = "Can only compare ArrayPSF to other ArrayPSF."
            raise TypeError(msg)
        return self._values_cube == other._values_cube and self._fft_cube == other._fft_cube

    @property
    def sample_shape(self) -> tuple[int, int]:
        """Get the sample shape for this PSF model."""
        return self._values_cube.sample_shape

    def __len__(self) -> int:
        """Get the number of coordinates evaluated in this model."""
        return len(self._values_cube)
377 |
--------------------------------------------------------------------------------
/regularizepsf/transform.py:
--------------------------------------------------------------------------------
1 | """Tools to transform from one PSF to another."""
2 |
3 | from __future__ import annotations
4 |
5 | import pathlib
6 | from typing import TYPE_CHECKING
7 |
8 | import h5py
9 | import matplotlib as mpl
10 | import numpy as np
11 | import scipy
12 | from astropy.io import fits
13 |
14 | from regularizepsf.exceptions import InvalidCoordinateError
15 | from regularizepsf.util import IndexedCube
16 | from regularizepsf.visualize import KERNEL_IMSHOW_ARGS_DEFAULT, visualize_grid
17 |
18 | if TYPE_CHECKING:
19 |
20 | from regularizepsf.psf import ArrayPSF
21 |
22 |
class ArrayPSFTransform:
    """Representation of a transformation from a source to a target PSF that can be applied to images."""

    def __init__(self, transfer_kernel: IndexedCube) -> None:
        """Initialize a PSFTransform.

        Parameters
        ----------
        transfer_kernel: TransferKernel
            the transfer kernel required by this ArrayPSFTransform

        """
        self._transfer_kernel = transfer_kernel

    @property
    def psf_shape(self) -> tuple[int, int]:
        """Retrieve the shape of the individual PSFs for this transform."""
        return self._transfer_kernel.sample_shape

    @property
    def coordinates(self) -> list[tuple[int, int]]:
        """Retrieve the coordinates of the individual PSFs for this transform."""
        return self._transfer_kernel.coordinates

    def __len__(self) -> int:
        """Retrieve the number of coordinates used to represent this transform."""
        return len(self._transfer_kernel)

    @classmethod
    def construct(cls, source: ArrayPSF, target: ArrayPSF, alpha: float, epsilon: float) -> ArrayPSFTransform:
        """Construct an ArrayPSFTransform from a source to a target PSF.

        Parameters
        ----------
        source : ArrayPSF
            source point spread function
        target : ArrayPSF
            target point spread function
        alpha : float
            controls the “hardness” of the transition from amplification to attenuation
        epsilon : float
            controls the maximum of the amplification

        Returns
        -------
        ArrayPSFTransform
            corresponding ArrayPSFTransform instance

        Raises
        ------
        InvalidCoordinateError
            if the source and target PSFs are defined on different coordinates

        """
        if np.any(np.array(source.coordinates) != np.array(target.coordinates)):
            msg = "Source PSF coordinates do not match target PSF coordinates."
            raise InvalidCoordinateError(msg)

        # Regularized frequency-space kernel: dividing out the source PSF is
        # damped by the epsilon * target term, with alpha shaping the rolloff.
        source_abs = abs(source.fft_evaluations)
        target_abs = abs(target.fft_evaluations)
        numerator = source.fft_evaluations.conjugate() * source_abs ** (alpha - 1)
        denominator = source_abs ** (alpha + 1) + (epsilon * target_abs) ** (alpha + 1)
        cube = IndexedCube(source.coordinates, (numerator / denominator) * target.fft_evaluations)
        return ArrayPSFTransform(cube)

    def apply(self, image: np.ndarray, workers: int | None = None, pad_mode: str = "symmetric") -> np.ndarray:
        """Apply the PSFTransform to an image.

        Parameters
        ----------
        image : np.ndarray
            image to apply the transform to
        workers: int | None
            Maximum number of workers to use for parallel computation of FFT.
            If negative, the value wraps around from os.cpu_count(). See scipy.fft.fft for more details.
        pad_mode: str
            how to pad the image when computing ffts, see np.pad for more details.

        Returns
        -------
        np.ndarray
            image with psf transformed

        """
        # Pad by twice the PSF shape on every side so patches whose corners lie
        # outside the image still have pixels to draw on.
        padded_image = np.pad(
            image,
            ((2 * self.psf_shape[0], 2 * self.psf_shape[0]), (2 * self.psf_shape[1], 2 * self.psf_shape[1])),
            mode=pad_mode,
        )

        def slice_padded_image(coordinate: tuple[int, int]) -> tuple[slice, slice]:
            """Get the slice objects for a coordinate patch in the padded cube."""
            row_slice = slice(
                coordinate[0] + self.psf_shape[0] * 2, coordinate[0] + self.psf_shape[0] + self.psf_shape[0] * 2,
            )
            col_slice = slice(
                coordinate[1] + self.psf_shape[1] * 2, coordinate[1] + self.psf_shape[1] + self.psf_shape[1] * 2,
            )
            return row_slice, col_slice

        # Separable sine window; applied both before the FFT and after the
        # inverse FFT so the overlapping patches blend when summed below.
        row_arr, col_arr = np.meshgrid(np.arange(self.psf_shape[0]), np.arange(self.psf_shape[1]))
        apodization_window = np.sin((row_arr + 0.5) * (np.pi / self.psf_shape[0])) * np.sin(
            (col_arr + 0.5) * (np.pi / self.psf_shape[1]),
        )
        apodization_window = np.broadcast_to(apodization_window, (len(self), self.psf_shape[0], self.psf_shape[1]))

        patches = np.stack(
            [
                padded_image[slice_padded_image(coordinate)[0], slice_padded_image(coordinate)[1]]
                for coordinate in self.coordinates
            ],
        )
        # Pointwise multiplication in frequency space convolves each patch
        # with its transfer kernel.
        patches = scipy.fft.fft2(apodization_window * patches, workers=workers)
        patches = np.real(scipy.fft.ifft2(patches * self._transfer_kernel.values, workers=workers))
        patches = patches * apodization_window

        # Overlap-add the corrected patches, then crop the padding back off.
        reconstructed_image = np.zeros_like(padded_image)
        for coordinate, patch in zip(self.coordinates, patches, strict=True):
            reconstructed_image[slice_padded_image(coordinate)[0], slice_padded_image(coordinate)[1]] += patch

        return reconstructed_image[
            2 * self.psf_shape[0] : image.shape[0] + 2 * self.psf_shape[0],
            2 * self.psf_shape[1] : image.shape[1] + 2 * self.psf_shape[1],
        ]

    def visualize(self,
                  fig: mpl.figure.Figure | None = None,
                  fig_scale: int = 1,
                  all_patches: bool = False, imshow_args: dict | None = None) -> None:  # noqa: ANN002, ANN003
        """Visualize the transfer kernels."""
        imshow_args = KERNEL_IMSHOW_ARGS_DEFAULT if imshow_args is None else imshow_args

        # back-transform the kernels to the spatial domain for display
        arr = np.abs(np.fft.fftshift(np.fft.ifft2(self._transfer_kernel.values)))
        extent = np.max(np.abs(arr))
        # symmetric color limits around zero, unless the caller overrode them
        if 'vmin' not in imshow_args:
            imshow_args['vmin'] = -extent
        if 'vmax' not in imshow_args:
            imshow_args['vmax'] = extent

        # NOTE(review): annotated -> None but returns visualize_grid's result —
        # confirm which is intended.
        return visualize_grid(
            IndexedCube(self._transfer_kernel.coordinates, arr),
            all_patches=all_patches, fig=fig,
            fig_scale=fig_scale, colorbar_label="Transfer kernel amplitude",
            imshow_args=imshow_args)

    def save(self, path: pathlib.Path) -> None:
        """Save a PSFTransform to a file. Supports h5 and FITS.

        Parameters
        ----------
        path : pathlib.Path
            where to save the PSFTransform

        Returns
        -------
        None

        """
        path = pathlib.Path(path)
        if path.suffix == ".h5":
            with h5py.File(path, "w") as f:
                f.create_dataset("coordinates", data=self.coordinates)
                f.create_dataset("transfer_kernel", data=self._transfer_kernel.values)
        elif path.suffix == ".fits":
            # FITS cannot hold complex data directly: the kernel is stored as
            # separate real and imaginary planes.
            fits.HDUList([fits.PrimaryHDU(),
                          fits.CompImageHDU(np.array(self.coordinates), name="coordinates"),
                          fits.CompImageHDU(self._transfer_kernel.values.real,
                                            name="transfer_real", quantize_level=32),
                          fits.CompImageHDU(self._transfer_kernel.values.imag,
                                            name="transfer_imag", quantize_level=32)]).writeto(path)
        else:
            raise NotImplementedError(f"Unsupported file type {path.suffix}. Change to .h5 or .fits.")

    @classmethod
    def load(cls, path: pathlib.Path) -> ArrayPSFTransform:
        """Load a PSFTransform object. Supports h5 and FITS.

        Parameters
        ----------
        path : pathlib.Path
            file to load the PSFTransform from

        Returns
        -------
        PSFTransform

        """
        path = pathlib.Path(path)
        if path.suffix == ".h5":
            with h5py.File(path, "r") as f:
                coordinates = [tuple(c) for c in f["coordinates"][:]]
                transfer_kernel = f["transfer_kernel"][:]
                kernel = IndexedCube(coordinates, transfer_kernel)
        elif path.suffix == ".fits":
            with fits.open(path) as hdul:
                coordinates_index = hdul.index_of("coordinates")
                coordinates = [tuple(c) for c in hdul[coordinates_index].data]
                # recombine the real/imaginary planes written by save()
                transfer_real_index = hdul.index_of("transfer_real")
                transfer_real = hdul[transfer_real_index].data
                transfer_imag_index = hdul.index_of("transfer_imag")
                transfer_imag = hdul[transfer_imag_index].data
                kernel = IndexedCube(coordinates, transfer_real + transfer_imag*1j)
        else:
            raise NotImplementedError(f"Unsupported file type {path.suffix}. Change to .h5 or .fits.")
        return cls(kernel)

    def __eq__(self, other: ArrayPSFTransform) -> bool:
        """Test equality between two transforms."""
        if not isinstance(other, ArrayPSFTransform):
            msg = "Can only compare ArrayPSFTransform to another ArrayPSFTransform."
            raise TypeError(msg)
        return self._transfer_kernel == other._transfer_kernel
230 |
--------------------------------------------------------------------------------
/regularizepsf/util.py:
--------------------------------------------------------------------------------
1 | """Utility functions for regularizepsf."""
2 |
3 | from __future__ import annotations
4 |
5 | import numpy as np
6 |
7 | from regularizepsf.exceptions import IncorrectShapeError, InvalidCoordinateError
8 |
9 |
def calculate_covering(image_shape: tuple[int, int], size: int) -> np.ndarray:
    """Determine the grid of overlapping neighborhood patches.

    Parameters
    ----------
    image_shape : tuple of 2 ints
        shape of the image we plan to correct
    size : int
        size of the square patches we want to create

    Returns
    -------
    np.ndarray
        an array of shape Nx2 where return[:, 0]
        are the x coordinate and return[:, 1] are the y coordinates

    """
    half_size = np.ceil(size / 2).astype(int)

    # Four staggered grids — aligned, diagonally offset, x-offset, y-offset —
    # whose patches overlap to cover every pixel of the image.
    grid_starts = [(0, 0),
                   (-half_size, -half_size),
                   (-half_size, 0),
                   (0, -half_size)]

    x_pieces = []
    y_pieces = []
    for x_start, y_start in grid_starts:
        xx, yy = np.meshgrid(np.arange(x_start, image_shape[0], size),
                             np.arange(y_start, image_shape[1], size))
        x_pieces.append(xx.flatten())
        y_pieces.append(yy.flatten())

    return np.stack([np.concatenate(x_pieces), np.concatenate(y_pieces)], -1)
54 |
55 |
class IndexedCube:
    """A stack of arrays with assigned coordinates as keys."""

    def __init__(self, coordinates: list[tuple[int, int]], values: np.ndarray) -> None:
        """Initialize an IndexedCube.

        Parameters
        ----------
        coordinates : list[tuple[int, int]]
            list of image coordinates for upper left corner of the cube patches represented.
        values: np.ndarray
            an array of image cube patches, should be size (len(coordinates), x, y)
            where x and y are the dimensions of the patches

        """
        if values.ndim != 3:  # noqa: PLR2004
            msg = "Values must be three dimensional"
            raise IncorrectShapeError(msg)

        if len(coordinates) != values.shape[0]:
            msg = f"{len(coordinates)} coordinates defined but {values.shape[0]} values found."
            raise IncorrectShapeError(msg)

        self._coordinates = coordinates
        self._values = values

        # Map each coordinate to its slice index along axis 0.
        self._index = {tuple(coordinate): position
                       for position, coordinate in enumerate(self._coordinates)}

    def _lookup(self, coordinate: tuple[int, int]) -> int:
        """Return the axis-0 position for a coordinate, raising if it is unknown."""
        if coordinate not in self._index:
            msg = f"Coordinate {coordinate} not in TransferKernel."
            raise InvalidCoordinateError(msg)
        return self._index[coordinate]

    @property
    def sample_shape(self) -> tuple[int, int]:
        """Shape of individual sample."""
        shape = self._values.shape
        return shape[1], shape[2]

    def __getitem__(self, coordinate: tuple[int, int]) -> np.ndarray:
        """Get the sample associated with that coordinate.

        Parameters
        ----------
        coordinate: tuple[int, int]
            reference coordinate for requested array

        Returns
        -------
        np.ndarray
            sample at that coordinate

        """
        return self._values[self._lookup(coordinate)]

    def __setitem__(self, coordinate: tuple[int, int], value: np.ndarray) -> None:
        """Set the array associated with that coordinate.

        Parameters
        ----------
        coordinate: tuple[int, int]
            reference coordinate for sample

        value: np.ndarray
            value at the sample

        """
        slot = self._lookup(coordinate)

        if value.shape != self.sample_shape:
            msg = f"Cannot assign value of shape {value.shape} to transfer kernel of shape {self.sample_shape}."
            raise IncorrectShapeError(msg)

        self._values[slot] = value

    @property
    def coordinates(self) -> list[tuple[int, int]]:
        """Retrieve coordinates the transfer kernel is defined on."""
        return self._coordinates

    @property
    def values(self) -> np.ndarray:
        """Retrieve values of the cube."""
        return self._values

    def __len__(self) -> int:
        """Return number of samples the cube is indexed on."""
        return len(self.coordinates)

    def __eq__(self, other: IndexedCube) -> bool:
        """Test equality between two IndexedCubes."""
        if not isinstance(other, IndexedCube):
            msg = "Can only compare IndexedCube instances."
            raise TypeError(msg)
        same_coordinates = self.coordinates == other.coordinates
        same_shape = self.sample_shape == other.sample_shape
        return (same_coordinates
                and same_shape
                and np.allclose(self.values, other.values, rtol=1e-04, atol=1e-06))
173 |
--------------------------------------------------------------------------------
/regularizepsf/visualize.py:
--------------------------------------------------------------------------------
1 | """Visualization tools for PSFs."""
2 | import itertools
3 |
4 | import matplotlib as mpl
5 | import matplotlib.colors
6 | import matplotlib.pyplot as plt
7 | import numpy as np
8 |
9 | from regularizepsf.util import IndexedCube
10 |
11 |
def _generate_colormap() -> matplotlib.colors.ListedColormap:
    """Build the package's default colormap.

    A 1000-entry listed colormap whose red channel rises as the square root,
    green linearly, and blue quadratically along the ramp.
    """
    ramp = np.linspace(0, 1, 1000)
    channels = np.stack([np.sqrt(ramp), ramp, np.square(ramp)], axis=-1)
    return mpl.colors.ListedColormap(channels)
19 |
20 |
# Module-wide default colormap, built once at import time.
DEFAULT_COLORMAP = _generate_colormap()
# Default imshow styling for PSF patches: power-law normalization with
# gamma = 1/2.2 (standard display gamma) and the custom colormap.
PSF_IMSHOW_ARGS_DEFAULT = {
    "origin": "lower",
    "cmap": DEFAULT_COLORMAP,
    "norm": mpl.colors.PowerNorm(gamma=1 / 2.2, vmin=None, vmax=None),
}
# Default imshow styling for kernel plots: a diverging blue-white-red map
# with no special normalization.
KERNEL_IMSHOW_ARGS_DEFAULT = {
    "norm": None,
    "cmap": "bwr"
}
31 |
32 |
def visualize_patch_counts(counts: dict[tuple[int, int], int],
                           ax: mpl.axes.Axes | None = None,
                           label_pixel_bounds: bool = False) -> mpl.axes.Axes:
    """Visualize the number of stars identified within each patch.

    Parameters
    ----------
    counts : dict[tuple[int, int], int]
        The counts returned by an ArrayPSFBuilder.build
    ax : matplotlib.axes.Axes
        An Axes object on which to plot. If not provided, a new Figure will be
        generated.
    label_pixel_bounds : bool
        If True, the axes of the plot will be labeled with the pixel range
        spanned by each patch.

    Returns
    -------
    matplotlib.axes.Axes
        The Axes the count heatmap was drawn on.

    """
    if ax is None:
        fig = plt.figure()
        ax = fig.subplots()

    # Distinct patch coordinates along each image axis.
    rows = [k[0] for k in counts.keys()]
    columns = [k[1] for k in counts.keys()]
    rows = np.unique(sorted(rows))
    columns = np.unique(sorted(columns))
    # Patch spacing along each axis.
    # NOTE(review): assumes at least two distinct rows and two distinct
    # columns exist — confirm for degenerate coverings.
    dr = rows[1] - rows[0]
    dc = columns[1] - columns[0]

    # Build an array containing all the patch counts
    counts_arr = np.empty((len(rows), len(columns)))
    for k, count in counts.items():
        r, c = k[0], k[1]
        # Convert patch coordinates to 0-based grid indices.
        r = int((r - rows.min()) / dr)
        c = int((c - columns.min()) / dc)
        counts_arr[r, c] = count

    m = ax.imshow(counts_arr, origin="lower")
    plt.colorbar(m).set_label("Number of stars found in patch")

    if label_pixel_bounds:
        # Keep only the ticks that land inside the grid, then relabel each one
        # with the pixel span of the corresponding patch.
        # NOTE(review): the labels span 2*dc (resp. 2*dr) pixels — presumably
        # because adjacent patches overlap by half a patch; confirm against
        # the covering used by the builder.
        xticks = [xt for xt in plt.xticks()[0] if 0 <= xt < len(columns)]
        plt.xticks(
            xticks,
            [f"{int(columns.min() + dc * i)}"
             f" to\n{int(columns.min() + dc * (i+2))} px"
             for i in xticks])
        yticks = [yt for yt in plt.yticks()[0] if 0 <= yt < len(rows)]
        plt.yticks(
            yticks,
            [f"{int(rows.min() + dr * i)}"
             f" to\n{int(rows.min() + dr * (i+2))} px"
             for i in yticks])
        ax.set_xlabel("Patch bounds (px)")
        ax.set_ylabel("Patch bounds (px)")
    else:
        ax.set_xlabel("Patch number")
        ax.set_ylabel("Patch number")
    return ax
91 |
def visualize_grid(data: IndexedCube,
                   second_data: IndexedCube | None = None,
                   title: str | tuple[str, str] = "",
                   fig: mpl.figure.Figure | None = None,
                   fig_scale: int = 1,
                   all_patches: bool = False,
                   imshow_args: dict | None = None,
                   colorbar_label: str = "") -> mpl.figure.Figure:
    """Visualize the PSF model as a grid of patch images.

    Parameters
    ----------
    data : IndexedCube
        patches to render, keyed by (row, column) coordinates
    second_data : IndexedCube | None
        optional second cube rendered in a grid beside the first;
        when provided, `title` must be a pair of strings
    title : str | tuple[str, str]
        grid titles; only drawn when `second_data` is provided
    fig : matplotlib.figure.Figure | None
        figure to draw into; a new one is created when not provided
    fig_scale : int
        multiplier applied to the automatically computed figure size
    all_patches : bool
        if False, only every other patch row/column is shown
    imshow_args : dict | None
        extra keyword arguments forwarded to `imshow`
    colorbar_label : str
        label attached to the shared colorbar

    Returns
    -------
    matplotlib.figure.Figure
        the figure the grid(s) were drawn on

    """
    # BUG FIX: imshow_args previously defaulted to None and was unpacked
    # with **, raising a TypeError whenever the caller omitted it.
    if imshow_args is None:
        imshow_args = {}

    # Identify which patches we'll be plotting
    rows = np.unique(sorted(r for r, c in data.coordinates))
    columns = np.unique(sorted(c for r, c in data.coordinates))
    if not all_patches:
        rows = rows[1::2]
        columns = columns[1::2]

    # Work out the size of the image
    # Each grid of patches will be 6 inches wide
    patches_width = 6
    # Determine an image height based on the number of rows of patches
    patches_height = patches_width * len(rows) / len(columns)
    # Add space for the colorbar
    total_width = patches_width + .3
    # To make sure we have a little padding between the patches and the
    # colorbar, we'll add an extra, empty column
    n_columns = len(columns) + 2
    width_ratios = [patches_width / len(columns)] * len(columns) + [.1, .2]

    if second_data is not None:
        # Add space for a second grid of patches (including a padding column)
        total_width += patches_width + .2
        n_columns += len(columns) + 1
        width_ratios = (
            [patches_width / len(columns)] * len(columns)
            + [.2] + width_ratios)

    if fig is None:
        fig = plt.figure(
            figsize=(total_width * fig_scale, patches_height * fig_scale))

    gs = mpl.gridspec.GridSpec(
        len(rows), n_columns, figure=fig,
        wspace=0, hspace=0,
        width_ratios=width_ratios)

    for i, j in itertools.product(range(len(rows)), range(len(columns))):
        # Grid row 0 is at the top but patch rows count from the bottom,
        # so flip the row index.
        ax = fig.add_subplot(gs[len(rows) - 1 - i, j])
        im = ax.imshow(data[rows[i], columns[j]], **imshow_args)
        # Ensure there's a thin line between subplots
        ax.spines[:].set_color("white")
        ax.set_xticks([])
        ax.set_yticks([])

    cax = fig.add_subplot(gs[:, -1])
    fig.colorbar(im, cax=cax, label=colorbar_label)

    if second_data is not None:
        for i, j in itertools.product(range(len(rows)), range(len(columns))):
            ax = fig.add_subplot(gs[len(rows) - 1 - i, j + len(columns) + 1])
            image = second_data[(rows[i], columns[j])]
            im = ax.imshow(image, **imshow_args)
            ax.spines[:].set_color("white")
            ax.set_xticks([])
            ax.set_yticks([])

        # One title per grid; `title` must be a two-element sequence here.
        fig.text(0.31, 0.95, title[0], ha="center", fontsize=15)
        fig.text(0.7, 0.95, title[1], ha="center", fontsize=15)

    return fig
161 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | h5py
3 | sep-pjw
4 | astropy
5 | scipy
6 | scikit-image
7 | matplotlib
8 | pytest
9 | pytest-cov
10 | hypothesis
11 | coverage
12 | ruff
13 | pytest-mpl
14 | pre-commit
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/tests/__init__.py
--------------------------------------------------------------------------------
/tests/data/compressed_dash.fits:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/punch-mission/regularizepsf/ab5a99c1a1c6c88af2ac1e379dfb9bba91974d62/tests/data/compressed_dash.fits
--------------------------------------------------------------------------------
/tests/helper.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def make_gaussian(size, fwhm = 3, center=None):
    """Sample a square 2D Gaussian on a size x size grid.

    size is the side length of the square; fwhm is the
    full-width-half-maximum, which can be thought of as an effective radius.
    If center is None the peak sits at the array center (size // 2 on both
    axes); otherwise at (center[0], center[1]).
    """
    cols = np.arange(size, dtype=float)
    rows = cols[:, np.newaxis]

    if center is None:
        col0 = row0 = size // 2
    else:
        col0, row0 = center[0], center[1]

    # exp(-4 ln 2 * r^2 / fwhm^2) equals 1/2 exactly at r = fwhm / 2.
    radius_sq = (cols - col0) ** 2 + (rows - row0) ** 2
    return np.exp(-4 * np.log(2) * radius_sq / fwhm ** 2)
22 |
--------------------------------------------------------------------------------
/tests/test_builder.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 |
3 | import numpy as np
4 | import pytest
5 | from astropy.io import fits
6 |
7 | from regularizepsf.builder import ArrayPSFBuilder, _average_patches, _find_patches
8 | from regularizepsf.psf import ArrayPSF
9 |
10 | TEST_DIR = pathlib.Path(__file__).parent.resolve()
11 |
def test_find_patches():
    """_find_patches yields 32x32 patches whose row coordinate matches the request."""
    img_path = str(TEST_DIR / "data/compressed_dash.fits")
    image_array = fits.getdata(img_path).astype(float)
    patches = _find_patches(image_array, 3, None, 1, 32, 50)
    for coord, patch in patches.items():
        assert coord[0] == 50
        # BUG FIX: removed a leftover debug print(coord) that cluttered test output.
        assert patch.shape == (32, 32)
20 |
def test_averaging():
    """Each averaging method reduces the patch stack with nan rejection."""
    fills = {(0, 0, 0): .3,
             (1, 0, 0): .5,
             (2, 0, 0): .9,
             # Exercise the nan-rejection in CoordinatePatchCollection.average()
             (3, 0, 0): np.nan}
    collection = {key: np.full((10, 10), fill) for key, fill in fills.items()}
    for patch in collection.values():
        # Make the normalization of each patch a no-op
        patch[5, 5] = 1

    corners = np.array([[0, 0]])

    averaged_collection, counts = _average_patches(collection, corners, method='median')
    assert averaged_collection[(0, 0)][1, 1] == np.nanmedian([.3, .5, .9])

    averaged_collection, counts = _average_patches(collection, corners, method='mean')
    assert averaged_collection[(0, 0)][1, 1] == np.nanmean([.3, .5, .9])

    averaged_collection, counts = _average_patches(collection, corners, method='percentile', percentile=20)
    assert averaged_collection[(0, 0)][1, 1] == np.nanpercentile([.3, .5, .9], 20)
43 |
44 |
@pytest.mark.parametrize("method", ["mean", "median", "percentile"])
def test_find_stars_and_average_path(method):
    """Building from a list of FITS file paths yields a 32x32 ArrayPSF."""
    img_path = str(TEST_DIR / "data/compressed_dash.fits")
    builder = ArrayPSFBuilder(32)
    example, _ = builder.build([img_path], average_method=method, hdu_choice=1)
    assert isinstance(example, ArrayPSF)
    assert example.sample_shape == (32, 32)

@pytest.mark.parametrize("method", ["mean", "median", "percentile"])
def test_find_stars_and_average_array(method):
    """Building from an in-memory image array (with a mask) yields a 32x32 ArrayPSF."""
    img_path = str(TEST_DIR / "data/compressed_dash.fits")
    image_array = fits.getdata(img_path).astype(float)
    image_array = image_array.reshape((1, *image_array.shape))

    # Use a mask to only process part of the image, to speed up this test
    mask = np.ones_like(image_array, dtype=bool)
    mask[:, :800, :800] = 0

    builder = ArrayPSFBuilder(32)
    example, _ = builder.build(image_array, mask, average_method=method)
    assert isinstance(example, ArrayPSF)
    assert example.sample_shape == (32, 32)


@pytest.mark.parametrize("method", ["mean", "median", "percentile"])
def test_find_stars_and_average_generator(method):
    """Building from a generator of images yields a 32x32 ArrayPSF."""
    img_path = str(TEST_DIR / "data/compressed_dash.fits")
    image_array = fits.getdata(img_path).astype(float)
    image_array = image_array.reshape((1, *image_array.shape))
    def generator():
        yield image_array[0]

    builder = ArrayPSFBuilder(32)
    example, _ = builder.build(generator(), average_method=method)
    assert isinstance(example, ArrayPSF)
    assert example.sample_shape == (32, 32)
81 |
--------------------------------------------------------------------------------
/tests/test_psf.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import scipy
4 |
5 | from regularizepsf.exceptions import IncorrectShapeError, InvalidCoordinateError, InvalidFunctionError
6 | from regularizepsf.psf import (
7 | ArrayPSF,
8 | SimpleFunctionalPSF,
9 | VariedFunctionalPSF,
10 | simple_functional_psf,
11 | varied_functional_psf,
12 | )
13 | from regularizepsf.util import IndexedCube
14 | from tests.helper import make_gaussian
15 |
16 |
@pytest.mark.parametrize("extension", ["fits", "h5"])
def test_arraypsf_saves_and_loads(tmp_path, extension):
    """Can save and reload an ArrayPSF"""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    source = ArrayPSF(IndexedCube(coordinates, values))

    path = tmp_path / f"psf.{extension}"

    source.save(path)
    reloaded = ArrayPSF.load(path)

    assert source == reloaded

def test_arraypsf_compare_to_array_fails():
    """Can only compare ArrayPSF to an ArrayPSF."""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    source = ArrayPSF(IndexedCube(coordinates, values))

    with pytest.raises(TypeError):
        _ = source == np.zeros((50, 50))


def test_arraypsf_with_mismatched_coordinates_fails():
    """ArrayPSF values and shapes must have same coordinates."""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    with pytest.raises(InvalidCoordinateError):
        _ = ArrayPSF(IndexedCube(coordinates, values), IndexedCube([(0, 0), (1, 1), (3, 3)], values))


def test_arraypsf_with_different_len_fails():
    """ArrayPSFs must have the same len for values and ffts."""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    fft_coordinates = [(0, 0), (1, 1), (2, 2), (3, 3)]
    gauss = make_gaussian(128, fwhm=3)
    fft_values = np.stack([gauss for _ in fft_coordinates])

    with pytest.raises(IncorrectShapeError):
        _ = ArrayPSF(IndexedCube(coordinates, values), IndexedCube(fft_coordinates, fft_values))


def test_arraypsf_with_different_sample_shapes_fails():
    """ArrayPSFs must have the same shape for values and ffts."""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    gauss = make_gaussian(64, fwhm=3)
    fft_values = np.stack([gauss for _ in coordinates])

    with pytest.raises(IncorrectShapeError):
        _ = ArrayPSF(IndexedCube(coordinates, values), IndexedCube(coordinates, fft_values))


def test_arraypsf_get_evaluations():
    """Check that evaluations and fft evaluations can be retrieved."""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])
    psf = ArrayPSF(IndexedCube(coordinates, values))
    assert np.all(psf[(0, 0)] == gauss)
    assert np.all(psf.fft_at((0, 0)) == scipy.fft.fft2(gauss))



def test_simple_psf_valid():
    """ Confirms that a psf with no extra parameters works"""
    func = lambda row, col: row + col
    eqn = simple_functional_psf(func)
    assert isinstance(eqn, SimpleFunctionalPSF)
    assert eqn.parameters == set()
    assert eqn(1, 2) == 3


def test_simple_psf_two_parameters():
    """ Confirms that a psf with two parameters performs correctly"""
    func = lambda row, col, sigma=3, mu=4: row + col + sigma + mu
    eqn = simple_functional_psf(func)
    assert isinstance(eqn, SimpleFunctionalPSF)
    assert eqn.parameters == {'sigma', 'mu'}
    assert eqn(1, 2) == 10


def test_simple_psf_missing_xy_fails():
    """ Confirms that a psf without x and y arguments fails"""
    with pytest.raises(InvalidFunctionError):
        simple_functional_psf(lambda: 1)


def test_simple_psf_swap_x_and_y_fails():
    """ Ensures x and y must be in the proper order"""
    with pytest.raises(InvalidFunctionError):
        simple_functional_psf(lambda y, x: x + y)


def test_simple_psf_missing_y_fails():
    """ Ensures y must be the second argument"""
    with pytest.raises(InvalidFunctionError):
        simple_functional_psf(lambda x, sigma: x + sigma)


def test_varied_psf_simple_is_valid():
    """ Ensures a simple varied psf performs correctly"""
    base = simple_functional_psf(lambda row, col, sigma=5: row + col + sigma)
    my_psf = varied_functional_psf(base)(lambda row, col: {"sigma": 1})
    assert isinstance(my_psf, VariedFunctionalPSF)
    assert my_psf.parameters == {'sigma'}
    assert my_psf(0, 0) == 1


def test_varied_psf_too_few_parameters_fails():
    """ Confirms that a varied psf that has too few parameters compared to the base model fails"""
    base = simple_functional_psf(lambda row, col, sigma, mu: row + col)
    with pytest.raises(InvalidFunctionError):
        varied_functional_psf(base)(lambda: {'sigma': 0.1})


def test_varied_psf_too_many_parameters_fails():
    """ Confirms that a varied psf with too many parameters compared to the base model fails"""
    ref = simple_functional_psf(lambda row, col: row + col)
    with pytest.raises(InvalidFunctionError):
        varied_functional_psf(ref)(lambda row, col, c: {'sigma': 0.1})


def test_varied_psf_missing_x_fails():
    """ Confirms a varied psf model with a missing x fails"""
    ref = simple_functional_psf(lambda row, col: row + col)
    with pytest.raises(InvalidFunctionError):
        varied_functional_psf(ref)(lambda c, col: {'sigma': 0.1})


def test_varied_psf_missing_y_fails():
    """ Confirms a varied psf model with a missing y fails"""
    ref = simple_functional_psf(lambda row, col: row + col)
    with pytest.raises(InvalidFunctionError):
        varied_functional_psf(ref)(lambda row, c: {'sigma': 0.1})


def test_varied_psf_called_without_arguments():
    """ Confirms varied_functional_psf requires a base psf argument"""
    with pytest.raises(TypeError):
        varied_functional_psf()(lambda row, col: {"sigma": 0.2})


def test_varied_psf_called_with_none_base_psf():
    """ Confirms a None base psf is rejected"""
    with pytest.raises(TypeError):
        @varied_functional_psf(None)
        def func(row, col):
            return {"sigma": 0.2}


def test_varied_psf_called_naked():
    """ Confirms varied_functional_psf cannot be used as a bare decorator"""
    with pytest.raises(TypeError):
        @varied_functional_psf
        def func(row, col):
            return {"sigma": 0.1}


def test_varied_psf_parameters_not_match_base_errors():
    """ Confirms the varied psf's returned parameters must match the base psf's"""
    @simple_functional_psf
    def base(row, col, m):
        return row + col

    with pytest.raises(InvalidFunctionError):
        @varied_functional_psf(base)
        def varied(row, col):
            return {"n": 0, "m": 30}


def test_varied_psf_parameters_match_except_at_call_errors():
    """ Confirms mismatched parameters are caught at call time, not only at decoration"""
    @simple_functional_psf
    def base(row, col, m):
        return row + col

    with pytest.raises(InvalidFunctionError):
        @varied_functional_psf(base)
        def varied(row, col):
            if row == 0 and col == 0:
                return {"m": 30}
            else:
                return {"n": 100, "m": 30}
        # The (0, 0) probe at decoration time passes; this call returns the
        # extra "n" parameter and must raise.
        _ = varied(10, 10)

def test_evaluate_simplefunctionalpsf_to_arraypsf():
    """Can evaluate a simple functional psf to an array psf."""
    def f(row, col, a=10):
        return row + col + a

    functionalpsf = simple_functional_psf(f)
    arraypsf = functionalpsf.as_array_psf([(0, 0), (1, 0)], 3)

    assert len(arraypsf) == 2
    assert arraypsf.sample_shape == (3, 3)
    assert arraypsf.coordinates == [(0, 0), (1, 0)]

    rr, cc = np.meshgrid(np.arange(3), np.arange(3))
    assert np.allclose(arraypsf[(0, 0)], rr + cc + 10)

def test_evaluate_variedfunctionalpsf_to_arraypsf():
    """Can evaluate a varied psf to an array psf."""
    base = simple_functional_psf(lambda row, col, sigma=5: row + col + sigma)
    my_psf = varied_functional_psf(base)(lambda row, col: {"sigma": row*col})

    arraypsf = my_psf.as_array_psf([(0, 0), (3, 4)], 3)

    assert len(arraypsf) == 2
    assert arraypsf.sample_shape == (3, 3)
    assert arraypsf.coordinates == [(0, 0), (3, 4)]

    rr, cc = np.meshgrid(np.arange(3), np.arange(3))
    assert np.allclose(arraypsf[(3, 4)], rr + cc + 3*4)
238 |
--------------------------------------------------------------------------------
/tests/test_transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from regularizepsf.exceptions import InvalidCoordinateError
5 | from regularizepsf.psf import ArrayPSF
6 | from regularizepsf.transform import ArrayPSFTransform
7 | from regularizepsf.util import IndexedCube, calculate_covering
8 | from tests.helper import make_gaussian
9 |
10 |
@pytest.mark.parametrize("extension", ["fits", "h5"])
def test_transform_saves_and_loads(tmp_path, extension):
    """Can save and reload an ArrayPSF"""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    source = ArrayPSF(IndexedCube(coordinates, values))
    target = ArrayPSF(IndexedCube(coordinates, values))
    transform = ArrayPSFTransform.construct(source, target, 1.0, 0.1)

    path = tmp_path / f"transform.{extension}"

    transform.save(path)
    reloaded = ArrayPSFTransform.load(path)

    assert transform == reloaded

def test_transform_apply():
    """Test that applying an identity transform does not change the values."""
    size = 256
    gauss = make_gaussian(size, fwhm=3)
    dtype = np.float32

    covering = [tuple(t) for t in calculate_covering((2048, 2048), size)]
    # Every patch is the same normalized Gaussian, so source == target.
    values = np.stack([np.zeros((size, size), dtype=dtype) for _ in covering])
    values[:] = gauss / np.sum(gauss)

    cube = IndexedCube(covering, values)
    source = ArrayPSF(cube, workers=None)

    t = ArrayPSFTransform.construct(source, source, 3.0, 0.1)

    image = np.zeros((2048, 2048), dtype=dtype)
    image[500:1000, 200:400] = 5

    out = t.apply(image)

    assert np.allclose(image, out, atol=1E-3)


def test_transform_with_mismatch_coordinates_errors():
    """Source and target PSFs must share coordinates to build a transform."""
    source_coordinates = [(0, 0), (1, 1), (2, 2)]
    target_coordinates = [(0, 0), (1, 1), (0.5, 0.5)]
    source = ArrayPSF(IndexedCube(source_coordinates, np.zeros((len(source_coordinates), 128, 128))))
    target = ArrayPSF(IndexedCube(target_coordinates, np.zeros((len(target_coordinates), 128, 128))))
    with pytest.raises(InvalidCoordinateError):
        ArrayPSFTransform.construct(source, target, 3.0, 0.1)


def test_transform_save_load(tmp_path):
    """Round-trips a transform through an HDF5 file."""
    path = tmp_path / "transform.h5"
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    source = ArrayPSF(IndexedCube(coordinates, values))
    transform = ArrayPSFTransform.construct(source, source, 3.0, 0.1)

    transform.save(path)
    reloaded = ArrayPSFTransform.load(path)

    assert transform == reloaded


def test_transform_compare_to_array_fails():
    """Can only compare an ArrayPSFTransform to an ArrayPSFTransform."""
    coordinates = [(0, 0), (1, 1), (2, 2)]
    gauss = make_gaussian(128, fwhm=3)
    values = np.stack([gauss for _ in coordinates])

    source = ArrayPSF(IndexedCube(coordinates, values))
    transform = ArrayPSFTransform.construct(source, source, 3.0, 0.1)

    with pytest.raises(TypeError):
        _ = transform == np.zeros((50, 50))
86 |
--------------------------------------------------------------------------------
/tests/test_util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from hypothesis import given, settings
4 | from hypothesis import strategies as st
5 |
6 | from regularizepsf.exceptions import IncorrectShapeError, InvalidCoordinateError
7 | from regularizepsf.util import IndexedCube, calculate_covering
8 |
9 |
10 | def confirm_full_four_covering(corners, img_shape, patch_size):
11 | """Confirms that the covering fully covers the image four times, i.e. each point is sample four times."""
12 | counts = np.zeros(img_shape)
13 | for i, (x, y) in enumerate(corners):
14 | counts[np.max([0, x]):np.min([img_shape[0], x + patch_size]),
15 | np.max([0, y]):np.min([img_shape[1], y + patch_size])] += 1
16 | assert np.all(counts == 4)
17 |
18 |
@pytest.mark.parametrize("img_shape, patch_size",
                         [((5, 5), 1),
                          ((5, 5), 2),
                          ((15, 15), 3),
                          ((15, 15), 4),
                          ((100, 100), 11)])
def test_calculate_covering_with_given_sizes(img_shape, patch_size):
    """Calculates a variety of coverings to make sure they're all properly covered."""
    corners = calculate_covering(img_shape, patch_size)
    confirm_full_four_covering(corners, img_shape, patch_size)


@given(img_dim=st.integers(min_value=100, max_value=200), patch_fraction=st.fractions(min_value=0.1, max_value=0.8))
@settings(max_examples=150, deadline=None)
def test_calculate_covering_random_square_images_always_covered(img_dim, patch_fraction):
    """Similar to `test_calculate_covering_with_given_sizes`, but randomly generates square patches for covering."""
    img_shape = (img_dim, img_dim)
    patch_size = np.ceil(img_dim * patch_fraction)
    corners = calculate_covering(img_shape, patch_size)
    confirm_full_four_covering(corners, img_shape, patch_size)


@pytest.mark.parametrize("num_layers, x_shape, y_shape",
                         [(10, 10, 10),
                          (15, 20, 25),
                          (1, 15, 10),
                          (1, 1, 1),
                          (0, 1, 1),
                          (0, 0, 0)])
def test_indexed_cube_general_functionality(num_layers, x_shape, y_shape):
    """Tests that the IndexedCube can be created, has the proper shape and length, and indexes where expected."""
    # construct cube
    data = np.zeros((num_layers, x_shape, y_shape))
    for i in range(num_layers):
        data[i] = i

    # Layer i is keyed by coordinate (i, i+1), so a swapped coordinate
    # (i+1, i) is never a valid key.
    coordinates = [(i, i+1) for i in range(num_layers)]

    cube = IndexedCube(coordinates, data)

    # test cube
    assert cube.sample_shape == (x_shape, y_shape)
    assert len(cube) == num_layers
    assert cube.coordinates == coordinates
    for coord in coordinates:
        assert np.all(cube[coord] == coord[0])

        with pytest.raises(InvalidCoordinateError):
            _ = cube[(coord[1], coord[0])]

        with pytest.raises(InvalidCoordinateError):
            cube[(coord[1], coord[0])] = np.zeros((x_shape, y_shape))

        with pytest.raises(IncorrectShapeError):
            cube[coord] = np.zeros((x_shape+1, y_shape+1))

        cube[coord] = np.zeros((x_shape, y_shape))

    assert np.all(cube._values == 0)


def test_compare_indexed_cube_to_other_fails():
    """Cannot compare an IndexedCube to anything other than an IndexedCube."""
    cube = IndexedCube([(0, 0), (0, 1)], np.ones((2, 2, 2)))
    with pytest.raises(TypeError):
        _ = cube == np.zeros((2, 2, 2))

def test_indexed_cube_wrong_coordinate_length_fails():
    """IndexedCube must be self-consistent in coordinate length"""
    with pytest.raises(IncorrectShapeError):
        _ = IndexedCube([(0, 0), (0, 1), (5, 5)], np.ones((2, 2, 2)))

def test_indexed_cube_must_be_3d():
    """IndexedCube must be 3D"""
    with pytest.raises(IncorrectShapeError):
        _ = IndexedCube([(0, 0), (0, 1)], np.ones((2, 2)))
95 |
--------------------------------------------------------------------------------