├── docs ├── requirements.txt ├── aa_bw.jpg ├── images │ ├── logo-seal-dark-light.png │ ├── logo-inline-dark-light.png │ └── mask_tutorial │ │ ├── target_region.png │ │ ├── target_replaced.png │ │ └── source_half_degree.png ├── api.rst ├── installation.rst ├── examples.rst ├── mask.rst ├── index.rst ├── make.bat ├── Makefile ├── tutorial.rst └── conf.py ├── tests ├── requirements.txt ├── __init__.py └── test_align.py ├── tea.yaml ├── MANIFEST.in ├── .github └── workflows │ ├── aa-ci.yml │ └── coverage.yml ├── pyproject.toml ├── LICENSE.txt ├── .readthedocs.yaml ├── tox.ini ├── README.md ├── benchmarks ├── flux_bench.py ├── time_bench.py └── time_regression.py └── astroalign.py /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-rtd-theme 3 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | astropy 2 | ccdproc 3 | pillow 4 | -------------------------------------------------------------------------------- /docs/aa_bw.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quatrope/astroalign/HEAD/docs/aa_bw.jpg -------------------------------------------------------------------------------- /docs/images/logo-seal-dark-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quatrope/astroalign/HEAD/docs/images/logo-seal-dark-light.png -------------------------------------------------------------------------------- /docs/images/logo-inline-dark-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quatrope/astroalign/HEAD/docs/images/logo-inline-dark-light.png -------------------------------------------------------------------------------- 
/docs/images/mask_tutorial/target_region.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quatrope/astroalign/HEAD/docs/images/mask_tutorial/target_region.png -------------------------------------------------------------------------------- /docs/images/mask_tutorial/target_replaced.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quatrope/astroalign/HEAD/docs/images/mask_tutorial/target_replaced.png -------------------------------------------------------------------------------- /docs/images/mask_tutorial/source_half_degree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/quatrope/astroalign/HEAD/docs/images/mask_tutorial/source_half_degree.png -------------------------------------------------------------------------------- /tea.yaml: -------------------------------------------------------------------------------- 1 | # https://tea.xyz/what-is-this-file 2 | --- 3 | version: 1.0.0 4 | codeOwners: 5 | - '0x460F396b7b8afd11636A6188b01Ebe5771f14A99' 6 | quorum: 1 7 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include logo file for README 2 | include docs/aa_bw.jpg 3 | include docs/images/logo-inline-dark-light.png 4 | include docs/images/logo-seal-dark-light.png 5 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | 3 | Module API 4 | ========== 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | 9 | .. 
automodule:: astroalign 10 | :members: 11 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | The easiest way to install is using pip:: 5 | 6 | pip install astroalign 7 | 8 | This will install the latest stable version on PyPI. 9 | 10 | If you want to use the latest development code from GitHub, clone the `repo `_ on your local machine and install:: 11 | 12 | git clone https://github.com/quatrope/astroalign 13 | cd astroalign 14 | pip install -e . 15 | -------------------------------------------------------------------------------- /.github/workflows/aa-ci.yml: -------------------------------------------------------------------------------- 1 | name: unit tests 2 | 3 | on: 4 | - push 5 | - pull_request 6 | 7 | jobs: 8 | build: 9 | 10 | runs-on: ubuntu-latest 11 | strategy: 12 | matrix: 13 | python-version: ['3.10', '3.11', '3.12', '3.13'] 14 | 15 | steps: 16 | - uses: actions/checkout@main 17 | - name: Set up Python ${{ matrix.python-version }} 18 | uses: actions/setup-python@main 19 | with: 20 | python-version: ${{ matrix.python-version}} 21 | - name: Install tox 22 | run: pip install tox tox-gh-actions 23 | - name: Run the tests with tox 24 | run: tox -r 25 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "astroalign" 7 | authors = [{ name = "Martin Beroiz", email = "martinberoiz@gmail.com" }] 8 | description = "Astrometric Alignment of Images" 9 | maintainers = [{ name = "Martin Beroiz", email = "martinberoiz@gmail.com" }] 10 | readme = "README.md" 11 | dynamic = ["version"] 12 | requires-python = ">=3.10" 13 | dependencies = ["numpy", 
"scipy", "scikit-image", "sep>=1.4.0"] 14 | license = { file = "LICENSE.txt" } 15 | keywords = [ 16 | "astronomy", 17 | "registration", 18 | "alignment", 19 | "asterism-matching", 20 | "stellar-astronomical-images" 21 | ] 22 | 23 | [project.urls] 24 | homepage = "https://astroalign.quatrope.org" 25 | documentation = "https://astroalign.readthedocs.io/" 26 | source = "https://github.com/quatrope/astroalign" 27 | 28 | [tool.setuptools] 29 | py-modules = ["astroalign"] 30 | # Ref: https://github.com/pypa/setuptools/issues/4759 31 | license-files = [] 32 | 33 | [tool.setuptools.dynamic] 34 | version = {attr = "astroalign.__version__"} 35 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016-2019 Martin Beroiz 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.11" 12 | # You can also specify other tool versions: 13 | # nodejs: "20" 14 | # rust: "1.70" 15 | # golang: "1.20" 16 | 17 | # Build documentation in the "docs/" directory with Sphinx 18 | sphinx: 19 | configuration: docs/conf.py 20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs 21 | # builder: "dirhtml" 22 | # Fail on all warnings to avoid broken references 23 | # fail_on_warning: true 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: docs/requirements.txt 36 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # MIT License 5 | 6 | # Copyright (c) 2016-2019 Martin Beroiz 7 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the 
Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | 15 | # The above copyright notice and this permission notice shall be included in 16 | # all copies or substantial portions of the Software. 17 | 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 25 | 26 | 27 | """Torosfactory Tests""" 28 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | name: code coverage 2 | 3 | on: 4 | - push 5 | - pull_request 6 | 7 | permissions: 8 | contents: read 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | name: Code Coverage 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@main 17 | - name: Set up Python 18 | uses: actions/setup-python@main 19 | with: 20 | python-version: '3.13' 21 | - name: Install coverage 22 | run: pip install coverage 23 | - name: Install astroalign dependencies 24 | run: pip install numpy scipy scikit-image "sep>=1.4.0" 25 | - name: Install test dependencies 26 | run: pip install -r tests/requirements.txt 27 | - name: Run coverage 28 | run: | 29 | python -m coverage run -m unittest -v 30 | coverage xml --fail-under=80 -o coverage.xml 31 | - name: Code Coverage Report 32 | uses: irongut/CodeCoverageSummary@v1.3.0 33 | with: 34 | filename: coverage.xml 35 | badge: true 36 | fail_below_min: true 37 | format: markdown 38 | hide_branch_rate: false 39 | hide_complexity: 
true 40 | indicators: true 41 | output: both 42 | thresholds: '60 80' 43 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 5 | 6 | [tox] 7 | envlist = py{310,311,312,313}, style, docs, docstyle 8 | 9 | 10 | [gh-actions] 11 | python = 12 | 3.10: py310 13 | 3.11: py311 14 | 3.12: py312 15 | 3.13: style, docstyle, docs, py313 16 | 17 | 18 | [testenv:py{310,311,312,313}] 19 | usedevelop = False 20 | deps = 21 | astropy 22 | ccdproc 23 | pillow 24 | pytest 25 | ipdb 26 | commands = 27 | pytest tests -v {posargs} 28 | 29 | 30 | [testenv:style] 31 | skip_install = True 32 | usedevelop = False 33 | deps = flake8 34 | commands = 35 | flake8 --ignore=E203 --max-line-length 90 astroalign.py tests {posargs} 36 | 37 | 38 | [testenv:docstyle] 39 | deps = pydocstyle 40 | commands = 41 | pydocstyle --convention=numpy astroalign.py 42 | 43 | 44 | [testenv:docs] 45 | description = "Invoke sphinx-build to build the HTML docs" 46 | usedevelop = True 47 | skip_install = False 48 | changedir = docs 49 | deps = 50 | Sphinx 51 | sphinx-rtd-theme 52 | commands = 53 | sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html 54 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Examples 4 | ======== 5 | 6 | Very few stars on the field 7 | --------------------------- 8 | 9 | .. 
note:: 10 | The minimum number of stars necessary to find a transformation is 3 11 | 12 | If your field has few stars on the field, of about 3 to 6, you may want to 13 | restrict astroalign to only pick that number of stars, to prevent catching 14 | noisy structures as sources. 15 | 16 | Use ``max_control_points`` keyword argument to do so:: 17 | 18 | >>> import astroalign as aa 19 | >>> registered_image, footprint = aa.register(source, target, max_control_points=3) 20 | 21 | This keyword will also work in ``find_transform``. 22 | 23 | Faint stars 24 | ----------- 25 | 26 | If your stars are faint, they may not be bright enough to pass the 27 | :math:`5 \sigma` threshold. If you need to lower the detection :math:`\sigma` 28 | used in the source detection process, adjust the ``detection_sigma`` keyword argument:: 29 | 30 | >>> import astroalign as aa 31 | >>> registered_image, footprint = aa.register(source, target, detection_sigma=2) 32 | 33 | This keyword will also work in ``find_transform``. 34 | 35 | Avoiding hot pixels and other CCD artifacts 36 | ------------------------------------------- 37 | 38 | If your CCD is dominated by persistent defects like hot or dead pixels, they may be taken 39 | as legitimate sources and output the identity transformation. 40 | 41 | We suggest cleaning the image first using `CCDProc's cosmicray_lacosmic `_ to clean the image before trying registration:: 42 | 43 | >>> from ccdproc import cosmicray_lacosmic as lacosmic 44 | >>> clean_source, mask = lacosmic(myimage) 45 | >>> registered_image, footprint = aa.register(clean_source, clean_target, min_area=9) 46 | 47 | Another quick fix can be increasing the expected connected pixels in order to 48 | be considered a source. 
Increment ``min_area`` from default value of 5:: 49 | 50 | >>> import astroalign as aa 51 | >>> registered_image, footprint = aa.register(source, target, min_area=9) 52 | -------------------------------------------------------------------------------- /docs/mask.rst: -------------------------------------------------------------------------------- 1 | .. _mask: 2 | 3 | Selecting regions with masking 4 | ============================== 5 | 6 | If you want to restrict the source search to regions in any of the images, you can mask the undesired regions out before passing them. 7 | 8 | This can be useful if the field of views are very different and you want to restrict the search only where the images overlap. 9 | 10 | It can also be useful to mask out objects below the horizon; extended objects like galaxies; or very dense globular clusters. 11 | 12 | Below are some examples. 13 | 14 | Masking out undesired regions 15 | ----------------------------- 16 | 17 | If you are in a situation where you know the approximate region that the ``source`` image will occupy in the ``target`` image 18 | but the images themselves are not similar enough, you can restrict the search to the region of interest (ROI.) 19 | 20 | Suppose you have two images of about the same region in the sky. 21 | The first image is about 2 degrees and the second is half a degree field of view. 22 | 23 | .. figure:: /images/mask_tutorial/target_region.png 24 | :height: 200px 25 | 26 | **Image 1**\: Target image of about 2 square degree field of view, with an inset of the approximate region covered by the source image. (Image by `Gary Imm `_, used with permission.) 27 | 28 | .. figure:: /images/mask_tutorial/source_half_degree.png 29 | :height: 200px 30 | 31 | **Image 2**: Source image of about 0.5 square degree field of view. 
32 | 33 | Finding out the ROI can be a whole problem of itself, but for this example let's suppose we know it roughly corresponds to the 34 | ROI from rows 300--900 and columns 1000--1700 in all 3 color channels. 35 | 36 | >>> mask = np.ones_like(target, dtype="bool") 37 | >>> mask[300:900,1000:1700,:] = False # ROI 38 | >>> target_masked = np.ma.array(target, mask=mask) 39 | >>> registered, footprint = aa.register(source, target_masked) 40 | 41 | Just to show one application to the process above, let's overwrite the target by the registered source image in the footprint pixels. 42 | 43 | >>> target[~footprint] = registered[~footprint] 44 | 45 | The target image should now look like below. 46 | 47 | .. image:: images/mask_tutorial/target_replaced.png 48 | :height: 200px 49 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | .. image:: images/logo-seal-dark-light.png 4 | :width: 200px 5 | :align: left 6 | 7 | Astroalign documentation 8 | ======================== 9 | 10 | **ASTROALIGN** is a python module that will try to register (align) two stellar astronomical images, 11 | especially when there is no WCS information available. 12 | 13 | It does so by finding similar 3-point asterisms (triangles) in both images and estimating the 14 | affine transformation between them. 15 | 16 | Generic registration routines try to match point features, using corner detection routines to make the point correspondence. 17 | These generally fail for stellar astronomical images, since stars have very little stable structure and so, in general, indistinguishable from each other. 18 | Asterism matching is more robust, and closer to the human way of matching stellar images. 19 | 20 | Because of the geometric nature of its core algorithm, it is not greatly effected by point-spread function differences, seeing or atmospheric conditions. 
21 | 22 | You can find a Jupyter notebook example with the main features at http://quatrope.github.io/astroalign. 23 | 24 | .. note:: 25 | 26 | It may not work, or work with special care, on images of extended objects with few point-like sources or in very crowded fields. 27 | 28 | .. note:: 29 | If your images contain a large number of hot pixels, this may result in an incorrect registration. 30 | Please refer to the tutorial for how to solve this problem using `CCDProc's cosmic-ray remover `_. 31 | 32 | 33 | Citation 34 | -------- 35 | 36 | If you use astroalign in a scientific publication, we would appreciate 37 | citations to the following paper: 38 | 39 | Astroalign: A Python module for astronomical image registration. 40 | Beroiz, M., Cabral, J. B., & Sanchez, B. 41 | Astronomy and Computing, Volume 32, July 2020, 100384. 42 | 43 | 44 | Bibtex entry:: 45 | 46 | @article{BEROIZ2020100384, 47 | title = "Astroalign: A Python module for astronomical image registration", 48 | journal = "Astronomy and Computing", 49 | volume = "32", 50 | pages = "100384", 51 | year = "2020", 52 | issn = "2213-1337", 53 | doi = "https://doi.org/10.1016/j.ascom.2020.100384", 54 | url = "http://www.sciencedirect.com/science/article/pii/S221313372030038X", 55 | author = "M. Beroiz and J.B. Cabral and B. Sanchez", 56 | keywords = "Astronomy, Image registration, Python package", 57 | abstract = "We present an algorithm implemented in the Astroalign Python module for image registration in astronomy. Our module does not rely on WCS information and instead matches three-point asterisms ( triangles) on the images to find the most accurate linear transformation between them. It is especially useful in the context of aligning images prior to stacking or performing difference image analysis. Astroalign can match images of different point-spread functions, seeing, and atmospheric conditions." 
58 | } 59 | 60 | **Full Publication:** https://www.sciencedirect.com/science/article/pii/S221313372030038X 61 | 62 | 63 | Or cite the project itself from `ASCL `_: 64 | 65 | Beroiz, M. I. (2019). Astroalign: Asterism-matching alignment of 66 | astronomical images. Astrophysics Source Code Library. 67 | 68 | Bibtex:: 69 | 70 | @article{beroiz2019astroalign, 71 | title={Astroalign: Asterism-matching alignment of astronomical images}, 72 | author={Beroiz, Martin I}, 73 | journal={Astrophysics Source Code Library}, 74 | year={2019} 75 | } 76 | 77 | 78 | Guide: 79 | ^^^^^^ 80 | 81 | .. toctree:: 82 | :maxdepth: 2 83 | 84 | installation 85 | tutorial 86 | mask 87 | examples 88 | api 89 | 90 | 91 | Indices and tables 92 | ================== 93 | 94 | * :ref:`genindex` 95 | * :ref:`search` 96 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![logo](docs/images/logo-inline-dark-light.png) 2 | 3 | *** 4 | 5 | [![QuatroPe](https://img.shields.io/badge/QuatroPe-Applications-1c5896)](https://quatrope.github.io/) 6 | [![unit tests](https://github.com/quatrope/astroalign/actions/workflows/aa-ci.yml/badge.svg?branch=master)](https://github.com/quatrope/astroalign/actions/workflows/aa-ci.yml) 7 | [![Coverage](https://codecov.io/github/quatrope/astroalign/coverage.svg?branch=master)](https://codecov.io/github/quatrope/astroalign) 8 | [![Documentation Status](https://readthedocs.org/projects/astroalign/badge/?version=latest)](http://astroalign.readthedocs.org/en/latest/?badge=latest) 9 | [![PyPI](https://img.shields.io/pypi/v/astroalign)](https://pypi.org/project/astroalign/) 10 | ![PyPI - Downloads](https://img.shields.io/pypi/dm/astroalign) 11 | [![ascl:1906.001](https://img.shields.io/badge/ascl-1906.001-blue.svg?colorB=262255)](http://ascl.net/1906.001) 12 | 13 | 14 | **ASTROALIGN** is a python module that will try to align two stellar astronomical 
images, especially when there is no WCS information available. 15 | 16 | It does so by finding similar 3-point asterisms (triangles) in both images and deducing the affine transformation between them. 17 | 18 | Generic registration routines try to match feature points, using corner 19 | detection routines to make the point correspondence. 20 | These generally fail for stellar astronomical images, since stars have very 21 | little stable structure and so, in general, indistinguishable from each other. 22 | Asterism matching is more robust, and closer to the human way of matching stellar images. 23 | 24 | Astroalign can match images of very different field of view, point-spread function, seeing and atmospheric conditions. 25 | 26 | It may not work, or work with special care, on images of extended objects with few point-like sources or in very crowded fields. 27 | 28 | You can find a Jupyter notebook example with the main features at [http://quatrope.github.io/astroalign/](http://quatrope.github.io/astroalign/). 29 | 30 | **Full documentation:** https://astroalign.readthedocs.io/ 31 | 32 | # Installation 33 | 34 | Install from PyPI 35 | 36 | ```bash 37 | $ pip install astroalign 38 | ``` 39 | 40 | ## Running Tests 41 | 42 | ```bash 43 | python tests/test_align.py 44 | ``` 45 | 46 | or using pytest: 47 | 48 | ```bash 49 | pytest -v 50 | ``` 51 | 52 | # Usage example 53 | 54 | ``` 55 | >>> import astroalign as aa 56 | >>> aligned_image, footprint = aa.register(source_image, target_image) 57 | ``` 58 | 59 | In this example `source_image` will be interpolated by a transformation to coincide pixel to pixel with `target_image` and stored in `aligned_image`. 60 | 61 | If we are only interested in knowing the transformation and the correspondence of control points in both images, use `find_transform` will return the transformation in a [Scikit-Image](https://scikit-image.org/) `SimilarityTransform` object and a list of stars in source with the corresponding stars in target. 
62 | 63 | ``` 64 | >>> transf, (s_list, t_list) = aa.find_transform(source, target) 65 | ``` 66 | 67 | `source` and `target` can each either be the numpy array of the image (grayscale or color), 68 | or an iterable of (x, y) pairs of star positions on the image. 69 | 70 | The returned `transf` object is a scikit-image [`SimilarityTranform`](http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.SimilarityTransform) object that contains the transformation matrix along with the scale, rotation and translation parameters. 71 | 72 | `s_list` and `t_list` are numpy arrays of (x, y) point correspondence between `source` and `target`. `transf` applied to `s_list` will approximately render `t_list`. 73 | 74 | # Related Software 75 | 76 | There are other related software that may offer similar functionality as astroalign. 77 | This list is not exhaustive and may be others. 78 | 79 | * [astrometry.net](https://github.com/dstndstn/astrometry.net) 80 | * [reproject](https://github.com/astropy/reproject) 81 | * [Watney Astrometry Engine](https://github.com/Jusas/WatneyAstrometry) 82 | * [Stellar Solver](https://github.com/rlancaste/stellarsolver) 83 | * [THRASTRO](https://github.com/THRASTRO/astrometrylib) 84 | * [Montage](https://github.com/Caltech-IPAC/Montage) 85 | * [Aafitrans](https://github.com/prajwel/aafitrans) 86 | * [astrometry](https://github.com/neuromorphicsystems/astrometry) 87 | 88 | # Citation 89 | 90 | If you use astroalign in a scientific publication, we would appreciate citations to the following [paper](https://www.sciencedirect.com/science/article/pii/S221313372030038X): 91 | 92 | Astroalign: A Python module for astronomical image registration. 93 | Beroiz, M., Cabral, J. B., & Sanchez, B. 94 | Astronomy & Computing, Volume 32, July 2020, 100384. 
95 | 96 | *** 97 | 98 | 99 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. 
coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 1>NUL 2>NUL 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 
104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\astroalign.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\astroalign.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 
167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 
235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help 23 | help: 24 | @echo "Please use \`make ' where is one of" 25 | @echo " html to make standalone HTML files" 26 | @echo " dirhtml to make HTML files named index.html in directories" 27 | @echo " singlehtml to make a single large HTML file" 28 | @echo " pickle to make pickle files" 29 | @echo " json to make JSON files" 30 | @echo " htmlhelp to make HTML files and a HTML help project" 31 | @echo " qthelp to make HTML files and a qthelp project" 32 | @echo " applehelp to make an Apple Help Book" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | @echo " coverage to run coverage check of the documentation (if enabled)" 49 | 50 | .PHONY: clean 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | .PHONY: html 55 | html: 56 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 57 | @echo 58 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 59 | 60 | .PHONY: dirhtml 61 | dirhtml: 62 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 63 | @echo 64 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
65 | 66 | .PHONY: singlehtml 67 | singlehtml: 68 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 69 | @echo 70 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 71 | 72 | .PHONY: pickle 73 | pickle: 74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 75 | @echo 76 | @echo "Build finished; now you can process the pickle files." 77 | 78 | .PHONY: json 79 | json: 80 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 81 | @echo 82 | @echo "Build finished; now you can process the JSON files." 83 | 84 | .PHONY: htmlhelp 85 | htmlhelp: 86 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 87 | @echo 88 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 89 | ".hhp project file in $(BUILDDIR)/htmlhelp." 90 | 91 | .PHONY: qthelp 92 | qthelp: 93 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 94 | @echo 95 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 96 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 97 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/astroalign.qhcp" 98 | @echo "To view the help file:" 99 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/astroalign.qhc" 100 | 101 | .PHONY: applehelp 102 | applehelp: 103 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 104 | @echo 105 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 106 | @echo "N.B. You won't be able to view it unless you put it in" \ 107 | "~/Library/Documentation/Help or install it in your application" \ 108 | "bundle." 109 | 110 | .PHONY: devhelp 111 | devhelp: 112 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 113 | @echo 114 | @echo "Build finished." 
115 | @echo "To view the help file:" 116 | @echo "# mkdir -p $$HOME/.local/share/devhelp/astroalign" 117 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/astroalign" 118 | @echo "# devhelp" 119 | 120 | .PHONY: epub 121 | epub: 122 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 123 | @echo 124 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 125 | 126 | .PHONY: latex 127 | latex: 128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 129 | @echo 130 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 131 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 132 | "(use \`make latexpdf' here to do that automatically)." 133 | 134 | .PHONY: latexpdf 135 | latexpdf: 136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 137 | @echo "Running LaTeX files through pdflatex..." 138 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 139 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 140 | 141 | .PHONY: latexpdfja 142 | latexpdfja: 143 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 144 | @echo "Running LaTeX files through platex and dvipdfmx..." 145 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 146 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 147 | 148 | .PHONY: text 149 | text: 150 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 151 | @echo 152 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 153 | 154 | .PHONY: man 155 | man: 156 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 157 | @echo 158 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 159 | 160 | .PHONY: texinfo 161 | texinfo: 162 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 163 | @echo 164 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 165 | @echo "Run \`make' in that directory to run these through makeinfo" \ 166 | "(use \`make info' here to do that automatically)." 
167 | 168 | .PHONY: info 169 | info: 170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 171 | @echo "Running Texinfo files through makeinfo..." 172 | make -C $(BUILDDIR)/texinfo info 173 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 174 | 175 | .PHONY: gettext 176 | gettext: 177 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 178 | @echo 179 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 180 | 181 | .PHONY: changes 182 | changes: 183 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 184 | @echo 185 | @echo "The overview file is in $(BUILDDIR)/changes." 186 | 187 | .PHONY: linkcheck 188 | linkcheck: 189 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 190 | @echo 191 | @echo "Link check complete; look for any errors in the above output " \ 192 | "or in $(BUILDDIR)/linkcheck/output.txt." 193 | 194 | .PHONY: doctest 195 | doctest: 196 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 197 | @echo "Testing of doctests in the sources finished, look at the " \ 198 | "results in $(BUILDDIR)/doctest/output.txt." 199 | 200 | .PHONY: coverage 201 | coverage: 202 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 203 | @echo "Testing of coverage in the sources finished, look at the " \ 204 | "results in $(BUILDDIR)/coverage/python.txt." 205 | 206 | .PHONY: xml 207 | xml: 208 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 209 | @echo 210 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 211 | 212 | .PHONY: pseudoxml 213 | pseudoxml: 214 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 215 | @echo 216 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
217 | -------------------------------------------------------------------------------- /docs/tutorial.rst: -------------------------------------------------------------------------------- 1 | Tutorial 2 | ======== 3 | 4 | A simple usage example 5 | ---------------------- 6 | 7 | Suppose we have two images of about the same portion of the sky, and we would like to transform one of them to fit on top of the other. 8 | Suppose we do not have WCS information, but we are confident that we could do it by eye, by matching some obvious asterisms on the two images. 9 | 10 | In this particular use case, astroalign can be of great help to automatize the process. 11 | 12 | After we load our images into numpy arrays, we simple choose one to be the source image to be transformed, and the other to be the target. 13 | 14 | The usage for this simple most common case would be as follows:: 15 | 16 | >>> import astroalign as aa 17 | >>> registered_image, footprint = aa.register(source, target) 18 | 19 | ``registered_image`` is now a transformed (numpy array) image of ``source`` that will match pixel to pixel to ``target``. 20 | 21 | ``footprint`` is a boolean numpy array, ``True`` for masked pixels with no information. 22 | 23 | .. note:: 24 | * If instead of images, you have lists of bright, reference star positions on each image, 25 | see :ref:`ftransf`. 26 | 27 | * ``astroalign.register`` will also accept as input, data objects with ``data`` and ``mask`` properties, 28 | like ``NDData``, ``CCDData`` and ``Numpy`` masked arrays. 29 | See :ref:`dataobjs`. 30 | 31 | * Check this `Jupyter notebook `_ for a more complete example. 32 | 33 | .. warning:: 34 | Flux may not be conserved after the transformation. 35 | 36 | .. note:: 37 | If your image requires special care see :ref:`examples`. 38 | 39 | 40 | Images with RGB channels 41 | ------------------------ 42 | 43 | Astroalign can work with color images provided the channel index be the last axis in the array. 
44 | Adding the channel dimension in the last axis of the array is the default behavior for 45 | `pillow `_ 46 | and `scikit-image `__. 47 | The transformation is found on the ``mean`` average of all the channels. 48 | PNG images with RGBA channels work similarly. 49 | 50 | Example: 51 | 52 | .. code-block:: python 53 | 54 | from PIL import Image 55 | import astroalign as aa 56 | source = Image.open("source.jpg") 57 | target = Image.open("target.jpg") 58 | registered, footprint = aa.register(source, target) 59 | # Convert back to pillow image if necessary: 60 | registered = Image.fromarray(registered.astype("unit8")) 61 | 62 | *Pillow may require array to be unsigned 8-bit integer format.* 63 | 64 | 65 | Mask Fill Value 66 | --------------- 67 | 68 | If you need to mask the aligned image with a special value over the region where transformation had no pixel information, 69 | you can use the ``footprint`` mask to do so:: 70 | 71 | >>> registered_image, footprint = aa.register(source, target) 72 | >>> registered_image[footprint] = -99999.99 73 | 74 | Or you can pass the value to the ``fill_value`` argument:: 75 | 76 | >>> registered_image, footprint = aa.register(source, target, fill_value=-99999.99) 77 | 78 | Both will yield the same result. 79 | 80 | .. _ftransf: 81 | 82 | Finding the transformation 83 | -------------------------- 84 | 85 | In some cases it may be necessary to inspect first the transformation parameters before applying it, 86 | or we may be interested only in a star to star correspondence between the images. 87 | For those cases, we can use ``find_transform``:: 88 | 89 | >>> transf, (source_list, target_list) = aa.find_transform(source, target) 90 | 91 | The inputs ``source`` and ``target`` can be either numpy arrays of the image pixels, 92 | **or any iterable of (x, y) pairs**, corresponding to star positions. 93 | 94 | Having an iterable of (x, y) pairs is especially useful in situations where source detection requires special care. 
95 | In situations like that, source detection can be done separately and the resulting catalogs fed to ``find_transform``. 96 | 97 | ``find_transform`` returns a `scikit-image `__ `SimilarityTransform `_ object that encapsulates the matrix transformation, 98 | and the transformation parameters. 99 | It will also return a tuple with two lists of star positions of ``source`` and its corresponding ordered star postions on 100 | the ``target`` image. 101 | 102 | The transformation parameters can be found in ``transf.rotation``, ``transf.translation``, ``transf.scale`` 103 | and the transformation matrix in ``transf.params``. 104 | 105 | If the transformation is satisfactory, we can apply it to the image with ``apply_transform``. 106 | Continuing our example:: 107 | 108 | >>> if transf.rotation > MIN_ROT: 109 | ... registered_image = aa.apply_transform(transf, source, target) 110 | 111 | If you know the star-to-star correspondence 112 | ------------------------------------------- 113 | 114 | .. note:: 115 | `estimate_transform `_ 116 | from `scikit-image` is imported into astroalign as a convenience. 117 | 118 | If for any reason you know which star corresponds to which other, you can call ``estimate_transform``. 119 | 120 | Let us suppose we know the correspondence: 121 | 122 | - (127.03, 85.98) in source --> (175.13, 111.36) in target 123 | - (23.11, 31.87) in source --> (0.58, 119.04) in target 124 | - (98.84, 142.99) in source --> (181.55, 206.49) in target 125 | - (150.93, 85.02) in source --> (205.60, 91.89) in target 126 | - (137.99, 12.88) in source --> (134.61, 7.94) in target 127 | 128 | Then we can estimate the transform:: 129 | 130 | >>> src = np.array([(127.03, 85.98), (23.11, 31.87), (98.84, 142.99), 131 | ... (150.93, 85.02), (137.99, 12.88)]) 132 | >>> dst = np.array([(175.13, 111.36), (0.58, 119.04), (181.55, 206.49), 133 | ... 
(205.60, 91.89), (134.61, 7.94)]) 134 | >>> tform = aa.estimate_transform('affine', src, dst) 135 | 136 | And apply it to an image with ``apply_transform`` or to a set of points with ``matrix_transform``. 137 | 138 | Applying a transformation to a set of points 139 | -------------------------------------------- 140 | 141 | .. note:: 142 | `matrix_transform `_ 143 | from `scikit-image` is imported into astroalign as a convenience. 144 | 145 | To apply a known transform to a set of points, we use ``matrix_transform``. 146 | Following the example in the previous section:: 147 | 148 | >>> dst_calc = aa.matrix_transform(src, tform.params) 149 | 150 | ``dst_calc`` should be a 5 by 2 array similar to the ``dst`` array. 151 | 152 | 153 | .. _dataobjs: 154 | 155 | Objects with ``data`` and ``mask`` property 156 | ------------------------------------------- 157 | 158 | If your image is stored in objects with ``data`` and ``mask`` properties, 159 | such as `ccdproc `_'s 160 | `CCDData `_ 161 | or `astropy `_'s 162 | `NDData `_ 163 | or a `NumPy `_ 164 | `masked array `_ 165 | you can use them as input for ``register``, ``find_transform`` and ``apply_transform``. 166 | 167 | In general in these cases it is convenient to transform their masks 168 | along with the data and to add the ``footprint`` onto the mask. 169 | 170 | Astroalign provides this functionality with the ``propagate_mask`` argument to ``register`` and ``apply_transform``. 
171 | 172 | For example:: 173 | 174 | >>> from astropy.nddata import NDData 175 | >>> nd = NDData([[0, 1], [2, 3]], [[True, False], [False, False]]) 176 | 177 | and we want to apply a clockwise 90 degree rotation:: 178 | 179 | >>> import numpy as np 180 | >>> from skimage.transform import SimilarityTransform 181 | >>> transf = SimilarityTransform(rotation=np.pi/2., translation=(1, 0)) 182 | 183 | Then we can call astroalign as usual, but with the `propagate_mask` set to True:: 184 | 185 | >>> aligned_image, footprint = aa.apply_transform(transf, nd, nd, propagate_mask=True) 186 | 187 | This will transform ``nd.data`` and ``nd.mask`` simultaneously and add the 188 | ``footprint`` mask from the transformation onto ``nd.mask``:: 189 | 190 | >>> aligned_image 191 | array([[2., 0.], 192 | [3., 1.]]) 193 | >>> footprint 194 | array([[False, True], 195 | [False, False]]) 196 | 197 | Creating a new object of the same input type is now easier:: 198 | 199 | >>> new_nd = NDData(aligned_image, mask=footprint) 200 | 201 | The same will apply for ``CCDData`` objects and ``NumPy`` masked arrays. 202 | 203 | ---------------------------------------- 204 | 205 | See :ref:`api` for the API specification. 206 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # astroalign documentation build configuration file, created by 4 | # sphinx-quickstart on Mon Jan 18 14:29:21 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 
14 | 15 | import sys 16 | import os 17 | # from recommonmark.parser import CommonMarkParser 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | sys.path.insert(0, os.path.abspath('..')) 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | #needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | extensions = [ 33 | 'sphinx.ext.autodoc', 34 | 'sphinx.ext.coverage', 35 | 'sphinx.ext.mathjax', 36 | 'sphinx.ext.napoleon', 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | source_parsers = { 43 | # '.md': CommonMarkParser, 44 | } 45 | 46 | # The suffix(es) of source filenames. 47 | # You can specify multiple suffix as a list of string: 48 | source_suffix = ['.rst', '.md'] 49 | # source_suffix = '.rst' 50 | 51 | # The encoding of source files. 52 | #source_encoding = 'utf-8-sig' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # General information about the project. 58 | project = u'astroalign' 59 | copyright = u'2017, Martin Beroiz' 60 | author = u'Martin Beroiz' 61 | 62 | # The version info for the project you're documenting, acts as replacement for 63 | # |version| and |release|, also used in various other places throughout the 64 | # built documents. 
65 | # 66 | # Get the version from astroalign file itself (not imported) 67 | with open(os.path.join('..', 'astroalign.py'), 'r') as f: 68 | for line in f: 69 | if line.startswith('__version__'): 70 | _, _, __version__ = line.replace('"', "").split() 71 | break 72 | 73 | # The short X.Y version. 74 | version = __version__ 75 | # The full version, including alpha/beta/rc tags. 76 | release = __version__ 77 | 78 | # The language for content autogenerated by Sphinx. Refer to documentation 79 | # for a list of supported languages. 80 | # 81 | # This is also used if you do content translation via gettext catalogs. 82 | # Usually you set "language" from the command line for these cases. 83 | language = 'en' 84 | 85 | # There are two options for replacing |today|: either, you set today to some 86 | # non-false value, then it is used: 87 | #today = '' 88 | # Else, today_fmt is used as the format for a strftime call. 89 | #today_fmt = '%B %d, %Y' 90 | 91 | # List of patterns, relative to source directory, that match files and 92 | # directories to ignore when looking for source files. 93 | exclude_patterns = ['_build'] 94 | 95 | # The reST default role (used for this markup: `text`) to use for all 96 | # documents. 97 | #default_role = None 98 | 99 | # If true, '()' will be appended to :func: etc. cross-reference text. 100 | #add_function_parentheses = True 101 | 102 | # If true, the current module name will be prepended to all description 103 | # unit titles (such as .. function::). 104 | #add_module_names = True 105 | 106 | # If true, sectionauthor and moduleauthor directives will be shown in the 107 | # output. They are ignored by default. 108 | #show_authors = False 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # A list of ignored prefixes for module index sorting. 114 | #modindex_common_prefix = [] 115 | 116 | # If true, keep warnings as "system message" paragraphs in the built documents. 
117 | #keep_warnings = False 118 | 119 | # If true, `todo` and `todoList` produce output, else they produce nothing. 120 | todo_include_todos = False 121 | 122 | # -- Options for Napoleon ------------------------------------------------- 123 | 124 | napoleon_google_docstring = False 125 | napoleon_numpy_docstring = True 126 | 127 | # -- Options for autodoc -------------------------------------------------- 128 | 129 | autodoc_mock_imports = [ 130 | "scipy", "scipy.spatial", "scipy.spatial.KDTree", "numpy", 131 | "skimage", "skimage.transform", 132 | "skimage.transform.estimate_transform", 133 | "skimage.transform.matrix_transform", 134 | "skimage.transform.warp", 135 | "sep", 136 | ] 137 | 138 | # -- Options for HTML output ---------------------------------------------- 139 | 140 | # The theme to use for HTML and HTML Help pages. See the documentation for 141 | # a list of builtin themes. 142 | html_theme = 'sphinx_rtd_theme' 143 | 144 | # Theme options are theme-specific and customize the look and feel of a theme 145 | # further. For a list of options available for each theme, see the 146 | # documentation. 147 | #html_theme_options = {} 148 | 149 | # Add any paths that contain custom themes here, relative to this directory. 150 | #html_theme_path = [] 151 | 152 | # The name for this set of Sphinx documents. If None, it defaults to 153 | # " v documentation". 154 | #html_title = None 155 | 156 | # A shorter title for the navigation bar. Default is the same as html_title. 157 | #html_short_title = None 158 | 159 | # The name of an image file (relative to this directory) to place at the top 160 | # of the sidebar. 161 | # html_logo = None 162 | 163 | # The name of an image file (within the static path) to use as favicon of the 164 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 165 | # pixels large. 
166 | #html_favicon = None 167 | 168 | # Add any paths that contain custom static files (such as style sheets) here, 169 | # relative to this directory. They are copied after the builtin static files, 170 | # so a file named "default.css" will overwrite the builtin "default.css". 171 | # html_static_path = ['_static'] 172 | 173 | # Add any extra paths that contain custom files (such as robots.txt or 174 | # .htaccess) here, relative to this directory. These files are copied 175 | # directly to the root of the documentation. 176 | #html_extra_path = [] 177 | 178 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 179 | # using the given strftime format. 180 | #html_last_updated_fmt = '%b %d, %Y' 181 | 182 | # If true, SmartyPants will be used to convert quotes and dashes to 183 | # typographically correct entities. 184 | #html_use_smartypants = True 185 | 186 | # Custom sidebar templates, maps document names to template names. 187 | #html_sidebars = {} 188 | 189 | # Additional templates that should be rendered to pages, maps page names to 190 | # template names. 191 | #html_additional_pages = {} 192 | 193 | # If false, no module index is generated. 194 | #html_domain_indices = True 195 | 196 | # If false, no index is generated. 197 | #html_use_index = True 198 | 199 | # If true, the index is split into individual pages for each letter. 200 | #html_split_index = False 201 | 202 | # If true, links to the reST sources are added to the pages. 203 | #html_show_sourcelink = True 204 | 205 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 206 | #html_show_sphinx = True 207 | 208 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 209 | #html_show_copyright = True 210 | 211 | # If true, an OpenSearch description file will be output, and all pages will 212 | # contain a tag referring to it. The value of this option must be the 213 | # base URL from which the finished HTML is served. 
214 | #html_use_opensearch = '' 215 | 216 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 217 | #html_file_suffix = None 218 | 219 | # Language to be used for generating the HTML full-text search index. 220 | # Sphinx supports the following languages: 221 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 222 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 223 | #html_search_language = 'en' 224 | 225 | # A dictionary with options for the search language support, empty by default. 226 | # Now only 'ja' uses this config value 227 | #html_search_options = {'type': 'default'} 228 | 229 | # The name of a javascript file (relative to the configuration directory) that 230 | # implements a search results scorer. If empty, the default will be used. 231 | #html_search_scorer = 'scorer.js' 232 | 233 | # Output file base name for HTML help builder. 234 | htmlhelp_basename = 'astroaligndoc' 235 | 236 | # -- Options for LaTeX output --------------------------------------------- 237 | 238 | latex_elements = { 239 | # The paper size ('letterpaper' or 'a4paper'). 240 | #'papersize': 'letterpaper', 241 | 242 | # The font size ('10pt', '11pt' or '12pt'). 243 | #'pointsize': '10pt', 244 | 245 | # Additional stuff for the LaTeX preamble. 246 | #'preamble': '', 247 | 248 | # Latex figure (float) alignment 249 | #'figure_align': 'htbp', 250 | } 251 | 252 | # Grouping the document tree into LaTeX files. List of tuples 253 | # (source start file, target name, title, 254 | # author, documentclass [howto, manual, or own class]). 255 | latex_documents = [ 256 | (master_doc, 'astroalign.tex', u'astroalign Documentation', 257 | u'Martin Beroiz', 'manual'), 258 | ] 259 | 260 | # The name of an image file (relative to this directory) to place at the top of 261 | # the title page. 262 | #latex_logo = None 263 | 264 | # For "manual" documents, if this is true, then toplevel headings are parts, 265 | # not chapters. 
266 | #latex_use_parts = False 267 | 268 | # If true, show page references after internal links. 269 | #latex_show_pagerefs = False 270 | 271 | # If true, show URL addresses after external links. 272 | #latex_show_urls = False 273 | 274 | # Documents to append as an appendix to all manuals. 275 | #latex_appendices = [] 276 | 277 | # If false, no module index is generated. 278 | #latex_domain_indices = True 279 | 280 | 281 | # -- Options for manual page output --------------------------------------- 282 | 283 | # One entry per manual page. List of tuples 284 | # (source start file, name, description, authors, manual section). 285 | man_pages = [ 286 | (master_doc, 'astroalign', u'astroalign Documentation', 287 | [author], 1) 288 | ] 289 | 290 | # If true, show URL addresses after external links. 291 | #man_show_urls = False 292 | 293 | 294 | # -- Options for Texinfo output ------------------------------------------- 295 | 296 | # Grouping the document tree into Texinfo files. List of tuples 297 | # (source start file, target name, title, author, 298 | # dir menu entry, description, category) 299 | texinfo_documents = [ 300 | (master_doc, 'astroalign', u'astroalign Documentation', 301 | author, 'astroalign', 'One line description of project.', 302 | 'Miscellaneous'), 303 | ] 304 | 305 | # Documents to append as an appendix to all manuals. 306 | #texinfo_appendices = [] 307 | 308 | # If false, no module index is generated. 309 | #texinfo_domain_indices = True 310 | 311 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 312 | #texinfo_show_urls = 'footnote' 313 | 314 | # If true, do not generate a @detailmenu in the "Top" node's menu. 315 | #texinfo_no_detailmenu = False 316 | -------------------------------------------------------------------------------- /benchmarks/flux_bench.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2016-2019 Martin Beroiz, Juan B. 
# MIT License

# Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


# =============================================================================
# IMPORTS
# =============================================================================

import sys
import os
import datetime as dt
import argparse

import numpy as np

import astroalign as aa

import pandas as pd

import sep

from skimage.transform import SimilarityTransform

from scipy import stats


test_path = os.path.abspath(os.path.dirname(aa.__file__))
sys.path.insert(0, test_path)

from tests.test_align import simulate_image_single  # noqa


# =============================================================================
# CONSTANTS
# =============================================================================

SIZE = 256  # side, in pixels, of the simulated square image

STARS = 300  # number of stars injected into the simulated image

NOISE = 100  # lambda parameter of the Poisson noise

REPEATS = 35  # how many random transformations are tested per run

DEFAULT_SIZE = 6.4, 4.8  # default figure size (inches) for the plot command


# =============================================================================
# FUNCTIONS
# =============================================================================

def get_image(size, stars, noise, seed):
    """Return a single simulated ``(size, size)`` image.

    Parameters
    ----------
    size : int
        Side of the square image in pixels.
    stars : int
        Number of stars to simulate.
    noise : int
        Lambda parameter of the Poisson noise.
    seed : int or None
        If not None, seeds ``np.random`` for reproducibility.
    """
    if seed is not None:
        np.random.seed(seed)
    shape = (size, size)
    image = simulate_image_single(
        shape=shape, num_stars=stars, noise_level=noise)[:2]
    return image


def benchmark(size=SIZE, stars=STARS, noise=NOISE, repeats=REPEATS, seed=None):
    """Measure how well ``aa.apply_transform`` preserves fluxes.

    A simulated image is photometered with SEP, transformed ``repeats``
    times with random similarity transforms, photometered again at the
    transformed star positions, and the per-star flux ratios collected.

    Returns
    -------
    pandas.DataFrame
        One row per (star, transformation) with the original and new
        photometry, the transform parameters and the flux ratio per
        unit area.
    """
    # get image
    image = get_image(size, stars, noise, seed)
    imagedata = np.ascontiguousarray(image[0])

    # detect sources (we know where they are, actually)
    bkg = sep.Background(imagedata)
    thresh = 3. * bkg.globalrms
    sources = sep.extract(imagedata - bkg.back(), thresh)
    sources.sort(order='flux')

    # perform photometry on the background-subtracted image
    flux, fluxerr, flag = sep.sum_circle(
        imagedata - bkg.back(), sources['x'],
        sources['y'], 3.0, err=bkg.globalrms, gain=1.0)

    dframes = []
    # transform it
    for i_trsf in range(repeats):
        # np.random.randint requires integer bounds; the original float
        # expressions (size / 32.) raise a TypeError on modern NumPy, so
        # the +/- size/32 translation window is computed as an int.
        shift = max(int(size / 32), 1)
        dx, dy = np.random.randint(low=-shift, high=shift, size=2)
        theta = (np.random.random() - 0.5) * 0.125 * np.pi
        s = 0.85 + np.random.random() * 0.3
        trf = SimilarityTransform(
            translation=(dx, dy), rotation=theta, scale=s)

        # a 2x-sized target so the transformed image always fits
        target = np.zeros(shape=np.array(imagedata.shape) * 2)
        newimage = aa.apply_transform(trf, imagedata - bkg.back(), target)

        # perform photometry on new places, aperture scaled with the zoom
        src_coords = np.array([sources['x'], sources['y']]).T
        new_coords = trf(src_coords).T
        nflux, nfluxerr, nflag = sep.sum_circle(
            newimage[0], new_coords[0], new_coords[1], 3.0 * s,
            err=bkg.globalrms, gain=1.0)

        # compare fluxes, keeping only apertures with clean photometry
        good_flux = nflag == 0
        new_to_orig = nflux[good_flux] / flux[good_flux]

        # put everything in a pd dataframe
        df = pd.DataFrame()

        df["idx"] = np.array([i_trsf] * sum(good_flux))
        df["seed"] = np.array([seed] * sum(good_flux))
        df["repeats"] = np.array([repeats] * sum(good_flux))

        df['orig_x'] = sources['x'][good_flux]
        df['orig_y'] = sources['y'][good_flux]
        df['orig_flux'] = flux[good_flux]
        df['orig_fluxerr'] = fluxerr[good_flux]
        df['orig_flag'] = flag[good_flux]

        df['new_x'] = new_coords[0][good_flux]
        df['new_y'] = new_coords[1][good_flux]
        df['new_flux'] = nflux[good_flux]
        df['new_fluxerr'] = nfluxerr[good_flux]
        df['new_flag'] = nflag[good_flux]

        df['flux_ratio'] = new_to_orig

        df['trf_theta'] = theta
        df['trf_dx'] = dx
        df['trf_dy'] = dy
        df['trf_scale'] = s

        slp, intpt, r_val, p_val, std_err = stats.linregress(
            flux[good_flux], nflux[good_flux])
        df['stats_slope'] = slp
        df['stats_intpt'] = intpt
        # a perfect transform preserves flux per unit area, so this is ~1
        df['flux_per_area_ratio'] = df['flux_ratio'] / (df['trf_scale'] ** 2)

        dframes.append(df)

    final_df = pd.concat(dframes)

    return final_df


def describe(results):
    """Return the number of repetitions and a statistical summary."""
    repetitions = results.repeats.values[0]
    resume = results[["flux_per_area_ratio"]].describe()
    return repetitions, resume


def plot(results, ax):
    """Draw the flux-ratio histogram plus a Gaussian overlay on ``ax``."""
    bins = np.arange(0.95, 1.05, 0.001)
    # 'normed' was removed in matplotlib >= 3.1; 'density' is the
    # equivalent keyword.
    ax.hist(
        results.flux_per_area_ratio, density=True,
        histtype='step', bins=bins, label='Data')

    ax.plot(
        bins + (bins[1] - bins[0]) / 2.,
        stats.norm.pdf(
            bins,
            loc=np.mean(results.flux_per_area_ratio),
            scale=np.std(results.flux_per_area_ratio)),
        label='Gaussian')

    ax.legend(loc='best')

    ax.set_title("Flux ratio per unit area")
    ax.set_xlabel('Flux ratio per unit area')
    ax.set_ylabel('Normalized N')

    return ax


# =============================================================================
# CLI MAIN
# =============================================================================

class CLI:
    """Command-line interface of the flux benchmark tool.

    Exposes three subcommands: ``benchmark`` (run and store results),
    ``describe`` (summarize a stored CSV) and ``plot`` (histogram).
    """

    def __init__(self):
        self._parser = argparse.ArgumentParser(
            description="Astroalign flux benchmark tool")
        # with no subcommand, just print the usage
        self._parser.set_defaults(
            callback=lambda ns: self.parser.print_usage())

        self._parser.add_argument(
            '--version', action='version', version='%(prog)s 2019.10')

        subparsers = self._parser.add_subparsers()

        # =====================================================================
        # benchmark subparser
        # =====================================================================

        benchmark = subparsers.add_parser(
            "benchmark",
            help="Execute and collect the flux benchmark data of astroalign")
        benchmark.set_defaults(callback=self.benchmark_command)

        benchmark.add_argument(
            "--size", dest="size", type=int, default=SIZE,
            help=("The size in pixels of the image. This parameter creates "
                  f"square figure (defaults={SIZE})."))

        benchmark.add_argument(
            "--stars", dest="stars", type=int, default=STARS,
            help=("The total numbers of stars in the image "
                  f"(defaults={STARS})."))

        benchmark.add_argument(
            "--noise", dest="noise", type=int, default=NOISE,
            help=f"lambda parameter for poisson noise (default={NOISE})")

        benchmark.add_argument(
            "--number", dest="repeats", type=int, default=REPEATS,
            help=f"How many flux tests must be executed (default={REPEATS})")

        benchmark.add_argument(
            "--seed", dest="seed", type=int, default=None,
            help=("Random seed used to initialize the pseudo-random number "
                  "generator. if seed is None, then random-state will try to "
                  "read data from /dev/urandom (or the Windows analogue) if "
                  "available or seed from the clock otherwise "
                  "(default=None)."))

        benchmark.add_argument(
            "--out", "-o", dest="out", required=True,
            type=argparse.FileType('w'),
            help="Output file path. The data was stored in CSV format")

        # =====================================================================
        # describe subparser
        # =====================================================================

        describe = subparsers.add_parser(
            "describe",
            help="Show a resume (summary) of the benchmark results")
        describe.set_defaults(callback=self.describe_command)

        describe.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the flux benchmark data in CSV format")

        # =====================================================================
        # plot subparser
        # =====================================================================

        plot = subparsers.add_parser(
            "plot", help="Show the histogram of a given results")
        plot.set_defaults(callback=self.plot_command)

        plot.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the flux benchmark data in CSV format")

        plot.add_argument(
            "--size", dest="size", nargs=2, type=float,
            help=("The size of the entire figure in inches in the format "
                  f"'width height' (default={DEFAULT_SIZE})."))

        plot.add_argument(
            "--out", "-o", dest="out",
            help=("A file to store the generated plot. "
                  "By default the default matplotlib backend shows the plot"))

    def parse_and_run(self, *args, **kwargs):
        """Parse the CLI arguments and dispatch to the chosen subcommand."""
        ns = self._parser.parse_args(*args, **kwargs)
        return ns.callback(ns)

    def plot_command(self, ns):
        """Render (or store) the histogram for a stored benchmark CSV."""
        # matplotlib is imported lazily so benchmark/describe do not
        # require a working plotting backend
        import matplotlib.pyplot as plt

        results = pd.read_csv(ns.file)

        size = ns.size if ns.size else DEFAULT_SIZE

        fig, ax = plt.subplots()
        fig.set_size_inches(*size)

        plot(results, ax)

        fig.suptitle("")
        plt.tight_layout()
        if ns.out is None:
            print(f"Showing plot for data stored in '{ns.file.name}'...")
            # FigureCanvas.set_window_title was removed in matplotlib 3.6;
            # the window title now lives on the canvas manager.
            fig.canvas.manager.set_window_title(
                f"{self.parser.prog} - {ns.file.name}")
            plt.show()
        else:
            print(
                f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...")
            plt.savefig(ns.out)
        print("DONE!")

    def describe_command(self, ns):
        """Print a statistical summary of a stored benchmark CSV."""
        results = pd.read_csv(ns.file)

        repetitions, resume = describe(results)

        print(f"Data size: {len(results)}")
        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

    def benchmark_command(self, ns):
        """Run the flux benchmark and store the results as CSV."""
        if ns.repeats <= 0:
            self._parser.error(f"'repeats' must be > 0. Found {ns.repeats}")

        now = dt.datetime.now

        print(
            f"[{now()}] Starting flux benchmark "
            f"for astroalign {aa.__version__}...")
        print("")
        results = benchmark(
            size=ns.size, stars=ns.stars, noise=ns.noise,
            repeats=ns.repeats, seed=ns.seed)

        repetitions, resume = describe(results)

        print(f"[{now()}] Data size: {len(results)}")
        print(f"\twith {repetitions} repetitions \n")

        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

        results.to_csv(ns.out, index=False)
        # confirm where the data went, consistent with time_bench.py
        print(f"[{now()}] Data stored in '{ns.out.name}'")

    @property
    def parser(self):
        """The underlying ``argparse.ArgumentParser``."""
        return self._parser


# =============================================================================
# MAIN
# =============================================================================

if __name__ == "__main__":
    parser = CLI()
    parser.parse_and_run()

# ---------------------------------------------------------------------------
# benchmarks/time_bench.py
# ---------------------------------------------------------------------------

# MIT License

# Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


# =============================================================================
# IMPORTS
# =============================================================================

import sys
import os
import timeit
import datetime as dt
import argparse
from collections import OrderedDict

import numpy as np

import astroalign as aa

import pandas as pd

import joblib

from sklearn.model_selection import ParameterGrid

import tqdm

test_path = os.path.abspath(os.path.dirname(aa.__file__))
sys.path.insert(0, test_path)

from tests.test_align import simulate_image_pair  # noqa


# =============================================================================
# CONSTANTS
# =============================================================================

SIZES = (256, 512, 768, 1024)  # image sides (pixels) to benchmark

STARS = (300, 500, 1000, 10_000)  # stars-per-image values to benchmark

NOISES = (100, 500, 1000, 5000)  # Poisson-noise lambdas to benchmark

COMB_NUMBER = 10  # random image pairs per (size, stars, noise) combination

STATEMENT = "aa.register(source, target)"  # statement measured by timeit

REPEATS = 50  # timeit repetitions per image pair

# subplot grid layout for each plot orientation
COLSROWS = {
    "vertical": {"ncols": 1, "nrows": 3},
    "horizontal": {"ncols": 3, "nrows": 1}
}

# default figure size (inches) for each plot orientation
DEFAULT_SIZES = {
    "vertical": (4, 12),
    "horizontal": (12, 4)
}


# =============================================================================
# FUNCTIONS
# =============================================================================

def get_images(size, stars, noise, seed):
    """Retrieve a (source, target) pair of simulated square images."""
    if seed is not None:
        np.random.seed(seed)
    shape = (size, size)
    source, target = simulate_image_pair(
        shape=shape, num_stars=stars, noise_level=noise)[:2]
    return source, target


def get_parameters(sizes, stars, noises, comb_number, repeats, seed):
    """Create a list of dictionaries with all the combinations of the given
    parameters.

    Every (size, stars, noise) combination appears ``comb_number`` times,
    each copy carrying its own image seed so runs are reproducible even
    when executed in parallel.
    """
    grid = ParameterGrid({
        "size": sizes, "stars": stars, "noise": noises})
    grid = list(grid) * comb_number

    # set the random state for run in parallel
    random = np.random.RandomState(seed)
    images_seeds = random.randint(1_000_000, size=len(grid))

    for idx, g in enumerate(grid):
        g["idx"] = idx
        g["seed"] = seed
        g["images_seed"] = images_seeds[idx]
        g["repeats"] = repeats

    return grid


def _test(idx, size, stars, noise, seed, images_seed, repeats):
    """Time ``aa.register`` on one simulated image pair.

    Returns an ``OrderedDict`` with the parameters, the minimum
    per-loop time (key ``"time"``) and every raw timeit measurement.
    """
    # create the two images
    source, target = get_images(
        size=size, stars=stars, noise=noise, seed=images_seed)

    # create the timer
    test_globals = {"aa": aa, "source": source, "target": target}
    timer = timeit.Timer(stmt=STATEMENT, globals=test_globals)

    # find the number of loops
    loops = timer.autorange()[0]

    # create a copy of the params to be returned as result
    result = OrderedDict({
        "idx": idx, "size": size, "noise": noise, "stars": stars, "seed": seed,
        "images_seed": images_seed, "repeats": repeats, "loops": loops})

    # execute the timeit
    times = timer.repeat(repeats, loops)

    # store the times into the result; the fastest loop is the most
    # stable estimate, per the timeit documentation
    result["time"] = np.min(np.array(times) / loops)
    for tidx, time in enumerate(times):
        result[f"time_{tidx}"] = time

    return result


def benchmark(sizes=SIZES, stars=STARS, noises=NOISES,
              comb_number=COMB_NUMBER, seed=None, repeats=REPEATS, n_jobs=-1):
    """Run every parameter combination in parallel and collect the timings.

    Returns a ``pandas.DataFrame`` with one row per timed image pair.
    """
    grid = get_parameters(
        sizes=sizes, stars=stars, noises=noises,
        comb_number=comb_number, seed=seed, repeats=repeats)

    with joblib.Parallel(n_jobs=n_jobs) as parallel:
        results = parallel(
            joblib.delayed(_test)(**params) for params in tqdm.tqdm(grid))

    df = pd.DataFrame(results)
    return df


def describe(results):
    """Return the number of repetitions and a statistical summary."""
    repetitions = results.repeats.values[0]
    resume = results[["time", "loops"]].describe()
    return repetitions, resume


def plot(results, ax_size, ax_stars, ax_noise):
    """Draw one time boxplot per grouping variable (size, stars, noise)."""
    results = results[["size", "stars", "noise", "time"]]

    def plots(df, by, ax):
        df.boxplot(by=by, column=["time"], grid=False, ax=ax)
        ax.set_title(f"Time by {by.title()}")
        ax.set_ylabel("Seconds")
        ax.set_xlabel(by.title())

    plots(results, "noise", ax_noise)
    plots(results, "stars", ax_stars)
    plots(results, "size", ax_size)

    return (ax_size, ax_stars, ax_noise)


# =============================================================================
# CLI MAIN
# =============================================================================

class CLI:
    """Command-line interface of the timeit-based benchmark tool.

    Exposes three subcommands: ``benchmark`` (run and store results),
    ``describe`` (summarize a stored CSV) and ``plot`` (boxplots).
    """

    def __init__(self):
        self._parser = argparse.ArgumentParser(
            description="Astroalign time benchmark tool based on timeit")
        # with no subcommand, just print the usage
        self._parser.set_defaults(
            callback=lambda ns: self.parser.print_usage())

        self._parser.add_argument(
            '--version', action='version', version='%(prog)s 2019.10')

        subparsers = self._parser.add_subparsers()

        # =====================================================================
        # benchmark subparser
        # =====================================================================

        benchmark = subparsers.add_parser(
            "benchmark",
            help="Execute and collect the benchmark data of astroalign")
        benchmark.set_defaults(callback=self.benchmark_command)

        benchmark.add_argument(
            "--sizes", dest="sizes", type=int, default=SIZES, nargs="+",
            help=("The size in pixels of the image. This parameter creates "
                  f"square figure (defaults={SIZES})."))

        benchmark.add_argument(
            "--stars", dest="stars", type=int, default=STARS, nargs="+",
            help=("The total numbers of stars in the image "
                  f"(defaults={STARS})."))

        benchmark.add_argument(
            "--noises", dest="noises", type=int, default=NOISES, nargs="+",
            help=f"lambda parameter for poisson noise (default={NOISES})")

        benchmark.add_argument(
            "--number", dest="comb_number", type=int, default=COMB_NUMBER,
            help=("How many random images pairs must be created for one "
                  "combination of sizes, stars and noise "
                  f"(default={COMB_NUMBER})."))

        benchmark.add_argument(
            "--seed", dest="seed", type=int, default=None,
            help=("Random seed used to initialize the pseudo-random number "
                  "generator. if seed is None, then random-state will try to "
                  "read data from /dev/urandom (or the Windows analogue) if "
                  "available or seed from the clock otherwise "
                  "(default=None)."))

        benchmark.add_argument(
            "--repeats", dest="repeats", type=int, default=REPEATS,
            help=("How many measurements must be taken for every image pair. "
                  "The final 'time' is the lower bound of all the times. "
                  "Docs: https://docs.python.org/3.7/library/timeit.html"))

        benchmark.add_argument(
            "--jobs", dest="n_jobs", type=int, default=-1,
            help=("The number of CPU to run the benchmarks. "
                  "-1 uses all the available CPUS (default=-1)"))

        benchmark.add_argument(
            "--out", "-o", dest="out", required=True,
            type=argparse.FileType('w'),
            help="Output file path. The data was stored in CSV format")

        # =====================================================================
        # describe subparser
        # =====================================================================

        describe = subparsers.add_parser(
            "describe",
            help="Show a resume (summary) of the benchmark results")
        describe.set_defaults(callback=self.describe_command)

        describe.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the time benchmark data in CSV format")

        # =====================================================================
        # plot subparser
        # =====================================================================

        plot = subparsers.add_parser(
            "plot", help="Show three boxplots of a given results")
        plot.set_defaults(callback=self.plot_command)

        plot.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the time benchmark data in CSV format")

        plot.add_argument(
            "--orientation", dest="orientation",
            choices=list(COLSROWS.keys()), default="horizontal",
            help=("If the plots will be a single row (horizontal) "
                  "or vertical for a single column (default='horizontal')"))

        plot.add_argument(
            "--size", dest="size", nargs=2, type=float,
            help=("The size of the entire figure in inches in the format "
                  "'width height' for horizontal orientation the size by "
                  f"default is {DEFAULT_SIZES['horizontal']} and for "
                  f"vertical {DEFAULT_SIZES['vertical']}."))

        plot.add_argument(
            "--out", "-o", dest="out",
            help=("A file to store the generated plot. "
                  "By default the default matplotlib backend shows the plot"))

    def parse_and_run(self, *args, **kwargs):
        """Parse the CLI arguments and dispatch to the chosen subcommand."""
        ns = self._parser.parse_args(*args, **kwargs)
        return ns.callback(ns)

    def plot_command(self, ns):
        """Render (or store) the boxplots for a stored benchmark CSV."""
        # matplotlib is imported lazily so benchmark/describe do not
        # require a working plotting backend
        import matplotlib.pyplot as plt

        results = pd.read_csv(ns.file)

        orientation = COLSROWS[ns.orientation]
        size = ns.size if ns.size else DEFAULT_SIZES[ns.orientation]

        fig, axes = plt.subplots(**orientation)
        fig.set_size_inches(*size)

        plot(results, *axes)

        fig.suptitle("")
        plt.tight_layout()
        if ns.out is None:
            print(f"Showing plot for data stored in '{ns.file.name}'...")
            # FigureCanvas.set_window_title was removed in matplotlib 3.6;
            # the window title now lives on the canvas manager.
            fig.canvas.manager.set_window_title(
                f"{self.parser.prog} - {ns.file.name}")
            plt.show()
        else:
            print(
                f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...")
            plt.savefig(ns.out)
        print("DONE!")

    def describe_command(self, ns):
        """Print a statistical summary of a stored benchmark CSV."""
        results = pd.read_csv(ns.file)

        repetitions, resume = describe(results)

        print(f"Executed: {len(results)} cases")

        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

    def benchmark_command(self, ns):
        """Run the time benchmark and store the results as CSV."""
        if ns.repeats <= 0:
            self._parser.error(f"'repeats' must be > 0. Found {ns.repeats}")

        now = dt.datetime.now

        print(
            f"[{now()}] Starting benchmark for astroalign {aa.__version__}...")
        print("")
        results = benchmark(
            sizes=ns.sizes, stars=ns.stars, noises=ns.noises,
            comb_number=ns.comb_number, seed=ns.seed,
            repeats=ns.repeats, n_jobs=ns.n_jobs)

        repetitions, resume = describe(results)

        print(f"[{now()}] Executed: {len(results)} cases")

        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

        results.to_csv(ns.out, index=False)
        print(f"[{now()}] Data stored in '{ns.out.name}'")

    @property
    def parser(self):
        """The underlying ``argparse.ArgumentParser``."""
        return self._parser


# =============================================================================
# MAIN
# =============================================================================

if __name__ == "__main__":
    parser = CLI()
    parser.parse_and_run()

# ---------------------------------------------------------------------------
# benchmarks/time_regression.py
# ---------------------------------------------------------------------------

# MIT License

# Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | # ============================================================================= 24 | # IMPORTS 25 | # ============================================================================= 26 | 27 | import sys 28 | import os 29 | import timeit 30 | import datetime as dt 31 | import argparse 32 | from collections import OrderedDict 33 | 34 | import numpy as np 35 | 36 | import astroalign as aa 37 | 38 | from sklearn.model_selection import ParameterGrid 39 | from sklearn.linear_model import LinearRegression 40 | from sklearn.metrics import mean_squared_error, r2_score 41 | 42 | import pandas as pd 43 | 44 | import joblib 45 | 46 | import tqdm 47 | 48 | 49 | test_path = os.path.abspath(os.path.dirname(aa.__file__)) 50 | sys.path.insert(0, test_path) 51 | 52 | from tests.test_align import simulate_image_pair # noqa 53 | 54 | 55 | # ============================================================================= 56 | # CONSTANTS 57 | # ============================================================================= 58 | 59 | SIZES = (256, 512, 768, 1024) 60 | 61 | STARS = 10000 62 | 63 | NOISE = 1000 64 | 65 | STEP = 10 66 | 67 | STATEMENT = "aa.register(source, target)" 68 | 69 | REPEATS = 50 70 | 71 | COMB_NUMBER = 10 72 | 73 | DEFAULT_SIZE = (8, 8) 74 | 75 | 76 | # ============================================================================= 77 | # FUNCTIONS 78 | # ============================================================================= 79 | 80 | def 
get_images(size, stars, noise, seed): 81 | """Retrieves a pair source and target image""" 82 | if seed is not None: 83 | np.random.seed(seed) 84 | shape = (size, size) 85 | source, target = simulate_image_pair( 86 | shape=shape, num_stars=stars, noise_level=noise)[:2] 87 | return source, target 88 | 89 | 90 | def get_parameters(min_size, max_size, step_size, stars, 91 | noise, seed, comb_number, repeats): 92 | """Create a list of dictionaries with all the combinations of the given 93 | parameters. 94 | 95 | """ 96 | 97 | sample_size = int((max_size - min_size) / step_size) 98 | sizes = np.linspace(min_size, max_size, sample_size, dtype=int) 99 | 100 | grid = ParameterGrid({ 101 | "size": sizes, "stars": [stars], 102 | "noise": [noise], "repeats": [repeats]}) 103 | grid = list(grid) * comb_number 104 | 105 | # set the random state for run in parallel 106 | random = np.random.RandomState(seed) 107 | images_seeds = random.randint(1_000_000, size=len(grid)) 108 | 109 | for idx, g in enumerate(grid): 110 | g["idx"] = idx 111 | g["seed"] = seed 112 | g["min_size"] = min_size 113 | g["max_size"] = max_size 114 | g["step_size"] = step_size 115 | g["images_seed"] = images_seeds[idx] 116 | return grid 117 | 118 | 119 | def _test(idx, min_size, max_size, step_size, size, 120 | stars, noise, seed, repeats, images_seed): 121 | 122 | # create the two images 123 | source, target = get_images( 124 | size=size, stars=stars, noise=noise, seed=images_seed) 125 | 126 | # create the timer 127 | test_globals = {"aa": aa, "source": source, "target": target} 128 | timer = timeit.Timer(stmt=STATEMENT, globals=test_globals) 129 | 130 | # find the number of loops 131 | loops = timer.autorange()[0] 132 | 133 | # create a copy of the params to be returned ad result 134 | result = OrderedDict({ 135 | "idx": idx, "min_size": min_size, "max_size": max_size, 136 | "step_size": step_size, "size": size, "noise": noise, 137 | "stars": stars, "seed": seed, "images_seed": images_seed, 138 | "repeats": 
repeats, "loops": loops}) 139 | 140 | # execute the timeit 141 | times = timer.repeat(repeats, loops) 142 | 143 | # store the times into the result 144 | result["time"] = np.min(np.array(times) / loops) 145 | for tidx, time in enumerate(times): 146 | result[f"time_{tidx}"] = time 147 | 148 | return result 149 | 150 | 151 | def benchmark(min_size=min(SIZES), max_size=max(SIZES), step_size=STEP, 152 | stars=STARS, noise=NOISE, seed=None, repeats=REPEATS, 153 | n_jobs=-1, comb_number=COMB_NUMBER): 154 | 155 | grid = get_parameters( 156 | min_size=min_size, max_size=max_size, step_size=step_size, 157 | repeats=repeats, stars=stars, noise=noise, seed=seed, 158 | comb_number=comb_number) 159 | 160 | with joblib.Parallel(n_jobs=n_jobs) as parallel: 161 | results = parallel( 162 | joblib.delayed(_test)(**params) for params in tqdm.tqdm(grid)) 163 | 164 | df = pd.DataFrame(results) 165 | return df 166 | 167 | 168 | def describe(results): 169 | repetitions = results.repeats.values[0] 170 | resume = results[["time", "size", "loops"]].describe() 171 | return repetitions, resume 172 | 173 | 174 | def plot(results, ax): 175 | df = results[["size", "time"]] 176 | 177 | df.plot.scatter(x='size', y='time', c='LightBlue', ax=ax, marker=".") 178 | 179 | # linear regression 180 | x = df["size"].values.reshape((-1, 1)) 181 | y = df["time"].values 182 | linear = LinearRegression().fit(x, y) 183 | y_pred = linear.predict(x) 184 | 185 | mqe = mean_squared_error(y, y_pred) 186 | r2 = r2_score(y, y_pred) 187 | 188 | ax.plot(x, y_pred, color='DarkBlue', linewidth=2) 189 | 190 | ax.set_title( 191 | "Linear regression between size and time " 192 | f"\n$mse={mqe:.3f}$ - $R^2={r2:.3f}$") 193 | ax.set_xlabel("Size") 194 | ax.set_ylabel("Seconds") 195 | 196 | return ax 197 | 198 | 199 | # ============================================================================= 200 | # CLI MAIN 201 | # ============================================================================= 202 | 203 | class CLI: 204 | 
205 | def __init__(self): 206 | self._parser = argparse.ArgumentParser( 207 | description="Astroalign time benchmark tool based on timeit") 208 | self._parser.set_defaults( 209 | callback=lambda ns: self.parser.print_usage()) 210 | 211 | self._parser.add_argument( 212 | '--version', action='version', version='%(prog)s 2019.10') 213 | 214 | subparsers = self._parser.add_subparsers() 215 | 216 | # ===================================================================== 217 | # benchmark subparser 218 | # ===================================================================== 219 | 220 | benchmark = subparsers.add_parser( 221 | "benchmark", 222 | help="Execute and collect the regression benchmark of astroalign") 223 | benchmark.set_defaults(callback=self.benchmark_command) 224 | 225 | benchmark.add_argument( 226 | "--max", dest="max_size", type=int, default=max(SIZES), 227 | help=("The size in pixels of the bigger square image. " 228 | f"(defaults={max(SIZES)}).")) 229 | 230 | benchmark.add_argument( 231 | "--min", dest="min_size", type=int, default=min(SIZES), 232 | help=("The size in pixels of the smallest square image. 
" 233 | f"(defaults={max(SIZES)}).")) 234 | 235 | benchmark.add_argument( 236 | "--step", dest="step_size", type=int, default=STEP, 237 | help=f"The size between every image (defaults={STEP}).") 238 | 239 | benchmark.add_argument( 240 | "--stars", dest="stars", type=int, default=STARS, 241 | help=("The total numbers of stars in the image " 242 | f"(defaults={STARS}).")) 243 | 244 | benchmark.add_argument( 245 | "--noise", dest="noise", type=int, default=NOISE, 246 | help=f"lambda parameter for poisson noise (default={NOISE})") 247 | 248 | benchmark.add_argument( 249 | "--number", dest="comb_number", type=int, default=10, 250 | help=("How many random images pairs must be created for one " 251 | f"size (default={COMB_NUMBER}).")) 252 | 253 | benchmark.add_argument( 254 | "--seed", dest="seed", type=int, default=None, 255 | help=("Random seed used to initialize the pseudo-random number " 256 | "generator. if seed is None, then random-state will try to " 257 | "read data from /dev/urandom (or the Windows analogue) if " 258 | "available or seed from the clock otherwise " 259 | "(default=None).")) 260 | 261 | benchmark.add_argument( 262 | "--repeats", dest="repeats", type=int, default=REPEATS, 263 | help=("How many measurements must be taken for every image pair. " 264 | "The final 'time' is the lower bound of all the times. " 265 | "Docs: https://docs.python.org/3.7/library/timeit.html")) 266 | 267 | benchmark.add_argument( 268 | "--jobs", dest="n_jobs", type=int, default=-1, 269 | help=("The number of CPU to run the benchmars. " 270 | "-1 uses all the available CPUS (default=-1)")) 271 | 272 | benchmark.add_argument( 273 | "--out", "-o", dest="out", required=True, 274 | type=argparse.FileType('w'), 275 | help="Output file path. 
The data was stored in CSV format") 276 | 277 | # ===================================================================== 278 | # describe subparser 279 | # ===================================================================== 280 | 281 | describe = subparsers.add_parser( 282 | "describe", 283 | help="Show a resume and (optionally) of the benchmark results") 284 | describe.set_defaults(callback=self.describe_command) 285 | 286 | describe.add_argument( 287 | "--file", "-f", dest="file", required=True, 288 | type=argparse.FileType('r'), 289 | help="File path of the time benchmark data in CSV format") 290 | 291 | # ===================================================================== 292 | # plot subparser 293 | # ===================================================================== 294 | 295 | plot = subparsers.add_parser( 296 | "plot", help="Show three boxplots of a given results") 297 | plot.set_defaults(callback=self.plot_command) 298 | 299 | plot.add_argument( 300 | "--file", "-f", dest="file", required=True, 301 | type=argparse.FileType('r'), 302 | help="File path of the time benchmark data in CSV format") 303 | 304 | plot.add_argument( 305 | "--size", dest="size", nargs=2, type=float, 306 | help=("The size of the entire figure in inches in the format " 307 | f"'width height' (default={DEFAULT_SIZE}).")) 308 | 309 | plot.add_argument( 310 | "--out", "-o", dest="out", 311 | help=("A file to store the generated plot. 
" 312 | "By default the default matplotlib backend shows the plot")) 313 | 314 | def parse_and_run(self, *args, **kwargs): 315 | ns = self._parser.parse_args(*args, **kwargs) 316 | return ns.callback(ns) 317 | 318 | def plot_command(self, ns): 319 | import matplotlib.pyplot as plt 320 | 321 | results = pd.read_csv(ns.file) 322 | 323 | size = ns.size if ns.size else DEFAULT_SIZE 324 | 325 | fig, ax = plt.subplots() 326 | fig.set_size_inches(*size) 327 | 328 | plot(results, ax) 329 | 330 | fig.suptitle("") 331 | plt.tight_layout() 332 | if ns.out is None: 333 | print(f"Showing plot for data stored in '{ns.file.name}'...") 334 | fig.canvas.set_window_title(f"{self.parser.prog} - {ns.file.name}") 335 | plt.show() 336 | else: 337 | print( 338 | f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...") 339 | plt.savefig(ns.out) 340 | print("DONE!") 341 | 342 | def describe_command(self, ns): 343 | results = pd.read_csv(ns.file) 344 | 345 | repetitions, resume = describe(results) 346 | 347 | print(f"Executed: {len(results)} cases") 348 | 349 | print(f"\twith {repetitions} repetitions \n") 350 | print(">>>>> Resume <<<<<") 351 | print(resume) 352 | print("") 353 | 354 | def benchmark_command(self, ns): 355 | if ns.step_size <= 0: 356 | self._parser.error(f"'step' must be > 0. 
Found {ns.step_size}") 357 | 358 | now = dt.datetime.now 359 | 360 | print( 361 | f"[{now()}] Starting benchmark for astroalign {aa.__version__}...") 362 | print("") 363 | results = benchmark( 364 | max_size=ns.max_size, min_size=ns.min_size, step_size=ns.step_size, 365 | stars=ns.stars, noise=ns.noise, seed=ns.seed, 366 | repeats=ns.repeats, n_jobs=ns.n_jobs, comb_number=ns.comb_number) 367 | 368 | repetitions, resume = describe(results) 369 | 370 | print(f"[{now()}] Executed: {len(results)} cases") 371 | 372 | print(f"\twith {repetitions} repetitions \n") 373 | print(">>>>> Resume <<<<<") 374 | print(resume) 375 | print("") 376 | 377 | results.to_csv(ns.out, index=False) 378 | print(f"[{now()}] Data stored in '{ns.out.name}'") 379 | 380 | @property 381 | def parser(self): 382 | return self._parser 383 | 384 | 385 | # ============================================================================= 386 | # MAIN 387 | # ============================================================================= 388 | 389 | if __name__ == "__main__": 390 | parser = CLI() 391 | parser.parse_and_run() 392 | -------------------------------------------------------------------------------- /astroalign.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2016 Martin Beroiz 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 
14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | 24 | """ 25 | ASTROALIGN aligns stellar images using no WCS information. 26 | 27 | It does so by finding similar 3-point asterisms (triangles) in both images and 28 | deducing the affine transformation between them. 29 | 30 | General registration routines try to match feature points, using corner 31 | detection routines to make the point correspondence. 32 | These generally fail for stellar astronomical images, since stars have very 33 | little stable structure and so, in general, indistinguishable from each other. 34 | 35 | Asterism matching is more robust, and closer to the human way of matching 36 | stellar images. 37 | 38 | Astroalign can match images of very different field of view, point-spread 39 | functions, seeing and atmospheric conditions. 40 | 41 | (c) Martin Beroiz 42 | """ 43 | 44 | 45 | __version__ = "2.6.2" 46 | 47 | __all__ = [ 48 | "MIN_MATCHES_FRACTION", 49 | "MaxIterError", 50 | "NUM_NEAREST_NEIGHBORS", 51 | "PIXEL_TOL", 52 | "apply_transform", 53 | "estimate_transform", 54 | "find_transform", 55 | "matrix_transform", 56 | "register", 57 | ] 58 | 59 | import numpy as _np 60 | 61 | 62 | def estimate_transform(*args, **kwargs): 63 | """Lazy-loader function for skimage.transform.estimate_transform. 
64 | 65 | Full documentation: 66 | https://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.estimate_transform 67 | """ 68 | from skimage.transform import estimate_transform 69 | 70 | return estimate_transform(*args, **kwargs) 71 | 72 | 73 | def matrix_transform(*args, **kwargs): 74 | """Lazy-loader function for skimage.transform.matrix_transform. 75 | 76 | Full documentation: 77 | https://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.matrix_transform 78 | """ 79 | from skimage.transform import matrix_transform 80 | 81 | return matrix_transform(*args, **kwargs) 82 | 83 | 84 | PIXEL_TOL = 2 85 | """The pixel distance tolerance to assume two invariant points are the same. 86 | 87 | Default: 2""" 88 | 89 | MIN_MATCHES_FRACTION = 0.8 90 | """The minimum fraction of triangle matches to accept a transformation. 91 | 92 | If the minimum fraction yields more than 10 triangles, 10 is used instead. 93 | 94 | Default: 0.8 95 | """ 96 | 97 | NUM_NEAREST_NEIGHBORS = 5 98 | """ 99 | The number of nearest neighbors of a given star (including itself) to construct 100 | the triangle invariants. 101 | 102 | Default: 5 103 | """ 104 | 105 | 106 | def _invariantfeatures(x1, x2, x3): 107 | """Given 3 points x1, x2, x3, return the invariant features for the set.""" 108 | sides = _np.sort( 109 | [ 110 | _np.linalg.norm(x1 - x2), 111 | _np.linalg.norm(x2 - x3), 112 | _np.linalg.norm(x1 - x3), 113 | ] 114 | ) 115 | return [sides[2] / sides[1], sides[1] / sides[0]] 116 | 117 | 118 | def _arrangetriplet(sources, vertex_indices): 119 | """Order vertex_indices according to length side. 120 | 121 | Order in (a, b, c) form Where: 122 | a is the vertex defined by L1 & L2 123 | b is the vertex defined by L2 & L3 124 | c is the vertex defined by L3 & L1 125 | and L1 < L2 < L3 are the sides of the triangle 126 | defined by vertex_indices. 
127 | """ 128 | ind1, ind2, ind3 = vertex_indices 129 | x1, x2, x3 = sources[vertex_indices] 130 | 131 | side_ind = _np.array([(ind1, ind2), (ind2, ind3), (ind3, ind1)]) 132 | side_lengths = list(map(_np.linalg.norm, (x1 - x2, x2 - x3, x3 - x1))) 133 | l1_ind, l2_ind, l3_ind = _np.argsort(side_lengths) 134 | 135 | # the most common vertex in the list of vertices for two sides is the 136 | # point at which they meet. 137 | from collections import Counter 138 | 139 | count = Counter(side_ind[[l1_ind, l2_ind]].flatten()) 140 | a = count.most_common(1)[0][0] 141 | count = Counter(side_ind[[l2_ind, l3_ind]].flatten()) 142 | b = count.most_common(1)[0][0] 143 | count = Counter(side_ind[[l3_ind, l1_ind]].flatten()) 144 | c = count.most_common(1)[0][0] 145 | 146 | return _np.array([a, b, c]) 147 | 148 | 149 | def _generate_invariants(sources): 150 | """Return an array of (unique) invariants derived from the array `sources`. 151 | 152 | Return an array of the indices of `sources` that correspond to each 153 | invariant, arranged as described in _arrangetriplet. 
154 | """ 155 | from scipy.spatial import KDTree 156 | from itertools import combinations 157 | from functools import partial 158 | 159 | arrange = partial(_arrangetriplet, sources=sources) 160 | 161 | inv = [] 162 | triang_vrtx = [] 163 | coordtree = KDTree(sources) 164 | # The number of nearest neighbors to request (to work with few sources) 165 | knn = min(len(sources), NUM_NEAREST_NEIGHBORS) 166 | for asrc in sources: 167 | __, indx = coordtree.query(asrc, knn) 168 | 169 | # Generate all possible triangles with the 5 indx provided, and store 170 | # them with the order (a, b, c) defined in _arrangetriplet 171 | all_asterism_triang = [ 172 | arrange(vertex_indices=list(cmb)) for cmb in combinations(indx, 3) 173 | ] 174 | triang_vrtx.extend(all_asterism_triang) 175 | 176 | inv.extend( 177 | [ 178 | _invariantfeatures(*sources[triplet]) 179 | for triplet in all_asterism_triang 180 | ] 181 | ) 182 | 183 | # Remove here all possible duplicate triangles 184 | uniq_ind = [ 185 | pos for (pos, elem) in enumerate(inv) if elem not in inv[pos + 1 :] 186 | ] 187 | inv_uniq = _np.array(inv)[uniq_ind] 188 | triang_vrtx_uniq = _np.array(triang_vrtx)[uniq_ind] 189 | 190 | return inv_uniq, triang_vrtx_uniq 191 | 192 | 193 | class _MatchTransform: 194 | def __init__(self, source, target): 195 | self.source = source 196 | self.target = target 197 | 198 | def fit(self, data): 199 | """ 200 | Return the best 2D similarity transform from the points given in data. 201 | 202 | data: N sets of similar corresponding triangles. 203 | 3 indices for a triangle in ref 204 | and the 3 indices for the corresponding triangle in target; 205 | arranged in a (N, 3, 2) array. 
        """
        d1, d2, d3 = data.shape
        # Flatten the (N, 3, 2) matches into two flat index arrays:
        # s indexes rows of self.source, d indexes rows of self.target.
        s, d = data.reshape(d1 * d2, d3).T
        approx_t = estimate_transform(
            "similarity", self.source[s], self.target[d]
        )
        return approx_t

    def get_error(self, data, approx_t):
        """Return one error per match: the max residual of its 3 vertices."""
        d1, d2, d3 = data.shape
        s, d = data.reshape(d1 * d2, d3).T
        resid = approx_t.residuals(self.source[s], self.target[d]).reshape(
            d1, d2
        )
        error = resid.max(axis=1)
        return error


def _data(image):
    """Return the bare 2D numpy array with pixel information."""
    if hasattr(image, "data") and isinstance(image.data, _np.ndarray):
        return image.data
    return _np.asarray(image)


def _mask(image):
    """Return a 2D numpy mask array if any, or None if there is no mask."""
    if hasattr(image, "mask"):
        the_np_mask = _np.asarray(image.mask)
        if the_np_mask.ndim == 2:
            return the_np_mask
        else:
            # Multi-channel mask: a pixel is masked if it is masked in
            # any channel.
            return _np.logical_or.reduce(the_np_mask, axis=-1)
    return None


def _bw(image):
    """Return a 2D numpy array for an array of arbitrary channels."""
    if image.ndim == 2:
        return image
    return _np.mean(image, axis=-1)


def _shape(image):
    """Return a 2D shape for the image, ignoring channel info."""
    if image.ndim == 2:
        return image.shape
    h, w, ch = image.shape
    return h, w


def find_transform(
    source, target, max_control_points=50, detection_sigma=5, min_area=5
):
    """Estimate the transform between ``source`` and ``target``.

    Return a SimilarityTransform object ``T`` that maps pixel x, y indices from
    the source image s = (x, y) into the target (destination) image t = (x, y).
    T contains parameters of the transformation: ``T.rotation``,
    ``T.translation``, ``T.scale``, ``T.params``.

    Parameters
    ----------
    source
        A 2D NumPy, CCData or NDData array of the source image to be transformed
        or an iterable of (x, y) coordinates of the source control points.
    target
        A 2D NumPy, CCData or NDData array of the target (destination) image
        or an iterable of (x, y) coordinates of the target control points.
    max_control_points
        The maximum number of control point-sources to find the transformation.
    detection_sigma : int
        Factor of background std-dev above which is considered a detection.
        This value is ignored if input are not images.
    min_area : int
        Minimum number of connected pixels to be considered a source.
        This value is ignored if input are not images.

    Returns
    -------
    T, (source_pos_array, target_pos_array)
        The transformation object and a tuple of corresponding star positions
        in source and target.

    Raises
    ------
    TypeError
        If input type of ``source`` or ``target`` is not supported.
    ValueError
        If it cannot find more than 3 stars on any input.
    MaxIterError
        If no transformation is found.
    """
    from scipy.spatial import KDTree

    try:
        if len(_data(source)[0]) == 2:
            # Assume it's a list of (x, y) pairs
            source_controlp = _np.array(source)[:max_control_points]
        else:
            # Assume it's a 2D image
            source_controlp = _find_sources(
                _bw(_data(source)),
                detection_sigma=detection_sigma,
                min_area=min_area,
                mask=_mask(source),
            )[:max_control_points]
    except Exception:
        raise TypeError("Input type for source not supported.")

    try:
        if len(_data(target)[0]) == 2:
            # Assume it's a list of (x, y) pairs
            target_controlp = _np.array(target)[:max_control_points]
        else:
            # Assume it's a 2D image
            target_controlp = _find_sources(
                _bw(_data(target)),
                detection_sigma=detection_sigma,
                min_area=min_area,
                mask=_mask(target),
            )[:max_control_points]
    except Exception:
        raise TypeError("Input type for target not supported.")

    # Check for low number of reference points
    if len(source_controlp) < 3:
        raise ValueError(
            "Reference stars in source image are less than the "
            "minimum value (3)."
        )
    if len(target_controlp) < 3:
        raise ValueError(
            "Reference stars in target image are less than the "
            "minimum value (3)."
        )

    source_invariants, source_asterisms = _generate_invariants(source_controlp)
    source_invariant_tree = KDTree(source_invariants)

    target_invariants, target_asterisms = _generate_invariants(target_controlp)
    target_invariant_tree = KDTree(target_invariants)

    # r = 0.1 is the maximum search distance, 0.1 is an empirical value that
    # returns about the same number of matches than inputs
    # matches_list is a list of lists such that for each element
    # source_invariant_tree.data[i], matches_list[i] is a list of the indices
    # of its neighbors in target_invariant_tree.data
    matches_list = source_invariant_tree.query_ball_tree(
        target_invariant_tree, r=0.1
    )

    # matches unravels the previous list of matches into pairs of source and
    # target control point matches.
    # matches is a (N, 3, 2) array. N sets of similar corresponding triangles.
    # 3 indices for a triangle in ref
    # and the 3 indices for the corresponding triangle in target;
    matches = []
    # t1 is an asterism in source, t2 in target
    for t1, t2_list in zip(source_asterisms, matches_list):
        for t2 in target_asterisms[t2_list]:
            matches.append(list(zip(t1, t2)))
    matches = _np.array(matches)

    inv_model = _MatchTransform(source_controlp, target_controlp)
    n_invariants = len(matches)
    # Set the minimum matches to be between 1 and 10 asterisms
    min_matches = max(1, min(10, int(n_invariants * MIN_MATCHES_FRACTION)))
    if (len(source_controlp) == 3 or len(target_controlp) == 3) and len(
        matches
    ) == 1:
        # With only one possible match RANSAC cannot partition the data;
        # fit that single triangle directly.
        best_t = inv_model.fit(matches)
        inlier_ind = _np.arange(len(matches))  # All of the indices
    else:
        best_t, inlier_ind = _ransac(
            matches, inv_model, PIXEL_TOL, min_matches
        )
    triangle_inliers = matches[inlier_ind]
    d1, d2, d3 = triangle_inliers.shape
    inl_arr = 
triangle_inliers.reshape(d1 * d2, d3)
    inl_unique = set(tuple(pair) for pair in inl_arr)
    # In the next, multiple assignments to the same source point s are removed
    # We keep the pair (s, t) with the lowest reprojection error.
    inl_dict = {}
    for s_i, t_i in inl_unique:
        # calculate error
        s_vertex = source_controlp[s_i]
        t_vertex = target_controlp[t_i]
        t_vertex_pred = matrix_transform(s_vertex, best_t.params)
        error = _np.linalg.norm(t_vertex_pred - t_vertex)

        # if s_i not in dict, or if its error is smaller than previous error
        if s_i not in inl_dict or (error < inl_dict[s_i][1]):
            inl_dict[s_i] = (t_i, error)
    inl_arr_unique = _np.array(
        [[s_i, t_i] for s_i, (t_i, e) in inl_dict.items()]
    )
    s, d = inl_arr_unique.T

    return best_t, (source_controlp[s], target_controlp[d])


def apply_transform(
    transform, source, target, fill_value=None, propagate_mask=False
):
    """Apply the transformation ``transform`` to ``source``.

    The output image will have the same shape as ``target``.

    Parameters
    ----------
    transform
        A scikit-image ``SimilarityTransform`` object.
    source
        A 2D NumPy, CCData or NDData array of the source image to be transformed.
    target
        A 2D NumPy, CCData or NDData array of the target image.
        Only used to set the output image shape.
    fill_value : float
        A value to fill in the areas of aligned_image where footprint == True.
    propagate_mask : bool
        Whether to propagate the mask in source.mask onto footprint.

    Returns
    -------
    aligned_image, footprint
        ``aligned_image`` is a numpy 2D array of the transformed source.
        ``footprint`` is a mask 2D array with True on the regions with no pixel
        information.
    """
    from skimage.transform import warp

    source_data = _data(source)
    target_shape = _data(target).shape

    aligned_image = warp(
        source_data,
        inverse_map=transform.inverse,
        output_shape=target_shape,
        order=3,
        mode="constant",
        cval=_np.median(source_data),
        clip=True,
        preserve_range=True,
    )

    # Warp an all-zeros image with cval=1.0: output pixels that fall outside
    # the source come out > 0 and mark the no-information regions.
    footprint = warp(
        _np.zeros(_shape(source_data), dtype="float32"),
        inverse_map=transform.inverse,
        output_shape=target_shape,
        cval=1.0,
    )
    footprint = footprint > 0.4

    source_mask = _mask(source)
    if source_mask is not None and propagate_mask:
        if source_mask.shape == source_data.shape:
            # Warp the source mask the same way and merge it into footprint.
            source_mask_rot = warp(
                source_mask.astype("float32"),
                inverse_map=transform.inverse,
                output_shape=target_shape,
                cval=1.0,
            )
            source_mask_rot = source_mask_rot > 0.4
            footprint = footprint | source_mask_rot
    if fill_value is not None:
        aligned_image[footprint] = fill_value

    return aligned_image, footprint


def register(
    source,
    target,
    fill_value=None,
    propagate_mask=False,
    max_control_points=50,
    detection_sigma=5,
    min_area=5,
):
    """Transform ``source`` to coincide pixel to pixel with ``target``.

    Parameters
    ----------
    source
        A 2D NumPy, CCData or NDData array of the source image to be transformed.
    target
        A 2D NumPy, CCData or NDData array of the target image.
        Used to set the output image shape as well.
    fill_value
        A value to fill in the areas of aligned_image where footprint == True.
    propagate_mask : bool
        Whether to propagate the mask in source.mask onto footprint.
    max_control_points
        The maximum number of control point-sources to find the transformation.
    detection_sigma : int
        Factor of background std-dev above which is considered a detection.
    min_area : int
        Minimum number of connected pixels to be considered a source.

    Returns
    -------
    aligned_image, footprint
        ``aligned_image`` is a numpy 2D array of the transformed source.
        ``footprint`` is a mask 2D array with True on the regions with no pixel
        information.

    Raises
    ------
    TypeError
        If input type of ``source`` or ``target`` is not supported.
    ValueError
        If it cannot find more than 3 stars on any input.
    MaxIterError
        If no transformation is found.
    """
    # Convenience wrapper: find_transform + apply_transform in one call.
    t, __ = find_transform(
        source=source,
        target=target,
        max_control_points=max_control_points,
        detection_sigma=detection_sigma,
        min_area=min_area,
    )
    aligned_image, footprint = apply_transform(
        t, source, target, fill_value, propagate_mask
    )
    return aligned_image, footprint


def _find_sources(img, detection_sigma=5, min_area=5, mask=None):
    """Return sources (x, y) sorted by brightness."""
    import sep

    image = img.astype("float32")
    bkg = sep.Background(image, mask=mask)
    thresh = detection_sigma * bkg.globalrms
    # Extract detections above threshold on the background-subtracted image.
    sources = sep.extract(
        image - bkg.back(), thresh, minarea=min_area, mask=mask
    )
    # sort() is ascending by flux; the [::-1] below returns brightest first.
    sources.sort(order="flux")
    return _np.array([[asrc["x"], asrc["y"]] for asrc in sources[::-1]])


# Copyright (c) 2004-2007, Andrew D. Straw. All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:

# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above
#   copyright notice, this list of conditions and the following
#   disclaimer in the documentation and/or other materials provided
#   with the distribution.

# * Neither the name of the Andrew D. Straw nor the names of its
#   contributors may be used to endorse or promote products derived
#   from this software without specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# a PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Modified by Martin Beroiz


class MaxIterError(RuntimeError):
    """Raise if maximum iterations reached."""

    pass


def _ransac(data, model, thresh, min_matches):
    """Fit model parameters to data using the RANSAC algorithm.

    This implementation written from pseudocode found at
    http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182

    Parameters
    ----------
    data: a set of data points
    model: a model that can be fitted to data points
    thresh: a threshold value to determine when a data point fits a model
    min_matches: the min number of matches required to assert that a model
        fits well to data

    Returns
    -------
    bestfit: model parameters which best fit the data (or nil if no good
        model is found), together with the indices of the inlier data points.
    """
    good_fit = None
    n_data = data.shape[0]
    all_idxs = _np.arange(n_data)
    # Shuffle so the single-sample candidates are visited in random order.
    _np.random.default_rng().shuffle(all_idxs)

    for iter_i in range(n_data):
        # Partition indices into two random subsets
        # (one candidate sample vs. everything else).
        maybe_idxs = all_idxs[iter_i : iter_i + 1]
        test_idxs = list(all_idxs[:iter_i])
        test_idxs.extend(list(all_idxs[iter_i + 1 :]))
        test_idxs = _np.array(test_idxs, dtype="i8")
        maybeinliers = data[maybe_idxs, :]
        test_points = data[test_idxs, :]
        maybemodel = model.fit(maybeinliers)
        test_err = model.get_error(test_points, maybemodel)
        # select indices of rows with accepted points
        also_idxs = test_idxs[test_err < thresh]
        alsoinliers = data[also_idxs, :]
        if len(alsoinliers) >= min_matches:
            # Enough consensus: accept this candidate and stop searching.
            good_data = _np.concatenate((maybeinliers, alsoinliers))
            good_fit = model.fit(good_data)
            break

    if good_fit is None:
        raise MaxIterError(
            "List of matching triangles exhausted before an acceptable "
            "transformation was found"
        )

    # Refine: re-estimate the model on its full inlier set a few times so
    # the fit is no longer biased by the initial single-sample candidate.
    better_fit = good_fit
    for i in range(3):
        test_err = model.get_error(data, better_fit)
        better_inlier_idxs = _np.arange(n_data)[test_err < thresh]
        better_data = data[better_inlier_idxs]
        better_fit = model.fit(better_data)
    best_fit = better_fit
    best_inlier_idxs = better_inlier_idxs
    return best_fit, 
# MIT License

# Copyright (c) 2016-2019 Martin Beroiz

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Tests for the astroalign package: image simulation helpers plus test
cases for registration, transform estimation, and color-image support."""

import unittest
import numpy as np
import astroalign as aa
from astropy.nddata import NDData
from ccdproc import CCDData
from skimage.transform import SimilarityTransform
from skimage.transform import estimate_transform, matrix_transform
import tempfile
from PIL import Image


def gauss(shape=(11, 11), center=None, sx=2, sy=2):
    "Returns a Gaussian of given shape, normalized to 1."
    h, w = shape
    if center is None:
        # Default to the geometric center of the kernel grid.
        center = ((h - 1) / 2.0, (w - 1) / 2.0)
    x0, y0 = center
    x, y = np.meshgrid(range(w), range(h))
    krnl = np.exp(-0.5 * ((x - x0) ** 2 / sx ** 2 + (y - y0) ** 2 / sy ** 2))
    krnl /= krnl.sum()
    return krnl


def simulate_image_pair(
    shape=(512, 512),
    kshape=(10, 10),
    noise_level=500,
    gshape=(21, 21),
    gsigma=1.5,
    translation=(10, -20),
    rot_angle_deg=50.0,
    num_stars=1500,
    star_refx=None,
    star_refy=None,
    star_flux=None,
    random_seed=None,
):
    """Simulate a reference image and a rotated/translated copy of it.

    Stars are scattered over a disk large enough to cover both frames,
    stamped as point fluxes on a Poisson sky, and convolved with a
    Gaussian PSF. Returns ``(image, image_ref, star_ref_pos,
    star_new_pos)`` where the position arrays are (x, y) integer pixel
    coordinates of the stars that landed inside each frame.
    """
    from scipy import signal
    rng = np.random.default_rng(seed=random_seed)

    h, w = shape  # image height and width
    kh, kw = kshape  # kernel height and width
    psf = gauss(shape=gshape, sx=gsigma, sy=gsigma)
    # Transformation parameters
    x_offset, y_offset = translation

    rot_angle = rot_angle_deg * np.pi / 180.0

    # Radius of a disk that covers the image under any rotation, padded
    # by the translation so stars can fall inside either frame.
    big_r = 0.5 * np.sqrt(h ** 2 + w ** 2) + max(abs(x_offset), abs(y_offset))

    # Poisson sky backgrounds; oversized by the kernel so the PSF
    # convolution can be cropped back to `shape` without edge effects.
    image_ref = rng.poisson(noise_level, size=(h + kh, w + kw)).astype(
        "float64"
    )
    image = rng.poisson(noise_level, size=(h + kh, w + kw)).astype(
        "float64"
    )

    # x and y of stars in the ref frame (int's)
    if star_refx is None:
        star_refx = rng.integers(
            low=int(-big_r) + w / 2, high=int(big_r) + w / 2, size=(num_stars,)
        )
    if star_refy is None:
        star_refy = rng.integers(
            low=int(-big_r) + h / 2, high=int(big_r) + h / 2, size=(num_stars,)
        )
    # Fluxes of stars
    if star_flux is None:
        a, m = 0.8, 3.0 * image_ref.std()  # These are Pareto dist coeff's
        star_flux = (1.0 + rng.pareto(a, num_stars)) * m

    # inframe will contain the stars in the reference image
    inframe = []
    ymax, xmax = image_ref.shape
    for x, y, f in zip(star_refx, star_refy, star_flux):
        if x > 0 and x < xmax and y > 0 and y < ymax:
            inframe.append((int(x), int(y), f))
    ref_cols, ref_rows, ref_flux = np.array(inframe).astype(int).T

    image_ref[ref_rows, ref_cols] += ref_flux
    image_ref = signal.convolve2d(image_ref, psf, mode="same")
    image_ref = image_ref[kh // 2 : -kh // 2, kw // 2 : -kw // 2]
    # Adjust here the positions of rows and cols after cropping image
    ref_cols -= kw // 2
    ref_rows -= kh // 2

    # Apply the rigid transformation (rotation about the image center
    # followed by translation) to obtain star positions in the new frame.
    newx, newy = [], []
    for x, y in zip(star_refx, star_refy):
        x -= w / 2
        y -= h / 2
        xp = x * np.cos(rot_angle) - y * np.sin(rot_angle) + x_offset
        yp = x * np.sin(rot_angle) + y * np.cos(rot_angle) + y_offset
        xp += w / 2
        yp += h / 2
        newx.append(xp)
        newy.append(yp)
    # x and y of stars in the new frame (float's)
    star_newx = np.array(newx)
    star_newy = np.array(newy)

    inframe = []
    ymax, xmax = image.shape
    for x, y, f in zip(star_newx, star_newy, star_flux):
        # BUGFIX: y was previously compared against xmax; for
        # non-square images that bound is wrong.
        if x > 0 and x < xmax and y > 0 and y < ymax:
            inframe.append((int(x), int(y), f))
    new_cols, new_rows, new_flux = np.array(inframe).astype(int).T

    image[new_rows, new_cols] += new_flux
    image = signal.convolve2d(image, psf, mode="same")
    image = image[kh // 2 : -kh // 2, kw // 2 : -kw // 2]
    # Adjust here the positions of rows and cols after cropping image
    new_cols -= kw // 2
    new_rows -= kh // 2

    star_ref_pos = np.array(list(zip(ref_cols, ref_rows)))
    star_new_pos = np.array(list(zip(new_cols, new_rows)))

    return image, image_ref, star_ref_pos, star_new_pos


def simulate_image_single(
    shape=(512, 512),
    kshape=(10, 10),
    noise_level=500,
    gshape=(21, 21),
    gsigma=1.5,
    num_stars=1500,
    star_refx=None,
    star_refy=None,
    star_flux=None,
    random_seed=None,
):
    """Simulate a single star field image.

    Same star placement and PSF model as :func:`simulate_image_pair`,
    but without a transformed companion. Returns ``(image, star_pos)``
    with star_pos as (x, y) integer pixel coordinates.
    """
    from scipy import signal
    rng = np.random.default_rng(seed=random_seed)

    h, w = shape  # image height and width
    kh, kw = kshape  # kernel height and width
    psf = gauss(shape=gshape, sx=gsigma, sy=gsigma)

    big_r = 0.5 * np.sqrt(h ** 2 + w ** 2)

    # Sky background
    image = rng.poisson(noise_level, size=(h + kh, w + kw)).astype(
        "float64"
    )

    # x and y of stars in the ref frame (int's)
    if star_refx is None:
        star_refx = rng.integers(
            low=int(-big_r) + w / 2, high=int(big_r) + w / 2, size=(num_stars,)
        )
    if star_refy is None:
        star_refy = rng.integers(
            low=int(-big_r) + h / 2, high=int(big_r) + h / 2, size=(num_stars,)
        )
    # Fluxes of stars
    if star_flux is None:
        a, m = 0.8, 3.0 * image.std()  # These are Pareto dist coeff's
        star_flux = (1.0 + rng.pareto(a, num_stars)) * m

    # inframe will contain the stars in the reference image
    inframe = []
    ymax, xmax = image.shape
    for x, y, f in zip(star_refx, star_refy, star_flux):
        if x > 0 and x < xmax and y > 0 and y < ymax:
            inframe.append((int(x), int(y), f))
    cols, rows, flux = np.array(inframe).astype(int).T

    image[rows, cols] += flux
    image = signal.convolve2d(image, psf, mode="same")
    image = image[kh // 2 : -kh // 2, kw // 2 : -kw // 2]
    # Adjust here the positions of rows and cols after cropping image
    cols -= kw // 2
    rows -= kh // 2

    star_pos = np.array(list(zip(cols, rows)))

    return image, star_pos


class TestAlign(unittest.TestCase):
    """End-to-end tests of register/find_transform/apply_transform on a
    simulated image pair, including masked-array, NDData and CCDData
    input containers."""

    def setUp(self):
        self.h = 512  # image height
        self.w = 512  # image width
        self.x_offset = 10
        self.y_offset = -20
        self.rot_angle = 50.0 * np.pi / 180.0
        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            random_seed=433886085,
        )
        # Small rectangular masks used by the masked-container tests.
        self.image_mask = np.zeros((self.h, self.w), dtype="bool")
        self.image_ref_mask = np.zeros((self.h, self.w), dtype="bool")
        self.image_mask[10:30, 70:90] = True
        self.image_ref_mask[10:30, 20:50] = True

    def test_find_transform_givensources(self):
        # Build a known similarity transform and check find_transform
        # recovers it from unordered point lists.
        source = np.array(
            [
                [1.4, 2.2],
                [5.3, 1.0],
                [3.7, 1.5],
                [10.1, 9.6],
                [1.3, 10.2],
                [7.1, 2.0],
            ]
        )
        nsrc = source.shape[0]
        scale = 1.5  # scaling parameter
        alpha = np.pi / 8.0  # rotation angle
        mm = scale * np.array(
            [[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]
        )
        tx, ty = 2.0, 1.0  # translation parameters
        transl = np.array([nsrc * [tx], nsrc * [ty]])
        dest = (mm.dot(source.T) + transl).T
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.default_rng().shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(t_true.scale - t.scale, 1e-10)
        self.assertLess(t_true.rotation - t.rotation, 1e-10)
        self.assertLess(
            np.linalg.norm(t_true.translation - t.translation), 1e-10
        )
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1e-10)

    def compare_image(self, the_image):
        """Return the fraction of sources found in the reference image"""
        # pixel comparison is not good, doesn't work. Compare catalogs.
        full_algn = the_image.astype("float32")
        import sep

        bkg = sep.Background(full_algn)
        thresh = 5.0 * bkg.globalrms
        allobjs = sep.extract(full_algn - bkg.back(), thresh)
        allxy = np.array([[obj["x"], obj["y"]] for obj in allobjs])

        from scipy.spatial import KDTree

        ref_coordtree = KDTree(self.star_ref_pos)

        # Compare here srcs list with self.star_ref_pos
        num_sources = 0
        for asrc in allxy:
            found_source = ref_coordtree.query_ball_point(asrc, 3)
            if found_source:
                num_sources += 1
        fraction_found = num_sources / len(allxy)
        return fraction_found

    def test_register(self):
        registered_img, footp = aa.register(
            source=self.image, target=self.image_ref
        )
        self.assertIsInstance(registered_img, np.ndarray)
        self.assertIsInstance(footp, np.ndarray)
        self.assertIs(footp.dtype, np.dtype("bool"))
        fraction = self.compare_image(registered_img)
        self.assertGreater(fraction, 0.85)

    def test_register_nddata(self):
        nd_image = NDData(self.image, mask=self.image_mask)
        nd_image_ref = NDData(self.image_ref, mask=self.image_ref_mask)
        registered_img, footp = aa.register(
            source=nd_image, target=nd_image_ref
        )
        self.assertIsInstance(registered_img, np.ndarray)
        self.assertIsInstance(footp, np.ndarray)
        self.assertIs(footp.dtype, np.dtype("bool"))
        fraction = self.compare_image(registered_img)
        self.assertGreater(fraction, 0.85)

    def test_register_ccddata(self):
        ccd_image = CCDData(
            self.image,
            mask=self.image_mask,
            meta={"object": "fake galaxy", "filter": "R"},
            unit="adu",
        )
        ccd_image_ref = CCDData(
            self.image_ref,
            mask=self.image_ref_mask,
            meta={"object": "fake galaxy", "filter": "R"},
            unit="adu",
        )
        registered_img, footp = aa.register(
            source=ccd_image, target=ccd_image_ref
        )
        self.assertIsInstance(registered_img, np.ndarray)
        self.assertIsInstance(footp, np.ndarray)
        self.assertIs(footp.dtype, np.dtype("bool"))
        fraction = self.compare_image(registered_img)
        self.assertGreater(fraction, 0.85)

    def test_register_npma(self):
        ma_image = np.ma.array(self.image, mask=self.image_mask)
        ma_image_ref = np.ma.array(self.image_ref, mask=self.image_ref_mask)
        registered_img, footp = aa.register(
            source=ma_image, target=ma_image_ref
        )
        self.assertIsInstance(registered_img, np.ndarray)
        self.assertIsInstance(footp, np.ndarray)
        self.assertIs(footp.dtype, np.dtype("bool"))
        fraction = self.compare_image(registered_img)
        self.assertGreater(fraction, 0.85)

    def test_apply_transform_nddata(self):
        transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))

        nd = NDData(
            [[0.0, 1.0], [2.0, 3.0]], mask=[[True, False], [False, False]]
        )
        registered_img, footp = aa.apply_transform(
            transf, nd, nd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, True], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

        # Test now if there is no assigned mask during creation
        nd = NDData([[0.0, 1.0], [2.0, 3.0]])
        registered_img, footp = aa.apply_transform(
            transf, nd, nd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, False], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

    def test_apply_transform_ccddata(self):

        transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))

        cd = CCDData(
            [[0.0, 1.0], [2.0, 3.0]],
            mask=[[True, False], [False, False]],
            unit="adu",
        )
        registered_img, footp = aa.apply_transform(
            transf, cd, cd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, True], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

        cd = CCDData([[0.0, 1.0], [2.0, 3.0]], unit="adu")
        registered_img, footp = aa.apply_transform(
            transf, cd, cd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, False], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

    def test_apply_transform_npma(self):
        # NOTE: the local re-import of SimilarityTransform was removed;
        # it shadowed the identical module-level import.
        transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))
        nparr = np.array([[0.0, 1.0], [2.0, 3.0]])
        mask = [[True, False], [False, False]]

        ma = np.ma.array(nparr, mask=mask)
        registered_img, footp = aa.apply_transform(
            transf, ma, ma, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, True], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

        ma = np.ma.array(nparr)
        registered_img, footp = aa.apply_transform(
            transf, ma, ma, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, False], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

    def test_fill_value(self):
        registered_img, footp = aa.register(
            source=self.image, target=self.image_ref, fill_value=-9999.99
        )
        self.assertTrue(all(registered_img[footp] == -9999.99))
        self.assertTrue(all(registered_img[~footp] != -9999.99))

    def test_find_sources(self):
        srcs = aa._find_sources(self.image_ref)

        from scipy.spatial import KDTree

        ref_coordtree = KDTree(self.star_ref_pos)

        # Compare here srcs list with self.star_ref_pos
        num_sources = 0
        for asrc in srcs:
            found_source = ref_coordtree.query_ball_point(asrc, 3)
            if found_source:
                num_sources += 1
        fraction_found = float(num_sources) / float(len(srcs))
        self.assertGreater(fraction_found, 0.85)

    def test_dtypes(self):
        # aa.register(self.image.astype('float16'), self.image_ref)
        aa.register(self.image.astype("float32"), self.image_ref)
        aa.register(self.image.astype("float64"), self.image_ref)
        aa.register(self.image.astype("int32"), self.image_ref)
        aa.register(self.image.astype("int64"), self.image_ref)

    def test_consistent_invert(self):
        # The transform source->ref composed with ref->source should be
        # close to the identity on a random point.
        t, __ = aa.find_transform(self.image, self.image_ref)
        tinv, __ = aa.find_transform(self.image_ref, self.image)
        rpoint = np.random.default_rng().random(3) * self.h
        rpoint[2] = 1.0
        rtransf = tinv.params.dot(t.params.dot(rpoint))
        err = np.linalg.norm(rpoint - rtransf) / np.linalg.norm(rpoint)
        self.assertLess(err, 1e-2)

    def test_unrepeated_sources(self):
        source = np.array(
            [[0.0, 2.0], [1.0, 3.0], [2.1, 1.75], [3.5, 1.0], [4.0, 2.0]]
        )
        R = np.array(
            [
                [np.cos(30.0 * np.pi / 180), np.sin(30.0 * np.pi / 180)],
                [-np.sin(30.0 * np.pi / 180), np.cos(30.0 * np.pi / 180)],
            ]
        )
        tr = np.array([-0.5, 2.5])
        target = R.dot(source.T).T + tr
        best_t, (s_list, t_list) = aa.find_transform(source, target)
        self.assertEqual(len(s_list), len(t_list))
        self.assertLessEqual(len(s_list), len(source))
        # Assert no repeated sources used
        source_set = set((x, y) for x, y in s_list)
        self.assertEqual(len(s_list), len(source_set))
        # Assert no repeated targets used
        target_set = set((x, y) for x, y in t_list)
        self.assertEqual(len(t_list), len(target_set))
        # Assert s_list is a subset of source
        self.assertTrue(source_set <= set((x, y) for x, y in source))
        # Assert t_list is a subset of target
        self.assertTrue(target_set <= set((x, y) for x, y in target))

    def test_consistent_result(self):
        t1, __ = aa.find_transform(source=self.image, target=self.image_ref)
        for i in range(5):
            t2, __ = aa.find_transform(
                source=self.image, target=self.image_ref
            )
            self.assertLess(np.linalg.norm(t1.params - t2.params), 1e-10)


class TestFewSources(unittest.TestCase):
    """Tests of find_transform/register in the low-source-count regime
    (2 through 6 stars), including the expected failure for 2 stars."""

    def setUp(self):
        self.h = 512  # image height
        self.w = 512  # image width
        self.x_offset = 10
        self.y_offset = -20
        self.rot_angle = 50.0 * np.pi / 180.0

    def check_if_findtransform_ok(self, numstars):
        """Helper function to test find_transform with common test code
        for 3, 4, 5, and 6 stars"""

        if numstars > 6:
            raise NotImplementedError

        # x and y of stars in the ref frame (int's)
        self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
        self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
        self.num_stars = numstars
        # Fluxes of stars
        self.star_f = np.array(numstars * [700.0])
        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            num_stars=self.num_stars,
            star_refx=self.star_refx,
            star_refy=self.star_refy,
            star_flux=self.star_f,
            random_seed=923010207,
        )

        source = self.star_ref_pos
        dest = self.star_new_pos.copy()
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.default_rng().shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(t_true.scale - t.scale, 1e-10)
        self.assertLess(t_true.rotation - t.rotation, 1e-10)
        self.assertLess(
            np.linalg.norm(t_true.translation - t.translation), 1.0
        )
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertLessEqual(src_pts.shape[0], source.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1.0)

    def test_find_transform_twosources(self):
        # Two points cannot define a triangle match: must raise.
        with self.assertRaises(Exception):
            self.check_if_findtransform_ok(2)

    def test_find_transform_threesources(self):
        self.check_if_findtransform_ok(3)

    def test_find_transform_foursources(self):
        self.check_if_findtransform_ok(4)

    def test_find_transform_fivesources(self):
        self.check_if_findtransform_ok(5)

    def test_find_transform_sixsources(self):
        self.check_if_findtransform_ok(6)

    def check_if_register_ok(self, numstars):
        """Helper function to test register with common test code
        for 3, 4, 5, and 6 stars"""

        if numstars > 6:
            raise NotImplementedError

        # x and y of stars in the ref frame (int's)
        self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
        self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
        self.num_stars = numstars
        # Fluxes of stars
        self.star_f = np.array(numstars * [700.0])
        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            noise_level=50,
            num_stars=self.num_stars,
            star_refx=self.star_refx,
            star_refy=self.star_refy,
            star_flux=self.star_f,
            random_seed=841890526,
        )

        aligned, footprint = aa.register(self.image_ref, self.image)

        source = self.star_ref_pos
        dest = self.star_new_pos.copy()
        t_true = estimate_transform("similarity", source, dest)
        aligned_true, fp = aa.apply_transform(
            t_true, self.image_ref, self.image
        )

        # Compare the registration against a ground-truth warp using a
        # relative L1 error over the valid footprint.
        err = np.linalg.norm((aligned_true - aligned)[fp], 1) / np.linalg.norm(
            (aligned_true)[fp], 1
        )
        self.assertLess(err, 1e-1)

    def test_register_twosources(self):
        with self.assertRaises(Exception):
            self.check_if_register_ok(2)

    def test_register_threesources(self):
        self.check_if_register_ok(3)

    def test_register_foursources(self):
        self.check_if_register_ok(4)

    def test_register_fivesources(self):
        self.check_if_register_ok(5)

    def test_register_sixsources(self):
        self.check_if_register_ok(6)

    def test_three_match_one_off(self):
        "Test corner case with 1 triangle match + unmatched extra star"
        source = [(1, 2), (3, 4), (5, 6), (8, 9)]
        target = [(2, 2), (4, 4), (6, 6), (15, 9)]
        with self.assertRaises(aa.MaxIterError):
            aa.find_transform(source, target)


class TestColorImages(unittest.TestCase):
    """Tests of register on multi-channel (RGB/RGBA) arrays and on
    JPEG/PNG images opened with PIL."""

    def setUp(self):
        def convert_to_uint8(sky_arr):
            # Rescale to [0, 512) then clip and cast for image encoding.
            sky_max, sky_min = sky_arr.max(), sky_arr.min()
            sky_arr = (sky_arr - sky_min) * 512 / (sky_max - sky_min)
            sky_arr = np.clip(sky_arr, 0, 512)
            return sky_arr.astype("uint8")

        self.h = 512  # image height
        self.w = 512  # image width
        self.x_offset = 10
        self.y_offset = -20
        self.rot_angle = 50.0 * np.pi / 180.0
        (
            image_new,
            image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            noise_level=10.0,
            num_stars=150,
            star_flux=np.array([1000.0] * 150),
            random_seed=532961203,
        )
        # RGB: replicate the grayscale frame into 3 channels (last axis).
        self.image_rgb_new = np.array(
            [image_new.copy(), image_new.copy(), image_new.copy()]
        )
        self.image_rgb_ref = np.array(
            [image_ref.copy(), image_ref.copy(), image_ref.copy()]
        )
        self.image_rgb_new = np.moveaxis(self.image_rgb_new, 0, -1)
        self.image_rgb_ref = np.moveaxis(self.image_rgb_ref, 0, -1)

        # RGBA: same, plus a fully opaque alpha channel.
        self.image_rgba_new = np.array(
            [
                image_new.copy(),
                image_new.copy(),
                image_new.copy(),
                255.0 * np.ones(image_new.shape),
            ]
        )
        self.image_rgba_ref = np.array(
            [
                image_ref.copy(),
                image_ref.copy(),
                image_ref.copy(),
                255.0 * np.ones(image_new.shape),
            ]
        )
        self.image_rgba_new = np.moveaxis(self.image_rgba_new, 0, -1)
        self.image_rgba_ref = np.moveaxis(self.image_rgba_ref, 0, -1)

        # Encode the RGB pair as JPEG and the RGBA pair as PNG into
        # temporary files (closed in tearDown).
        self.jpgref_fp = tempfile.TemporaryFile()
        sky_ref = convert_to_uint8(self.image_rgb_ref)
        Image.fromarray(sky_ref).save(self.jpgref_fp, "jpeg")

        self.jpgnew_fp = tempfile.TemporaryFile()
        sky_new = convert_to_uint8(self.image_rgb_new)
        Image.fromarray(sky_new).save(self.jpgnew_fp, "jpeg")

        self.pngref_fp = tempfile.TemporaryFile()
        sky_ref = convert_to_uint8(self.image_rgba_ref)
        Image.fromarray(sky_ref).save(self.pngref_fp, "png")

        self.pngnew_fp = tempfile.TemporaryFile()
        sky_new = convert_to_uint8(self.image_rgba_new)
        Image.fromarray(sky_new).save(self.pngnew_fp, "png")

    def tearDown(self):
        self.jpgref_fp.close()
        self.jpgnew_fp.close()
        self.pngref_fp.close()
        self.pngnew_fp.close()

    def compare_image(self, the_image):
        """Return the fraction of sources found in the reference image"""
        # pixel comparison is not good, doesn't work. Compare catalogs.
        # Collapse channels to grayscale before source extraction.
        full_algn = np.mean(the_image, axis=-1, dtype="float32")
        import sep

        bkg = sep.Background(full_algn)
        thresh = 5.0 * bkg.globalrms
        allobjs = sep.extract(full_algn - bkg.back(), thresh)
        allxy = np.array([[obj["x"], obj["y"]] for obj in allobjs])

        from scipy.spatial import KDTree

        ref_coordtree = KDTree(self.star_ref_pos)

        # Compare here srcs list with self.star_ref_pos
        num_sources = 0
        for asrc in allxy:
            found_source = ref_coordtree.query_ball_point(asrc, 3)
            if found_source:
                num_sources += 1
        fraction_found = num_sources / len(allxy)
        return fraction_found

    def test_register_rgb_channels(self):
        "Test register works with RGB images"
        registered, footp = aa.register(
            source=self.image_rgb_new, target=self.image_rgb_ref
        )
        self.assertEqual(registered.ndim, self.image_rgb_new.ndim)
        fraction = self.compare_image(registered)
        self.assertGreater(fraction, 0.70)
        self.assertTrue(footp.ndim == 2)
        self.assertTrue(footp.shape == (self.h, self.w))

    def test_register_rgba_channels(self):
        "Test register works with RGBA images"
        registered, footp = aa.register(
            source=self.image_rgba_new, target=self.image_rgba_ref
        )
        self.assertEqual(registered.ndim, self.image_rgba_new.ndim)
        fraction = self.compare_image(registered)
        self.assertGreater(fraction, 0.60)
        self.assertTrue(footp.ndim == 2)
        self.assertTrue(footp.shape == (self.h, self.w))

    def test_register_jpg_image(self):
        source = Image.open(self.jpgnew_fp)
        target = Image.open(self.jpgref_fp)
        registered, footp = aa.register(source, target)
        self.assertEqual(registered.ndim, self.image_rgb_new.ndim)
        fraction = self.compare_image(registered)
        self.assertGreater(fraction, 0.70)
        self.assertTrue(footp.ndim == 2)
        self.assertTrue(footp.shape == (self.h, self.w))

    def test_register_png_image(self):
        source = Image.open(self.pngnew_fp)
        target = Image.open(self.pngref_fp)
        registered, footp = aa.register(source, target)
        self.assertEqual(registered.ndim, self.image_rgba_new.ndim)
        fraction = self.compare_image(registered)
        self.assertGreater(fraction, 0.70)
        self.assertTrue(footp.ndim == 2)
        self.assertTrue(footp.shape == (self.h, self.w))


if __name__ == "__main__":
    unittest.main()