├── docs ├── aa_bw.jpg ├── api.rst ├── installation.rst ├── index.rst ├── make.bat ├── tutorial.rst ├── Makefile └── conf.py ├── MANIFEST.in ├── .travis.yml ├── LICENSE.txt ├── tests ├── __init__.py └── test_align.py ├── tox.ini ├── setup.py ├── README.md ├── ez_setup.py ├── benchmarks ├── flux_bench.py ├── time_bench.py └── time_regression.py └── astroalign.py /docs/aa_bw.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mwcraig/astroalign/master/docs/aa_bw.jpg -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | 3 | Module API 4 | ========== 5 | 6 | .. toctree:: 7 | :maxdepth: 2 8 | 9 | .. automodule:: astroalign 10 | :members: 11 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include the README 2 | include README.md 3 | 4 | # Include the license file 5 | include LICENSE.txt 6 | 7 | # Include logo file for README 8 | include docs/aa_bw.jpg 9 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | The easiest way to install is using pip:: 5 | 6 | pip install astroalign 7 | 8 | This will install the latest stable version on PyPI. 9 | 10 | If you want to use the latest development version from github, unpack or clone the `repo `_ on your local machine, 11 | change the directory to where setup.py is, and install using setuptools:: 12 | 13 | python setup.py install 14 | 15 | or pip:: 16 | 17 | pip install . 
18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | sudo: false 3 | install: 4 | - pip install tox 5 | script: 6 | - tox -r 7 | matrix: 8 | include: 9 | - env: TOXENV=style 10 | python: 3.7 11 | dist: xenial 12 | sudo: true 13 | 14 | - env: TOXENV=coverage 15 | python: 3.7 16 | dist: xenial 17 | sudo: true 18 | 19 | - env: TOXENV=docs 20 | python: 3.7 21 | dist: xenial 22 | sudo: true 23 | 24 | - env: TOXENV=py36 25 | python: 3.6 26 | 27 | - python: 3.7 28 | env: TOXENV=py37 29 | dist: xenial 30 | sudo: true 31 | 32 | - env: TOXENV=py38 33 | python: 3.8 34 | dist: xenial 35 | sudo: true 36 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016-2019 Martin Beroiz 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # MIT License 5 | 6 | # Copyright (c) 2016-2019 Martin Beroiz 7 | 8 | # Permission is hereby granted, free of charge, to any person obtaining a copy 9 | # of this software and associated documentation files (the "Software"), to deal 10 | # in the Software without restriction, including without limitation the rights 11 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 12 | # copies of the Software, and to permit persons to whom the Software is 13 | # furnished to do so, subject to the following conditions: 14 | 15 | # The above copyright notice and this permission notice shall be included in 16 | # all copies or substantial portions of the Software. 17 | 18 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 21 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 23 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 | # SOFTWARE. 
25 | 26 | 27 | """Torosfactory Tests""" 28 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 5 | 6 | [tox] 7 | envlist = py38, py36, py37, style, coverage, docs 8 | 9 | 10 | [testenv] 11 | usedevelop = False 12 | deps = 13 | astropy 14 | ccdproc 15 | pytest 16 | ipdb 17 | commands = 18 | pytest tests -v {posargs} 19 | 20 | 21 | [testenv:style] 22 | skip_install = True 23 | usedevelop = False 24 | deps = flake8 25 | commands = 26 | flake8 astroalign.py setup.py tests {posargs} 27 | 28 | 29 | [testenv:coverage] 30 | usedevelop = True 31 | deps = 32 | astropy 33 | ccdproc 34 | pytest 35 | coverage 36 | pytest-cov 37 | codecov>=1.4.0 38 | passenv = 39 | TOXENV 40 | CI 41 | TRAVIS 42 | TRAVIS_* 43 | CODECOV_* 44 | commands = 45 | pytest tests --cov-report=xml --cov astroalign tests --cov-fail-under=90 {posargs} 46 | codecov -e TOXENV 47 | 48 | 49 | [testenv:docs] 50 | description = "Invoke sphinx-build to build the HTML docs" 51 | usedevelop = True 52 | skip_install = False 53 | changedir = docs 54 | deps = 55 | Sphinx 56 | sphinx-rtd-theme 57 | commands = 58 | sphinx-build -W -b html -d {envtmpdir}/doctrees . 
{envtmpdir}/html 59 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2016-2019 Martin Beroiz 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 
22 | 23 | 24 | # ============================================================================= 25 | # IMPORTS 26 | # ============================================================================= 27 | 28 | import os 29 | 30 | from ez_setup import use_setuptools 31 | use_setuptools() 32 | 33 | from setuptools import setup # noqa 34 | 35 | 36 | # ============================================================================= 37 | # PATH TO THIS MODULE 38 | # ============================================================================= 39 | 40 | PATH = os.path.abspath(os.path.dirname(__file__)) 41 | 42 | 43 | # ============================================================================= 44 | # Get the version from astroalign file itself (not imported) 45 | # ============================================================================= 46 | 47 | ASTROALIGN_PY_PATH = os.path.join(PATH, "astroalign.py") 48 | 49 | with open(ASTROALIGN_PY_PATH, 'r') as f: 50 | for line in f: 51 | if line.startswith('__version__'): 52 | _, _, AA_VERSION = line.replace('"', "").split() 53 | break 54 | 55 | 56 | # ============================================================================= 57 | # RETRIEVE TE README 58 | # ============================================================================= 59 | 60 | README_MD_PATH = os.path.join(PATH, "README.md") 61 | 62 | with open(README_MD_PATH, 'r') as f: 63 | LONG_DESCRIPTION = f.read() 64 | 65 | 66 | # ============================================================================= 67 | # THE REQUIREMENTS! 68 | # ============================================================================= 69 | 70 | REQUIREMENTS = [ 71 | "numpy>=1.6.2", 72 | "scipy>=0.15", 73 | "scikit-image", 74 | "sep"] 75 | 76 | 77 | # ============================================================================= 78 | # THE SETUP ITSELF! 
79 | # ============================================================================= 80 | 81 | def run(): 82 | setup( 83 | name='astroalign', 84 | version=AA_VERSION, 85 | description='Astrometric Alignment of Images', 86 | long_description=LONG_DESCRIPTION, 87 | long_description_content_type='text/markdown', 88 | author='Martin Beroiz', 89 | author_email='martinberoiz@gmail.com', 90 | url='https://astroalign.readthedocs.io/', 91 | py_modules=['astroalign', 'ez_setup'], 92 | install_requires=REQUIREMENTS, 93 | test_suite='tests') 94 | 95 | 96 | if __name__ == "__main__": 97 | run() 98 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | .. image:: aa_bw.jpg 4 | :width: 200px 5 | :align: left 6 | 7 | Astroalign documentation 8 | ======================== 9 | 10 | **ASTROALIGN** is a python module that will try to register (align) two stellar astronomical images, 11 | especially when there is no WCS information available. 12 | 13 | It does so by finding similar 3-point asterisms (triangles) in both images and estimating the 14 | affine transformation between them. 15 | 16 | Generic registration routines try to match point features, using corner detection routines to make the point correspondence. 17 | These generally fail for stellar astronomical images, since stars have very little stable structure and so, in general, indistinguishable from each other. 18 | Asterism matching is more robust, and closer to the human way of matching stellar images. 19 | 20 | Astroalign can match images of very different fields of view, point-spread functions, seeing and atmospheric conditions. 21 | 22 | You can find a Jupyter notebook example with the main features at http://toros-astro.github.io/astroalign. 23 | 24 | .. 
note:: 25 | 26 | It may not work, or work with special care, on images of extended objects with few point-like sources or in very crowded fields. 27 | 28 | .. note:: 29 | If your images contain a large number of hot pixels, this may result in an incorrect registration. 30 | Please refer to the tutorial for how to solve this problem using `CCDProc's cosmic-ray remover `_. 31 | 32 | 33 | Citation 34 | -------- 35 | 36 | If you use astroalign in a scientific publication, we would appreciate 37 | citations to the following paper: 38 | 39 | Astroalign: A Python module for astronomical image registration. 40 | Beroiz, M., Cabral, J. B., & Sanchez, B. 41 | Astronomy and Computing, Volume 32, July 2020, 100384. 42 | 43 | 44 | Bibtex entry:: 45 | 46 | @article{BEROIZ2020100384, 47 | title = "Astroalign: A Python module for astronomical image registration", 48 | journal = "Astronomy and Computing", 49 | volume = "32", 50 | pages = "100384", 51 | year = "2020", 52 | issn = "2213-1337", 53 | doi = "https://doi.org/10.1016/j.ascom.2020.100384", 54 | url = "http://www.sciencedirect.com/science/article/pii/S221313372030038X", 55 | author = "M. Beroiz and J.B. Cabral and B. Sanchez", 56 | keywords = "Astronomy, Image registration, Python package", 57 | abstract = "We present an algorithm implemented in the Astroalign Python module for image registration in astronomy. Our module does not rely on WCS information and instead matches three-point asterisms ( triangles) on the images to find the most accurate linear transformation between them. It is especially useful in the context of aligning images prior to stacking or performing difference image analysis. Astroalign can match images of different point-spread functions, seeing, and atmospheric conditions." 58 | } 59 | 60 | **Full Publication:** https://www.sciencedirect.com/science/article/pii/S221313372030038X 61 | 62 | 63 | Or cite the project itself from `ASCL `_: 64 | 65 | Beroiz, M. I. (2019). 
Astroalign: Asterism-matching alignment of 66 | astronomical images. Astrophysics Source Code Library. 67 | 68 | Bibtex:: 69 | 70 | @article{beroiz2019astroalign, 71 | title={Astroalign: Asterism-matching alignment of astronomical images}, 72 | author={Beroiz, Martin I}, 73 | journal={Astrophysics Source Code Library}, 74 | year={2019} 75 | } 76 | 77 | 78 | Guide: 79 | ^^^^^^ 80 | 81 | .. toctree:: 82 | :maxdepth: 2 83 | 84 | installation 85 | tutorial 86 | api 87 | 88 | 89 | Indices and tables 90 | ================== 91 | 92 | * :ref:`genindex` 93 | * :ref:`search` 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Astroalign 2 | 3 | *** 4 | 5 | [![Build Status](https://travis-ci.org/toros-astro/astroalign.svg?branch=master)](https://travis-ci.org/toros-astro/astroalign) 6 | [![Coverage](https://codecov.io/github/toros-astro/astroalign/coverage.svg?branch=master)](https://codecov.io/github/toros-astro/astroalign) 7 | [![Documentation Status](https://readthedocs.org/projects/astroalign/badge/?version=latest)](http://astroalign.readthedocs.org/en/latest/?badge=latest) 8 | [![Updates](https://pyup.io/repos/github/toros-astro/astroalign/shield.svg)](https://pyup.io/repos/github/toros-astro/astroalign/) 9 | [![Python 3](https://pyup.io/repos/github/toros-astro/astroalign/python-3-shield.svg)](https://pyup.io/repos/github/toros-astro/astroalign/) 10 | [![PyPI](https://img.shields.io/pypi/v/astroalign)](https://pypi.org/project/astroalign/) 11 | ![PyPI - Downloads](https://img.shields.io/pypi/dm/astroalign) 12 | [![ascl:1906.001](https://img.shields.io/badge/ascl-1906.001-blue.svg?colorB=262255)](http://ascl.net/1906.001) 13 | 14 | 15 | **ASTROALIGN** is a python module that will try to align two stellar astronomical images, especially when there is no WCS information available. 
16 | 17 | It does so by finding similar 3-point asterisms (triangles) in both images and deducing the affine transformation between them. 18 | 19 | Generic registration routines try to match feature points, using corner 20 | detection routines to make the point correspondence. 21 | These generally fail for stellar astronomical images, since stars have very 22 | little stable structure and so, in general, indistinguishable from each other. 23 | Asterism matching is more robust, and closer to the human way of matching stellar images. 24 | 25 | Astroalign can match images of very different field of view, point-spread function, seeing and atmospheric conditions. 26 | 27 | It may not work, or work with special care, on images of extended objects with few point-like sources or in very crowded fields. 28 | 29 | You can find a Jupyter notebook example with the main features at [http://toros-astro.github.io/astroalign/](http://toros-astro.github.io/astroalign/). 30 | 31 | **Full documentation:** https://astroalign.readthedocs.io/ 32 | 33 | *** 34 | 35 | ## Installation 36 | 37 | Using setuptools: 38 | 39 | $ pip install astroalign 40 | 41 | or from this distribution with 42 | 43 | $ python setup.py install 44 | 45 | *** 46 | 47 | ## Usage example 48 | 49 | >>> import astroalign as aa 50 | >>> aligned_image, footprint = aa.register(source_image, target_image) 51 | 52 | In this example `source_image` will be interpolated by a transformation to coincide pixel to pixel with `target_image` and stored in `aligned_image`. 53 | 54 | If we are only interested in knowing the transformation and the correspondence of control points in both images, use `find_transform` will return the transformation in a [Scikit-Image](https://scikit-image.org/) SimilarityTransform object and a list of stars in source with the corresponding stars in target. 
55 | 56 | >>> transf, (s_list, t_list) = aa.find_transform(source, target) 57 | 58 | `source` and `target` can each either be the numpy array of the image, or an iterable of (x, y) pairs of star positions on the image. 59 | 60 | The returned `transf` object is a scikit-image [`SimilarityTranform`](http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.SimilarityTransform) object that contains the transformation matrix along with the scale, rotation and translation parameters. 61 | 62 | `s_list` and `t_list` are numpy arrays of (x, y) point correspondence between `source` and `target`. `transf` applied to `s_list` will approximately render `t_list`. 63 | 64 | *** 65 | 66 | ## Citation 67 | 68 | If you use astroalign in a scientific publication, we would appreciate citations to the following [paper](https://www.sciencedirect.com/science/article/pii/S221313372030038X): 69 | 70 | Astroalign: A Python module for astronomical image registration. 71 | Beroiz, M., Cabral, J. B., & Sanchez, B. 72 | Astronomy & Computing, Volume 32, July 2020, 100384. 73 | 74 | *** 75 | 76 | TOROS Dev Team 77 | 78 | 79 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. 
singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 1>NUL 2>NUL 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 
67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 
128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\astroalign.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\astroalign.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 
193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. 
The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/tutorial.rst: -------------------------------------------------------------------------------- 1 | Tutorial 2 | ======== 3 | 4 | A simple usage example 5 | ---------------------- 6 | 7 | Suppose we have two images of about the same portion of the sky, and we would like to transform one of them to fit on top of the other. 8 | Suppose we do not have WCS information, but we are confident that we could do it by eye, by matching some obvious asterisms on the two images. 9 | 10 | In this particular use case, astroalign can be of great help to automatize the process. 11 | 12 | After we load our images into numpy arrays, we simple choose one to be the source image to be transformed, and the other to be the target. 13 | 14 | The usage for this simple most common case would be as follows:: 15 | 16 | >>> import astroalign as aa 17 | >>> registered_image, footprint = aa.register(source, target) 18 | 19 | ``registered_image`` is now a transformed (numpy array) image of ``source`` that will match pixel to pixel to ``target``. 20 | 21 | ``footprint`` is a boolean numpy array, ``True`` for masked pixels with no information. 22 | 23 | .. note:: 24 | * If instead of images, you have lists of bright, reference star positions on each image, 25 | see :ref:`ftransf`. 26 | 27 | * astroalign.register will also accept as input, data objects with `data` and `mask` properties, like NDData, CCDData and Numpy masked arrays. 28 | For more information, see :ref:`dataobjs` 29 | 30 | * Check this `Jupyter notebook `_ for a more complete example. 31 | 32 | .. warning:: 33 | Flux may not be conserved after the transformation. 
34 | 35 | Mask Fill Value 36 | --------------- 37 | 38 | If you need to mask the aligned image with a special value over the region where transformation had no pixel information, 39 | you can use the ``footprint`` mask to do so:: 40 | 41 | >>> registered_image, footprint = aa.register(source, target) 42 | >>> registered_image[footprint] = -99999.99 43 | 44 | Or you can pass the value to the ``fill_value`` argument:: 45 | 46 | >>> registered_image, footprint = aa.register(source, target, fill_value=-99999.99) 47 | 48 | Both will yield the same result. 49 | 50 | .. _ftransf: 51 | 52 | Finding the transformation 53 | -------------------------- 54 | 55 | In some cases it may be necessary to inspect first the transformation parameters before applying it, 56 | or we may be interested only in a star to star correspondence between the images. 57 | For those cases, we can use ``find_transform``:: 58 | 59 | >>> transf, (source_list, target_list) = aa.find_transform(source, target) 60 | 61 | The inputs ``source`` and ``target`` can be either numpy arrays of the image pixels, 62 | **or any iterable of (x, y) pairs**, corresponding to star positions. 63 | 64 | Having an iterable of (x, y) pairs is especially useful in situations where source detection requires special care. 65 | In situations like that, source detection can be done separately and the resulting catalogs fed to ``find_transform``. 66 | 67 | ``find_transform`` returns a `scikit-image `_ `SimilarityTransform `_ object that encapsulates the matrix transformation, 68 | and the transformation parameters. 69 | It will also return a tuple with two lists of star positions of ``source`` and its corresponding ordered star positions on 70 | the ``target`` image. 71 | 72 | The transformation parameters can be found in ``transf.rotation``, ``transf.translation``, ``transf.scale`` 73 | and the transformation matrix in ``transf.params``. 
74 | 75 | If the transformation is satisfactory, we can apply it to the image with ``apply_transform``. 76 | Continuing our example:: 77 | 78 | >>> if transf.rotation > MIN_ROT: 79 | ... registered_image = aa.apply_transform(transf, source, target) 80 | 81 | If you know the star-to-star correspondence 82 | ------------------------------------------- 83 | 84 | .. note:: 85 | `estimate_transform `_ 86 | from `scikit-image` is imported into astroalign as a convenience. 87 | 88 | If for any reason you know which star corresponds to which other, you can call ``estimate_transform``. 89 | 90 | Let us suppose we know the correspondence: 91 | 92 | - (127.03, 85.98) in source --> (175.13, 111.36) in target 93 | - (23.11, 31.87) in source --> (0.58, 119.04) in target 94 | - (98.84, 142.99) in source --> (181.55, 206.49) in target 95 | - (150.93, 85.02) in source --> (205.60, 91.89) in target 96 | - (137.99, 12.88) in source --> (134.61, 7.94) in target 97 | 98 | Then we can estimate the transform:: 99 | 100 | >>> src = np.array([(127.03, 85.98), (23.11, 31.87), (98.84, 142.99), 101 | ... (150.93, 85.02), (137.99, 12.88)]) 102 | >>> dst = np.array([(175.13, 111.36), (0.58, 119.04), (181.55, 206.49), 103 | ... (205.60, 91.89), (134.61, 7.94)]) 104 | >>> tform = aa.estimate_transform('affine', src, dst) 105 | 106 | And apply it to an image with ``apply_transform`` or to a set of points with ``matrix_transform``. 107 | 108 | Applying a transformation to a set of points 109 | -------------------------------------------- 110 | 111 | .. note:: 112 | `matrix_transform `_ 113 | from `scikit-image` is imported into astroalign as a convenience. 114 | 115 | To apply a known transform to a set of points, we use ``matrix_transform``. 116 | Following the example in the previous section:: 117 | 118 | >>> dst_calc = aa.matrix_transform(src, tform.params) 119 | 120 | ``dst_calc`` should be a 5 by 2 array similar to the ``dst`` array. 121 | 122 | 123 | .. 
_dataobjs: 124 | 125 | Dealing with Data Objects with data and mask properties (NDData, CCDData, Numpy masked arrays) 126 | ---------------------------------------------------------------------------------------------- 127 | 128 | If your input data comes in the form of `ccdproc `_'s 129 | `CCDData `_ 130 | or `astropy `_'s 131 | `NDData `_ 132 | or a `numpy `_ 133 | `masked array `_, 134 | there are a few ways to interact with astroalign. 135 | 136 | In general, for objects with `data` and `mask` properties, it is convenient to transform their masks 137 | along with the data and to add the footprint onto the mask. 138 | 139 | Astroalign provides this functionality with the `propagate_mask` argument to `register` and `apply_transform`. 140 | 141 | For example:: 142 | 143 | >>> from astropy.nddata import NDData 144 | >>> nd = NDData([[0, 1], [2, 3]], [[True, False], [False, False]]) 145 | 146 | and we want to apply a clockwise 90 degree rotation:: 147 | 148 | >>> import numpy as np 149 | >>> from skimage.transform import SimilarityTransform 150 | >>> transf = SimilarityTransform(rotation=np.pi/2., translation=(1, 0)) 151 | 152 | Then we can call astroalign as usual, but with the `propagate_mask` set to True:: 153 | 154 | >>> aligned_image, footprint = aa.apply_transform(transf, nd, nd, propagate_mask=True) 155 | 156 | This will transform `nd.data` and `nd.mask` simultaneously and add the `footprint` mask from the transformation onto `nd.mask`:: 157 | 158 | >>> aligned_image 159 | array([[2., 0.], 160 | [3., 1.]]) 161 | >>> footprint 162 | array([[False, True], 163 | [False, False]]) 164 | 165 | Creating a new object of the same input type is now easier:: 166 | 167 | >>> new_nd = NDData(aligned_image, mask=footprint) 168 | 169 | The same will apply for CCDData objects and Numpy masked arrays. 170 | 171 | Dealing with hot pixels 172 | ----------------------- 173 | 174 | Hot pixels always appear on the same position of the CCD. 
175 | If your image is dominated by hot pixels, the source detection algorithm may pick those up 176 | and output the identity transformation. 177 | 178 | To avoid this, you can use `CCDProc's cosmicray_lacosmic `_ to clean the image before trying registration:: 179 | 180 | from ccdproc import cosmicray_lacosmic as lacosmic 181 | clean_image = lacosmic(myimage) 182 | 183 | ---------------------------------------- 184 | 185 | See :ref:`api` for the API specification. 186 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help 23 | help: 24 | @echo "Please use \`make ' where is one of" 25 | @echo " html to make standalone HTML files" 26 | @echo " dirhtml to make HTML files named index.html in directories" 27 | @echo " singlehtml to make a single large HTML file" 28 | @echo " pickle to make pickle files" 29 | @echo " json to make JSON files" 30 | @echo " htmlhelp to make HTML files and a HTML help project" 31 | @echo " qthelp to make HTML files and a qthelp project" 32 | @echo " applehelp to make an Apple Help Book" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | @echo " coverage to run coverage check of the documentation (if enabled)" 49 | 50 | .PHONY: clean 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | .PHONY: html 55 | html: 56 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 57 | @echo 58 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 59 | 60 | .PHONY: dirhtml 61 | dirhtml: 62 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 63 | @echo 64 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
65 | 66 | .PHONY: singlehtml 67 | singlehtml: 68 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 69 | @echo 70 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 71 | 72 | .PHONY: pickle 73 | pickle: 74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 75 | @echo 76 | @echo "Build finished; now you can process the pickle files." 77 | 78 | .PHONY: json 79 | json: 80 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 81 | @echo 82 | @echo "Build finished; now you can process the JSON files." 83 | 84 | .PHONY: htmlhelp 85 | htmlhelp: 86 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 87 | @echo 88 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 89 | ".hhp project file in $(BUILDDIR)/htmlhelp." 90 | 91 | .PHONY: qthelp 92 | qthelp: 93 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 94 | @echo 95 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 96 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 97 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/astroalign.qhcp" 98 | @echo "To view the help file:" 99 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/astroalign.qhc" 100 | 101 | .PHONY: applehelp 102 | applehelp: 103 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 104 | @echo 105 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 106 | @echo "N.B. You won't be able to view it unless you put it in" \ 107 | "~/Library/Documentation/Help or install it in your application" \ 108 | "bundle." 109 | 110 | .PHONY: devhelp 111 | devhelp: 112 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 113 | @echo 114 | @echo "Build finished." 
115 | @echo "To view the help file:" 116 | @echo "# mkdir -p $$HOME/.local/share/devhelp/astroalign" 117 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/astroalign" 118 | @echo "# devhelp" 119 | 120 | .PHONY: epub 121 | epub: 122 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 123 | @echo 124 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 125 | 126 | .PHONY: latex 127 | latex: 128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 129 | @echo 130 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 131 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 132 | "(use \`make latexpdf' here to do that automatically)." 133 | 134 | .PHONY: latexpdf 135 | latexpdf: 136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 137 | @echo "Running LaTeX files through pdflatex..." 138 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 139 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 140 | 141 | .PHONY: latexpdfja 142 | latexpdfja: 143 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 144 | @echo "Running LaTeX files through platex and dvipdfmx..." 145 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 146 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 147 | 148 | .PHONY: text 149 | text: 150 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 151 | @echo 152 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 153 | 154 | .PHONY: man 155 | man: 156 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 157 | @echo 158 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 159 | 160 | .PHONY: texinfo 161 | texinfo: 162 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 163 | @echo 164 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 165 | @echo "Run \`make' in that directory to run these through makeinfo" \ 166 | "(use \`make info' here to do that automatically)." 
167 | 168 | .PHONY: info 169 | info: 170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 171 | @echo "Running Texinfo files through makeinfo..." 172 | make -C $(BUILDDIR)/texinfo info 173 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 174 | 175 | .PHONY: gettext 176 | gettext: 177 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 178 | @echo 179 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 180 | 181 | .PHONY: changes 182 | changes: 183 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 184 | @echo 185 | @echo "The overview file is in $(BUILDDIR)/changes." 186 | 187 | .PHONY: linkcheck 188 | linkcheck: 189 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 190 | @echo 191 | @echo "Link check complete; look for any errors in the above output " \ 192 | "or in $(BUILDDIR)/linkcheck/output.txt." 193 | 194 | .PHONY: doctest 195 | doctest: 196 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 197 | @echo "Testing of doctests in the sources finished, look at the " \ 198 | "results in $(BUILDDIR)/doctest/output.txt." 199 | 200 | .PHONY: coverage 201 | coverage: 202 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 203 | @echo "Testing of coverage in the sources finished, look at the " \ 204 | "results in $(BUILDDIR)/coverage/python.txt." 205 | 206 | .PHONY: xml 207 | xml: 208 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 209 | @echo 210 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 211 | 212 | .PHONY: pseudoxml 213 | pseudoxml: 214 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 215 | @echo 216 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
217 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # astroalign documentation build configuration file, created by 4 | # sphinx-quickstart on Mon Jan 18 14:29:21 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | # from recommonmark.parser import CommonMarkParser 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | sys.path.insert(0, os.path.abspath('..')) 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | #needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | extensions = [ 33 | 'sphinx.ext.autodoc', 34 | 'sphinx.ext.coverage', 35 | 'sphinx.ext.mathjax', 36 | 'sphinx.ext.napoleon', 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | source_parsers = { 43 | # '.md': CommonMarkParser, 44 | } 45 | 46 | # The suffix(es) of source filenames. 47 | # You can specify multiple suffix as a list of string: 48 | source_suffix = ['.rst', '.md'] 49 | # source_suffix = '.rst' 50 | 51 | # The encoding of source files. 
52 | #source_encoding = 'utf-8-sig' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # General information about the project. 58 | project = u'astroalign' 59 | copyright = u'2017, Martin Beroiz' 60 | author = u'Martin Beroiz' 61 | 62 | # The version info for the project you're documenting, acts as replacement for 63 | # |version| and |release|, also used in various other places throughout the 64 | # built documents. 65 | # 66 | # Get the version from astroalign file itself (not imported) 67 | with open(os.path.join('..', 'astroalign.py'), 'r') as f: 68 | for line in f: 69 | if line.startswith('__version__'): 70 | _, _, __version__ = line.replace('"', "").split() 71 | break 72 | 73 | # The short X.Y version. 74 | version = __version__ 75 | # The full version, including alpha/beta/rc tags. 76 | release = __version__ 77 | 78 | # The language for content autogenerated by Sphinx. Refer to documentation 79 | # for a list of supported languages. 80 | # 81 | # This is also used if you do content translation via gettext catalogs. 82 | # Usually you set "language" from the command line for these cases. 83 | language = None 84 | 85 | # There are two options for replacing |today|: either, you set today to some 86 | # non-false value, then it is used: 87 | #today = '' 88 | # Else, today_fmt is used as the format for a strftime call. 89 | #today_fmt = '%B %d, %Y' 90 | 91 | # List of patterns, relative to source directory, that match files and 92 | # directories to ignore when looking for source files. 93 | exclude_patterns = ['_build'] 94 | 95 | # The reST default role (used for this markup: `text`) to use for all 96 | # documents. 97 | #default_role = None 98 | 99 | # If true, '()' will be appended to :func: etc. cross-reference text. 100 | #add_function_parentheses = True 101 | 102 | # If true, the current module name will be prepended to all description 103 | # unit titles (such as .. function::). 
104 | #add_module_names = True 105 | 106 | # If true, sectionauthor and moduleauthor directives will be shown in the 107 | # output. They are ignored by default. 108 | #show_authors = False 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # A list of ignored prefixes for module index sorting. 114 | #modindex_common_prefix = [] 115 | 116 | # If true, keep warnings as "system message" paragraphs in the built documents. 117 | #keep_warnings = False 118 | 119 | # If true, `todo` and `todoList` produce output, else they produce nothing. 120 | todo_include_todos = False 121 | 122 | # -- Options for Napoleon ------------------------------------------------- 123 | 124 | napoleon_google_docstring = True 125 | napoleon_numpy_docstring = False 126 | 127 | # -- Options for autodoc -------------------------------------------------- 128 | 129 | autodoc_mock_imports = [ 130 | "scipy", "scipy.spatial", "scipy.spatial.KDTree", "numpy", 131 | "skimage", "skimage.transform", 132 | "skimage.transform.estimate_transform", 133 | "skimage.transform.matrix_transform", 134 | "skimage.transform.warp", 135 | "sep", 136 | ] 137 | 138 | # -- Options for HTML output ---------------------------------------------- 139 | 140 | # The theme to use for HTML and HTML Help pages. See the documentation for 141 | # a list of builtin themes. 142 | html_theme = 'sphinx_rtd_theme' 143 | 144 | # Theme options are theme-specific and customize the look and feel of a theme 145 | # further. For a list of options available for each theme, see the 146 | # documentation. 147 | #html_theme_options = {} 148 | 149 | # Add any paths that contain custom themes here, relative to this directory. 150 | #html_theme_path = [] 151 | 152 | # The name for this set of Sphinx documents. If None, it defaults to 153 | # " v documentation". 154 | #html_title = None 155 | 156 | # A shorter title for the navigation bar. Default is the same as html_title. 
157 | #html_short_title = None 158 | 159 | # The name of an image file (relative to this directory) to place at the top 160 | # of the sidebar. 161 | #html_logo = None 162 | 163 | # The name of an image file (within the static path) to use as favicon of the 164 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 165 | # pixels large. 166 | #html_favicon = None 167 | 168 | # Add any paths that contain custom static files (such as style sheets) here, 169 | # relative to this directory. They are copied after the builtin static files, 170 | # so a file named "default.css" will overwrite the builtin "default.css". 171 | # html_static_path = ['_static'] 172 | 173 | # Add any extra paths that contain custom files (such as robots.txt or 174 | # .htaccess) here, relative to this directory. These files are copied 175 | # directly to the root of the documentation. 176 | #html_extra_path = [] 177 | 178 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 179 | # using the given strftime format. 180 | #html_last_updated_fmt = '%b %d, %Y' 181 | 182 | # If true, SmartyPants will be used to convert quotes and dashes to 183 | # typographically correct entities. 184 | #html_use_smartypants = True 185 | 186 | # Custom sidebar templates, maps document names to template names. 187 | #html_sidebars = {} 188 | 189 | # Additional templates that should be rendered to pages, maps page names to 190 | # template names. 191 | #html_additional_pages = {} 192 | 193 | # If false, no module index is generated. 194 | #html_domain_indices = True 195 | 196 | # If false, no index is generated. 197 | #html_use_index = True 198 | 199 | # If true, the index is split into individual pages for each letter. 200 | #html_split_index = False 201 | 202 | # If true, links to the reST sources are added to the pages. 203 | #html_show_sourcelink = True 204 | 205 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
206 | #html_show_sphinx = True 207 | 208 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 209 | #html_show_copyright = True 210 | 211 | # If true, an OpenSearch description file will be output, and all pages will 212 | # contain a tag referring to it. The value of this option must be the 213 | # base URL from which the finished HTML is served. 214 | #html_use_opensearch = '' 215 | 216 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 217 | #html_file_suffix = None 218 | 219 | # Language to be used for generating the HTML full-text search index. 220 | # Sphinx supports the following languages: 221 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 222 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 223 | #html_search_language = 'en' 224 | 225 | # A dictionary with options for the search language support, empty by default. 226 | # Now only 'ja' uses this config value 227 | #html_search_options = {'type': 'default'} 228 | 229 | # The name of a javascript file (relative to the configuration directory) that 230 | # implements a search results scorer. If empty, the default will be used. 231 | #html_search_scorer = 'scorer.js' 232 | 233 | # Output file base name for HTML help builder. 234 | htmlhelp_basename = 'astroaligndoc' 235 | 236 | # -- Options for LaTeX output --------------------------------------------- 237 | 238 | latex_elements = { 239 | # The paper size ('letterpaper' or 'a4paper'). 240 | #'papersize': 'letterpaper', 241 | 242 | # The font size ('10pt', '11pt' or '12pt'). 243 | #'pointsize': '10pt', 244 | 245 | # Additional stuff for the LaTeX preamble. 246 | #'preamble': '', 247 | 248 | # Latex figure (float) alignment 249 | #'figure_align': 'htbp', 250 | } 251 | 252 | # Grouping the document tree into LaTeX files. List of tuples 253 | # (source start file, target name, title, 254 | # author, documentclass [howto, manual, or own class]). 
255 | latex_documents = [ 256 | (master_doc, 'astroalign.tex', u'astroalign Documentation', 257 | u'Martin Beroiz', 'manual'), 258 | ] 259 | 260 | # The name of an image file (relative to this directory) to place at the top of 261 | # the title page. 262 | #latex_logo = None 263 | 264 | # For "manual" documents, if this is true, then toplevel headings are parts, 265 | # not chapters. 266 | #latex_use_parts = False 267 | 268 | # If true, show page references after internal links. 269 | #latex_show_pagerefs = False 270 | 271 | # If true, show URL addresses after external links. 272 | #latex_show_urls = False 273 | 274 | # Documents to append as an appendix to all manuals. 275 | #latex_appendices = [] 276 | 277 | # If false, no module index is generated. 278 | #latex_domain_indices = True 279 | 280 | 281 | # -- Options for manual page output --------------------------------------- 282 | 283 | # One entry per manual page. List of tuples 284 | # (source start file, name, description, authors, manual section). 285 | man_pages = [ 286 | (master_doc, 'astroalign', u'astroalign Documentation', 287 | [author], 1) 288 | ] 289 | 290 | # If true, show URL addresses after external links. 291 | #man_show_urls = False 292 | 293 | 294 | # -- Options for Texinfo output ------------------------------------------- 295 | 296 | # Grouping the document tree into Texinfo files. List of tuples 297 | # (source start file, target name, title, author, 298 | # dir menu entry, description, category) 299 | texinfo_documents = [ 300 | (master_doc, 'astroalign', u'astroalign Documentation', 301 | author, 'astroalign', 'One line description of project.', 302 | 'Miscellaneous'), 303 | ] 304 | 305 | # Documents to append as an appendix to all manuals. 306 | #texinfo_appendices = [] 307 | 308 | # If false, no module index is generated. 309 | #texinfo_domain_indices = True 310 | 311 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 
312 | #texinfo_show_urls = 'footnote' 313 | 314 | # If true, do not generate a @detailmenu in the "Top" node's menu. 315 | #texinfo_no_detailmenu = False 316 | -------------------------------------------------------------------------------- /ez_setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Setuptools bootstrapping installer. 5 | 6 | Run this script to install or upgrade setuptools. 7 | """ 8 | 9 | import os 10 | import shutil 11 | import sys 12 | import tempfile 13 | import zipfile 14 | import optparse 15 | import subprocess 16 | import platform 17 | import textwrap 18 | import contextlib 19 | import warnings 20 | 21 | from distutils import log 22 | 23 | try: 24 | from urllib.request import urlopen 25 | except ImportError: 26 | from urllib2 import urlopen 27 | 28 | try: 29 | from site import USER_SITE 30 | except ImportError: 31 | USER_SITE = None 32 | 33 | DEFAULT_VERSION = "18.0.1" 34 | DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/" 35 | DEFAULT_SAVE_DIR = os.curdir 36 | 37 | 38 | def _python_cmd(*args): 39 | """ 40 | Execute a command. 41 | 42 | Return True if the command succeeded. 
43 | """ 44 | args = (sys.executable,) + args 45 | return subprocess.call(args) == 0 46 | 47 | 48 | def _install(archive_filename, install_args=()): 49 | """Install Setuptools.""" 50 | with archive_context(archive_filename): 51 | # installing 52 | log.warn('Installing Setuptools') 53 | if not _python_cmd('setup.py', 'install', *install_args): 54 | log.warn('Something went wrong during the installation.') 55 | log.warn('See the error message above.') 56 | # exitcode will be 2 57 | return 2 58 | 59 | 60 | def _build_egg(egg, archive_filename, to_dir): 61 | """Build Setuptools egg.""" 62 | with archive_context(archive_filename): 63 | # building an egg 64 | log.warn('Building a Setuptools egg in %s', to_dir) 65 | _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) 66 | # returning the result 67 | log.warn(egg) 68 | if not os.path.exists(egg): 69 | raise IOError('Could not build the egg.') 70 | 71 | 72 | class ContextualZipFile(zipfile.ZipFile): 73 | 74 | """Supplement ZipFile class to support context manager for Python 2.6.""" 75 | 76 | def __enter__(self): 77 | return self 78 | 79 | def __exit__(self, type, value, traceback): 80 | self.close() 81 | 82 | def __new__(cls, *args, **kwargs): 83 | """Construct a ZipFile or ContextualZipFile as appropriate.""" 84 | if hasattr(zipfile.ZipFile, '__exit__'): 85 | return zipfile.ZipFile(*args, **kwargs) 86 | return super(ContextualZipFile, cls).__new__(cls) 87 | 88 | 89 | @contextlib.contextmanager 90 | def archive_context(filename): 91 | """ 92 | Unzip filename to a temporary directory, set to the cwd. 93 | 94 | The unzipped target is cleaned up after. 
95 | """ 96 | tmpdir = tempfile.mkdtemp() 97 | log.warn('Extracting in %s', tmpdir) 98 | old_wd = os.getcwd() 99 | try: 100 | os.chdir(tmpdir) 101 | with ContextualZipFile(filename) as archive: 102 | archive.extractall() 103 | 104 | # going in the directory 105 | subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) 106 | os.chdir(subdir) 107 | log.warn('Now working in %s', subdir) 108 | yield 109 | 110 | finally: 111 | os.chdir(old_wd) 112 | shutil.rmtree(tmpdir) 113 | 114 | 115 | def _do_download(version, download_base, to_dir, download_delay): 116 | """Download Setuptools.""" 117 | egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg' 118 | % (version, sys.version_info[0], sys.version_info[1])) 119 | if not os.path.exists(egg): 120 | archive = download_setuptools(version, download_base, 121 | to_dir, download_delay) 122 | _build_egg(egg, archive, to_dir) 123 | sys.path.insert(0, egg) 124 | 125 | # Remove previously-imported pkg_resources if present (see 126 | # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). 127 | if 'pkg_resources' in sys.modules: 128 | del sys.modules['pkg_resources'] 129 | 130 | import setuptools 131 | setuptools.bootstrap_install_from = egg 132 | 133 | 134 | def use_setuptools( 135 | version=DEFAULT_VERSION, download_base=DEFAULT_URL, 136 | to_dir=DEFAULT_SAVE_DIR, download_delay=15): 137 | """ 138 | Ensure that a setuptools version is installed. 139 | 140 | Return None. Raise SystemExit if the requested version 141 | or later cannot be installed. 142 | """ 143 | to_dir = os.path.abspath(to_dir) 144 | 145 | # prior to importing, capture the module state for 146 | # representative modules. 
147 | rep_modules = 'pkg_resources', 'setuptools' 148 | imported = set(sys.modules).intersection(rep_modules) 149 | 150 | try: 151 | import pkg_resources 152 | pkg_resources.require("setuptools>=" + version) 153 | # a suitable version is already installed 154 | return 155 | except ImportError: 156 | # pkg_resources not available; setuptools is not installed; download 157 | pass 158 | except pkg_resources.DistributionNotFound: 159 | # no version of setuptools was found; allow download 160 | pass 161 | except pkg_resources.VersionConflict as VC_err: 162 | if imported: 163 | _conflict_bail(VC_err, version) 164 | 165 | # otherwise, unload pkg_resources to allow the downloaded version to 166 | # take precedence. 167 | del pkg_resources 168 | _unload_pkg_resources() 169 | 170 | return _do_download(version, download_base, to_dir, download_delay) 171 | 172 | 173 | def _conflict_bail(VC_err, version): 174 | """ 175 | Setuptools was imported prior to invocation, so it is 176 | unsafe to unload it. Bail out. 177 | """ 178 | conflict_tmpl = textwrap.dedent(""" 179 | The required version of setuptools (>={version}) is not available, 180 | and can't be installed while this script is running. Please 181 | install a more recent version first, using 182 | 'easy_install -U setuptools'. 183 | 184 | (Currently using {VC_err.args[0]!r}) 185 | """) 186 | msg = conflict_tmpl.format(**locals()) 187 | sys.stderr.write(msg) 188 | sys.exit(2) 189 | 190 | 191 | def _unload_pkg_resources(): 192 | del_modules = [ 193 | name for name in sys.modules 194 | if name.startswith('pkg_resources') 195 | ] 196 | for mod_name in del_modules: 197 | del sys.modules[mod_name] 198 | 199 | 200 | def _clean_check(cmd, target): 201 | """ 202 | Run the command to download target. 203 | 204 | If the command fails, clean up before re-raising the error. 
205 | """ 206 | try: 207 | subprocess.check_call(cmd) 208 | except subprocess.CalledProcessError: 209 | if os.access(target, os.F_OK): 210 | os.unlink(target) 211 | raise 212 | 213 | 214 | def download_file_powershell(url, target): 215 | """ 216 | Download the file at url to target using Powershell. 217 | 218 | Powershell will validate trust. 219 | Raise an exception if the command cannot complete. 220 | """ 221 | target = os.path.abspath(target) 222 | ps_cmd = ( 223 | "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " 224 | "[System.Net.CredentialCache]::DefaultCredentials; " 225 | "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" 226 | % vars() 227 | ) 228 | cmd = [ 229 | 'powershell', 230 | '-Command', 231 | ps_cmd, 232 | ] 233 | _clean_check(cmd, target) 234 | 235 | 236 | def has_powershell(): 237 | """Determine if Powershell is available.""" 238 | if platform.system() != 'Windows': 239 | return False 240 | cmd = ['powershell', '-Command', 'echo test'] 241 | with open(os.path.devnull, 'wb') as devnull: 242 | try: 243 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 244 | except Exception: 245 | return False 246 | return True 247 | download_file_powershell.viable = has_powershell 248 | 249 | 250 | def download_file_curl(url, target): 251 | cmd = ['curl', url, '--silent', '--output', target] 252 | _clean_check(cmd, target) 253 | 254 | 255 | def has_curl(): 256 | cmd = ['curl', '--version'] 257 | with open(os.path.devnull, 'wb') as devnull: 258 | try: 259 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 260 | except Exception: 261 | return False 262 | return True 263 | download_file_curl.viable = has_curl 264 | 265 | 266 | def download_file_wget(url, target): 267 | cmd = ['wget', url, '--quiet', '--output-document', target] 268 | _clean_check(cmd, target) 269 | 270 | 271 | def has_wget(): 272 | cmd = ['wget', '--version'] 273 | with open(os.path.devnull, 'wb') as devnull: 274 | try: 275 | 
subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 276 | except Exception: 277 | return False 278 | return True 279 | download_file_wget.viable = has_wget 280 | 281 | 282 | def download_file_insecure(url, target): 283 | """Use Python to download the file, without connection authentication.""" 284 | src = urlopen(url) 285 | try: 286 | # Read all the data in one block. 287 | data = src.read() 288 | finally: 289 | src.close() 290 | 291 | # Write all the data in one block to avoid creating a partial file. 292 | with open(target, "wb") as dst: 293 | dst.write(data) 294 | download_file_insecure.viable = lambda: True 295 | 296 | 297 | def get_best_downloader(): 298 | downloaders = ( 299 | download_file_powershell, 300 | download_file_curl, 301 | download_file_wget, 302 | download_file_insecure, 303 | ) 304 | viable_downloaders = (dl for dl in downloaders if dl.viable()) 305 | return next(viable_downloaders, None) 306 | 307 | 308 | def download_setuptools( 309 | version=DEFAULT_VERSION, download_base=DEFAULT_URL, 310 | to_dir=DEFAULT_SAVE_DIR, delay=15, 311 | downloader_factory=get_best_downloader): 312 | """ 313 | Download setuptools from a specified location and return its filename. 314 | 315 | `version` should be a valid setuptools version number that is available 316 | as an sdist for download under the `download_base` URL (which should end 317 | with a '/'). `to_dir` is the directory where the egg will be downloaded. 318 | `delay` is the number of seconds to pause before an actual download 319 | attempt. 320 | 321 | ``downloader_factory`` should be a function taking no arguments and 322 | returning a function for downloading a URL to a target. 
323 | """ 324 | # making sure we use the absolute path 325 | to_dir = os.path.abspath(to_dir) 326 | zip_name = "setuptools-%s.zip" % version 327 | url = download_base + zip_name 328 | saveto = os.path.join(to_dir, zip_name) 329 | if not os.path.exists(saveto): # Avoid repeated downloads 330 | log.warn("Downloading %s", url) 331 | downloader = downloader_factory() 332 | downloader(url, saveto) 333 | return os.path.realpath(saveto) 334 | 335 | 336 | def _build_install_args(options): 337 | """ 338 | Build the arguments to 'python setup.py install' on the setuptools package. 339 | 340 | Returns list of command line arguments. 341 | """ 342 | return ['--user'] if options.user_install else [] 343 | 344 | 345 | def _parse_args(): 346 | """Parse the command line for options.""" 347 | parser = optparse.OptionParser() 348 | parser.add_option( 349 | '--user', dest='user_install', action='store_true', default=False, 350 | help='install in user site package (requires Python 2.6 or later)') 351 | parser.add_option( 352 | '--download-base', dest='download_base', metavar="URL", 353 | default=DEFAULT_URL, 354 | help='alternative URL from where to download the setuptools package') 355 | parser.add_option( 356 | '--insecure', dest='downloader_factory', action='store_const', 357 | const=lambda: download_file_insecure, default=get_best_downloader, 358 | help='Use internal, non-validating downloader' 359 | ) 360 | parser.add_option( 361 | '--version', help="Specify which version to download", 362 | default=DEFAULT_VERSION, 363 | ) 364 | parser.add_option( 365 | '--to-dir', 366 | help="Directory to save (and re-use) package", 367 | default=DEFAULT_SAVE_DIR, 368 | ) 369 | options, args = parser.parse_args() 370 | # positional arguments are ignored 371 | return options 372 | 373 | 374 | def _download_args(options): 375 | """Return args for download_setuptools function from cmdline args.""" 376 | return dict( 377 | version=options.version, 378 | download_base=options.download_base, 379 | 
downloader_factory=options.downloader_factory, 380 | to_dir=options.to_dir, 381 | ) 382 | 383 | 384 | def main(): 385 | """Install or upgrade setuptools and EasyInstall.""" 386 | options = _parse_args() 387 | archive = download_setuptools(**_download_args(options)) 388 | return _install(archive, _build_install_args(options)) 389 | 390 | if __name__ == '__main__': 391 | sys.exit(main()) 392 | -------------------------------------------------------------------------------- /benchmarks/flux_bench.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 


# =============================================================================
# IMPORTS
# =============================================================================

import sys
import os
import datetime as dt
import argparse

import numpy as np

import astroalign as aa

import pandas as pd

import sep

from skimage.transform import SimilarityTransform

from scipy import stats


test_path = os.path.abspath(os.path.dirname(aa.__file__))
sys.path.insert(0, test_path)

from tests.test_align import simulate_image_single  # noqa


# =============================================================================
# CONSTANTS
# =============================================================================

SIZE = 256

STARS = 300

NOISE = 100

REPEATS = 35

DEFAULT_SIZE = 6.4, 4.8


# =============================================================================
# FUNCTIONS
# =============================================================================

def get_image(size, stars, noise, seed):
    """Retrieves a single image"""
    if seed is not None:
        np.random.seed(seed)
    shape = (size, size)
    image = simulate_image_single(
        shape=shape, num_stars=stars, noise_level=noise)[:2]
    return image


def benchmark(size=SIZE, stars=STARS, noise=NOISE, repeats=REPEATS, seed=None):
    """Measure flux conservation under `repeats` random similarity transforms.

    Returns a DataFrame with one row per well-measured source per transform.
    """
    # get image
    image = get_image(size, stars, noise, seed)
    imagedata = np.ascontiguousarray(image[0])

    # detect sources (we know where they are, actually)
    bkg = sep.Background(imagedata)
    thresh = 3. * bkg.globalrms
    sources = sep.extract(imagedata - bkg.back(), thresh)
    sources.sort(order='flux')

    # perform photometry
    flux, fluxerr, flag = sep.sum_circle(
        imagedata-bkg.back(), sources['x'],
        sources['y'], 3.0, err=bkg.globalrms, gain=1.0)

    dframes = []
    # transform it
    for i_trsf in range(repeats):
        dx, dy = np.random.randint(
            low=-1 * size / 32., high=size / 32., size=2)
        theta = (np.random.random()-0.5)*0.125*np.pi
        s = 0.85+np.random.random()*0.3
        trf = SimilarityTransform(
            translation=(dx, dy), rotation=theta, scale=s)

        target = np.zeros(shape=np.array(imagedata.shape) * 2)
        newimage = aa.apply_transform(trf, imagedata - bkg.back(), target)

        # perform photometry on new places
        src_coords = np.array([sources['x'], sources['y']]).T
        new_coords = trf(src_coords).T
        nflux, nfluxerr, nflag = sep.sum_circle(
            newimage[0], new_coords[0], new_coords[1], 3.0 * s,
            err=bkg.globalrms, gain=1.0)

        # compare fluxes
        good_flux = nflag == 0
        new_to_orig = nflux[good_flux]/flux[good_flux]

        # put everything in a pd dataframe
        df = pd.DataFrame()

        df["idx"] = np.array([i_trsf] * sum(good_flux))
        df["seed"] = np.array([seed] * sum(good_flux))
        df["repeats"] = np.array([repeats] * sum(good_flux))

        df['orig_x'] = sources['x'][good_flux]
        df['orig_y'] = sources['y'][good_flux]
        df['orig_flux'] = flux[good_flux]
        df['orig_fluxerr'] = fluxerr[good_flux]
        df['orig_flag'] = flag[good_flux]

        df['new_x'] = new_coords[0][good_flux]
        df['new_y'] = new_coords[1][good_flux]
        df['new_flux'] = nflux[good_flux]
        df['new_fluxerr'] = nfluxerr[good_flux]
        df['new_flag'] = nflag[good_flux]

        df['flux_ratio'] = new_to_orig

        df['trf_theta'] = theta
        df['trf_dx'] = dx
        df['trf_dy'] = dy
        df['trf_scale'] = s

        slp, intpt, r_val, p_val, std_err = stats.linregress(
            flux[good_flux], nflux[good_flux])
        df['stats_slope'] = slp
        df['stats_intpt'] = intpt
        df['flux_per_area_ratio'] = df['flux_ratio'] / (df['trf_scale'] ** 2)

        dframes.append(df)

    final_df = pd.concat(dframes)

    return final_df


def describe(results):
    """Return the repetition count and a statistical summary of the data."""
    repetitions = results.repeats.values[0]
    resume = results[["flux_per_area_ratio"]].describe()
    return repetitions, resume


def plot(results, ax):
    """Draw the flux-ratio histogram plus a Gaussian reference on *ax*."""
    bins = np.arange(0.95, 1.05, 0.001)
    # ``density=True`` replaces the old ``normed`` kwarg, which was
    # removed from matplotlib in version 3.1.
    ax.hist(
        results.flux_per_area_ratio, density=True,
        histtype='step', bins=bins, label='Data')

    ax.plot(
        bins + (bins[1] - bins[0]) / 2.,
        stats.norm.pdf(
            bins,
            loc=np.mean(results.flux_per_area_ratio),
            scale=np.std(results.flux_per_area_ratio)),
        label='Gaussian')

    ax.legend(loc='best')

    ax.set_title("Flux ratio per unit area")
    ax.set_xlabel('Flux ratio per unit area')
    ax.set_ylabel('Normalized N')

    return ax


# =============================================================================
# CLI MAIN
# =============================================================================

class CLI:
    """Command-line front-end: benchmark / describe / plot subcommands."""

    def __init__(self):
        self._parser = argparse.ArgumentParser(
            description="Astroalign flux benchmark tool")
        self._parser.set_defaults(
            callback=lambda ns: self.parser.print_usage())

        self._parser.add_argument(
            '--version', action='version', version='%(prog)s 2019.10')

        subparsers = self._parser.add_subparsers()

        # =====================================================================
        # benchmark subparser
        # =====================================================================

        benchmark = subparsers.add_parser(
            "benchmark",
            help="Execute and collect the flux benchmark data of astroalign")
        benchmark.set_defaults(callback=self.benchmark_command)

        benchmark.add_argument(
            "--size", dest="size", type=int, default=SIZE,
            help=("The size in pixels of the image. This parameter creates "
                  f"square figure (defaults={SIZE})."))

        benchmark.add_argument(
            "--stars", dest="stars", type=int, default=STARS,
            help=("The total numbers of stars in the image "
                  f"(defaults={STARS})."))

        benchmark.add_argument(
            "--noise", dest="noise", type=int, default=NOISE,
            help=f"lambda parameter for poisson noise (default={NOISE})")

        benchmark.add_argument(
            "--number", dest="repeats", type=int, default=REPEATS,
            help=f"How many flux tests must be executed (default={REPEATS})")

        benchmark.add_argument(
            "--seed", dest="seed", type=int, default=None,
            help=("Random seed used to initialize the pseudo-random number "
                  "generator. if seed is None, then random-state will try to "
                  "read data from /dev/urandom (or the Windows analogue) if "
                  "available or seed from the clock otherwise "
                  "(default=None)."))

        benchmark.add_argument(
            "--out", "-o", dest="out", required=True,
            type=argparse.FileType('w'),
            help="Output file path. The data was stored in CSV format")

        # =====================================================================
        # describe subparser
        # =====================================================================

        describe = subparsers.add_parser(
            "describe",
            help="Show a resume and (optionally) of the benchmark results")
        describe.set_defaults(callback=self.describe_command)

        describe.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the flux benchmark data in CSV format")

        # =====================================================================
        # plot subparser
        # =====================================================================

        plot = subparsers.add_parser(
            "plot", help="Show the histogram of a given results")
        plot.set_defaults(callback=self.plot_command)

        plot.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the flux benchmark data in CSV format")

        plot.add_argument(
            "--size", dest="size", nargs=2, type=float,
            help=("The size of the entire figure in inches in the format "
                  f"'width height' (default={DEFAULT_SIZE})."))

        plot.add_argument(
            "--out", "-o", dest="out",
            help=("A file to store the generated plot. "
                  "By default the default matplotlib backend shows the plot"))

    def parse_and_run(self, *args, **kwargs):
        ns = self._parser.parse_args(*args, **kwargs)
        return ns.callback(ns)

    def plot_command(self, ns):
        import matplotlib.pyplot as plt

        results = pd.read_csv(ns.file)

        size = ns.size if ns.size else DEFAULT_SIZE

        fig, ax = plt.subplots()
        fig.set_size_inches(*size)

        plot(results, ax)

        fig.suptitle("")
        plt.tight_layout()
        if ns.out is None:
            print(f"Showing plot for data stored in '{ns.file.name}'...")
            # set_window_title lives on the figure manager; the canvas
            # alias was removed in matplotlib 3.6.
            fig.canvas.manager.set_window_title(
                f"{self.parser.prog} - {ns.file.name}")
            plt.show()
        else:
            print(
                f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...")
            plt.savefig(ns.out)
        print("DONE!")

    def describe_command(self, ns):
        results = pd.read_csv(ns.file)

        repetitions, resume = describe(results)

        print(f"Data size: {len(results)}")
        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

    def benchmark_command(self, ns):
        if ns.repeats <= 0:
            self._parser.error(f"'repeats' must be > 0. Found {ns.repeats}")

        now = dt.datetime.now

        print(
            f"[{now()}] Starting flux benchmark "
            f"for astroalign {aa.__version__}...")
        print("")
        results = benchmark(
            size=ns.size, stars=ns.stars, noise=ns.noise,
            repeats=ns.repeats, seed=ns.seed)

        repetitions, resume = describe(results)

        print(f"[{now()}] Data size: {len(results)}")
        print(f"\twith {repetitions} repetitions \n")

        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

        results.to_csv(ns.out, index=False)

    @property
    def parser(self):
        return self._parser


# =============================================================================
# MAIN
# =============================================================================

if __name__ == "__main__":
    parser = CLI()
    parser.parse_and_run()

# ---------------------------------------------------------------------------
# benchmarks/time_bench.py
# ---------------------------------------------------------------------------

# MIT License

# Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


# =============================================================================
# IMPORTS
# =============================================================================

import sys
import os
import timeit
import datetime as dt
import argparse
from collections import OrderedDict

import numpy as np

import astroalign as aa

import pandas as pd

import joblib

from sklearn.model_selection import ParameterGrid

import tqdm

test_path = os.path.abspath(os.path.dirname(aa.__file__))
sys.path.insert(0, test_path)

from tests.test_align import simulate_image_pair  # noqa


# =============================================================================
# CONSTANTS
# =============================================================================

SIZES = (256, 512, 768, 1024)

STARS = (300, 500, 1000, 10_000)

NOISES = (100, 500, 1000, 5000)

COMB_NUMBER = 10

STATEMENT = "aa.register(source, target)"

REPEATS = 50

COLSROWS = {
    "vertical": {"ncols": 1, "nrows": 3},
    "horizontal": {"ncols": 3, "nrows": 1}
}

DEFAULT_SIZES = {
    "vertical": (4, 12),
    "horizontal": (12, 4)
}


# =============================================================================
# FUNCTIONS
# =============================================================================

def get_images(size, stars, noise, seed):
    """Retrieves a pair source and target image"""
    if seed is not None:
        np.random.seed(seed)
    shape = (size, size)
    source, target = simulate_image_pair(
        shape=shape, num_stars=stars, noise_level=noise)[:2]
    return source, target


def get_parameters(sizes, stars, noises, comb_number, repeats, seed):
    """Create a list of dictionaries with all the combinations of the given
    parameters.

    """
    grid = ParameterGrid({
        "size": sizes, "stars": stars, "noise": noises})
    grid = list(grid) * comb_number

    # set the random state for run in parallel
    random = np.random.RandomState(seed)
    images_seeds = random.randint(1_000_000, size=len(grid))

    for idx, g in enumerate(grid):
        g["idx"] = idx
        g["seed"] = seed
        g["images_seed"] = images_seeds[idx]
        g["repeats"] = repeats

    return grid


def _test(idx, size, stars, noise, seed, images_seed, repeats):
    """Time ``aa.register`` for one parameter combination; return a row."""
    # create the two images
    source, target = get_images(
        size=size, stars=stars, noise=noise, seed=images_seed)

    # create the timer
    test_globals = {"aa": aa, "source": source, "target": target}
    timer = timeit.Timer(stmt=STATEMENT, globals=test_globals)

    # find the number of loops
    loops = timer.autorange()[0]

    # create a copy of the params to be returned ad result
    result = OrderedDict({
        "idx": idx, "size": size, "noise": noise, "stars": stars, "seed": seed,
        "images_seed": images_seed, "repeats": repeats, "loops": loops})

    # execute the timeit
    times = timer.repeat(repeats, loops)

    # store the times into the result
    result["time"] = np.min(np.array(times) / loops)
    for tidx, time in enumerate(times):
        result[f"time_{tidx}"] = time

    return result


def benchmark(sizes=SIZES, stars=STARS, noises=NOISES,
              comb_number=10, seed=None, repeats=REPEATS, n_jobs=-1):
    """Run every parameter combination in parallel; return a DataFrame."""
    grid = get_parameters(
        sizes=sizes, stars=stars, noises=noises,
        comb_number=comb_number, seed=seed, repeats=repeats)

    with joblib.Parallel(n_jobs=n_jobs) as parallel:
        results = parallel(
            joblib.delayed(_test)(**params) for params in tqdm.tqdm(grid))

    df = pd.DataFrame(results)
    return df


def describe(results):
    """Return the repetition count and a summary of time/loops columns."""
    repetitions = results.repeats.values[0]
    resume = results[["time", "loops"]].describe()
    return repetitions, resume


def plot(results, ax_size, ax_stars, ax_noise):
    """Draw three time boxplots, grouped by size, stars and noise."""
    results = results[["size", "stars", "noise", "time"]]

    def plots(df, by, ax):
        df.boxplot(by=by, column=["time"], grid=False, ax=ax)
        ax.set_title(f"Time by {by.title()}")
        ax.set_ylabel("Seconds")
        ax.set_xlabel(by.title())

    plots(results, "noise", ax_noise)
    plots(results, "stars", ax_stars)
    plots(results, "size", ax_size)

    return (ax_size, ax_stars, ax_noise)


# =============================================================================
# CLI MAIN
# =============================================================================

class CLI:
    """Command-line front-end: benchmark / describe / plot subcommands."""

    def __init__(self):
        self._parser = argparse.ArgumentParser(
            description="Astroalign time benchmark tool based on timeit")
        self._parser.set_defaults(
            callback=lambda ns: self.parser.print_usage())

        self._parser.add_argument(
            '--version', action='version', version='%(prog)s 2019.10')

        subparsers = self._parser.add_subparsers()

        # =====================================================================
        # benchmark subparser
        # =====================================================================

        benchmark = subparsers.add_parser(
            "benchmark",
            help="Execute and collect the benchmark data of astroalign")
        benchmark.set_defaults(callback=self.benchmark_command)

        benchmark.add_argument(
            "--sizes", dest="sizes", type=int, default=SIZES, nargs="+",
            help=("The size in pixels of the image. This parameter creates "
                  f"square figure (defaults={SIZES})."))

        benchmark.add_argument(
            "--stars", dest="stars", type=int, default=STARS, nargs="+",
            help=("The total numbers of stars in the image "
                  f"(defaults={STARS})."))

        benchmark.add_argument(
            "--noises", dest="noises", type=int, default=NOISES, nargs="+",
            help=f"lambda parameter for poisson noise (default={NOISES})")

        benchmark.add_argument(
            "--number", dest="comb_number", type=int, default=10,
            help=("How many random images pairs must be created for one "
                  "combination of sizes, stars and noise (default=10)."))

        benchmark.add_argument(
            "--seed", dest="seed", type=int, default=None,
            help=("Random seed used to initialize the pseudo-random number "
                  "generator. if seed is None, then random-state will try to "
                  "read data from /dev/urandom (or the Windows analogue) if "
                  "available or seed from the clock otherwise "
                  "(default=None)."))

        benchmark.add_argument(
            "--repeats", dest="repeats", type=int, default=REPEATS,
            help=("How many measurements must be taken for every image pair. "
                  "The final 'time' is the lower bound of all the times. "
                  "Docs: https://docs.python.org/3.7/library/timeit.html"))

        benchmark.add_argument(
            "--jobs", dest="n_jobs", type=int, default=-1,
            help=("The number of CPU to run the benchmars. "
                  "-1 uses all the available CPUS (default=-1)"))

        benchmark.add_argument(
            "--out", "-o", dest="out", required=True,
            type=argparse.FileType('w'),
            help="Output file path. The data was stored in CSV format")

        # =====================================================================
        # describe subparser
        # =====================================================================

        describe = subparsers.add_parser(
            "describe",
            help="Show a resume and (optionally) of the benchmark results")
        describe.set_defaults(callback=self.describe_command)

        describe.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the time benchmark data in CSV format")

        # =====================================================================
        # plot subparser
        # =====================================================================

        plot = subparsers.add_parser(
            "plot", help="Show three boxplots of a given results")
        plot.set_defaults(callback=self.plot_command)

        plot.add_argument(
            "--file", "-f", dest="file", required=True,
            type=argparse.FileType('r'),
            help="File path of the time benchmark data in CSV format")

        plot.add_argument(
            "--orientation", dest="orientation",
            choices=list(COLSROWS.keys()), default="horizontal",
            help=("If the plots will be a single row (horizontal) "
                  "or vertical for a single column (default='horizontal')"))

        plot.add_argument(
            "--size", dest="size", nargs=2, type=float,
            help=("The size of the entire figure in inches in the format "
                  "'width height' for horizontal orientation the size by "
                  f"default is {DEFAULT_SIZES['horizontal']} and for "
                  f"vertical {DEFAULT_SIZES['vertical']}."))

        plot.add_argument(
            "--out", "-o", dest="out",
            help=("A file to store the generated plot. "
                  "By default the default matplotlib backend shows the plot"))

    def parse_and_run(self, *args, **kwargs):
        ns = self._parser.parse_args(*args, **kwargs)
        return ns.callback(ns)

    def plot_command(self, ns):
        import matplotlib.pyplot as plt

        results = pd.read_csv(ns.file)

        orientation = COLSROWS[ns.orientation]
        size = ns.size if ns.size else DEFAULT_SIZES[ns.orientation]

        fig, axes = plt.subplots(**orientation)
        fig.set_size_inches(*size)

        plot(results, *axes)

        fig.suptitle("")
        plt.tight_layout()
        if ns.out is None:
            print(f"Showing plot for data stored in '{ns.file.name}'...")
            # set_window_title lives on the figure manager; the canvas
            # alias was removed in matplotlib 3.6.
            fig.canvas.manager.set_window_title(
                f"{self.parser.prog} - {ns.file.name}")
            plt.show()
        else:
            print(
                f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...")
            plt.savefig(ns.out)
        print("DONE!")

    def describe_command(self, ns):
        results = pd.read_csv(ns.file)

        repetitions, resume = describe(results)

        print(f"Executed: {len(results)} cases")

        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

    def benchmark_command(self, ns):
        if ns.repeats <= 0:
            self._parser.error(f"'repeats' must be > 0. Found {ns.repeats}")

        now = dt.datetime.now

        print(
            f"[{now()}] Starting benchmark for astroalign {aa.__version__}...")
        print("")
        results = benchmark(
            sizes=ns.sizes, stars=ns.stars, noises=ns.noises,
            comb_number=ns.comb_number, seed=ns.seed,
            repeats=ns.repeats, n_jobs=ns.n_jobs)

        repetitions, resume = describe(results)

        print(f"[{now()}] Executed: {len(results)} cases")

        print(f"\twith {repetitions} repetitions \n")
        print(">>>>> Resume <<<<<")
        print(resume)
        print("")

        results.to_csv(ns.out, index=False)
        print(f"[{now()}] Data stored in '{ns.out.name}'")

    @property
    def parser(self):
        return self._parser


# =============================================================================
# MAIN
# =============================================================================

if __name__ == "__main__":
    parser = CLI()
    parser.parse_and_run()

# ---------------------------------------------------------------------------
# benchmarks/time_regression.py
# ---------------------------------------------------------------------------

# MIT License

# Copyright (c) 2016-2019 Martin Beroiz, Juan B. Cabral, Bruno Sanchez

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
14 | 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | # ============================================================================= 24 | # IMPORTS 25 | # ============================================================================= 26 | 27 | import sys 28 | import os 29 | import timeit 30 | import datetime as dt 31 | import argparse 32 | from collections import OrderedDict 33 | 34 | import numpy as np 35 | 36 | import astroalign as aa 37 | 38 | from sklearn.model_selection import ParameterGrid 39 | from sklearn.linear_model import LinearRegression 40 | from sklearn.metrics import mean_squared_error, r2_score 41 | 42 | import pandas as pd 43 | 44 | import joblib 45 | 46 | import tqdm 47 | 48 | 49 | test_path = os.path.abspath(os.path.dirname(aa.__file__)) 50 | sys.path.insert(0, test_path) 51 | 52 | from tests.test_align import simulate_image_pair # noqa 53 | 54 | 55 | # ============================================================================= 56 | # CONSTANTS 57 | # ============================================================================= 58 | 59 | SIZES = (256, 512, 768, 1024) 60 | 61 | STARS = 10000 62 | 63 | NOISE = 1000 64 | 65 | STEP = 10 66 | 67 | STATEMENT = "aa.register(source, target)" 68 | 69 | REPEATS = 50 70 | 71 | COMB_NUMBER = 10 72 | 73 | DEFAULT_SIZE = (8, 8) 74 | 75 | 76 | # ============================================================================= 77 | # FUNCTIONS 78 | # ============================================================================= 79 | 80 | def 
get_images(size, stars, noise, seed): 81 | """Retrieves a pair source and target image""" 82 | if seed is not None: 83 | np.random.seed(seed) 84 | shape = (size, size) 85 | source, target = simulate_image_pair( 86 | shape=shape, num_stars=stars, noise_level=noise)[:2] 87 | return source, target 88 | 89 | 90 | def get_parameters(min_size, max_size, step_size, stars, 91 | noise, seed, comb_number, repeats): 92 | """Create a list of dictionaries with all the combinations of the given 93 | parameters. 94 | 95 | """ 96 | 97 | sample_size = int((max_size - min_size) / step_size) 98 | sizes = np.linspace(min_size, max_size, sample_size, dtype=int) 99 | 100 | grid = ParameterGrid({ 101 | "size": sizes, "stars": [stars], 102 | "noise": [noise], "repeats": [repeats]}) 103 | grid = list(grid) * comb_number 104 | 105 | # set the random state for run in parallel 106 | random = np.random.RandomState(seed) 107 | images_seeds = random.randint(1_000_000, size=len(grid)) 108 | 109 | for idx, g in enumerate(grid): 110 | g["idx"] = idx 111 | g["seed"] = seed 112 | g["min_size"] = min_size 113 | g["max_size"] = max_size 114 | g["step_size"] = step_size 115 | g["images_seed"] = images_seeds[idx] 116 | return grid 117 | 118 | 119 | def _test(idx, min_size, max_size, step_size, size, 120 | stars, noise, seed, repeats, images_seed): 121 | 122 | # create the two images 123 | source, target = get_images( 124 | size=size, stars=stars, noise=noise, seed=images_seed) 125 | 126 | # create the timer 127 | test_globals = {"aa": aa, "source": source, "target": target} 128 | timer = timeit.Timer(stmt=STATEMENT, globals=test_globals) 129 | 130 | # find the number of loops 131 | loops = timer.autorange()[0] 132 | 133 | # create a copy of the params to be returned ad result 134 | result = OrderedDict({ 135 | "idx": idx, "min_size": min_size, "max_size": max_size, 136 | "step_size": step_size, "size": size, "noise": noise, 137 | "stars": stars, "seed": seed, "images_seed": images_seed, 138 | "repeats": 
repeats, "loops": loops}) 139 | 140 | # execute the timeit 141 | times = timer.repeat(repeats, loops) 142 | 143 | # store the times into the result 144 | result["time"] = np.min(np.array(times) / loops) 145 | for tidx, time in enumerate(times): 146 | result[f"time_{tidx}"] = time 147 | 148 | return result 149 | 150 | 151 | def benchmark(min_size=min(SIZES), max_size=max(SIZES), step_size=STEP, 152 | stars=STARS, noise=NOISE, seed=None, repeats=REPEATS, 153 | n_jobs=-1, comb_number=COMB_NUMBER): 154 | 155 | grid = get_parameters( 156 | min_size=min_size, max_size=max_size, step_size=step_size, 157 | repeats=repeats, stars=stars, noise=noise, seed=seed, 158 | comb_number=comb_number) 159 | 160 | with joblib.Parallel(n_jobs=n_jobs) as parallel: 161 | results = parallel( 162 | joblib.delayed(_test)(**params) for params in tqdm.tqdm(grid)) 163 | 164 | df = pd.DataFrame(results) 165 | return df 166 | 167 | 168 | def describe(results): 169 | repetitions = results.repeats.values[0] 170 | resume = results[["time", "size", "loops"]].describe() 171 | return repetitions, resume 172 | 173 | 174 | def plot(results, ax): 175 | df = results[["size", "time"]] 176 | 177 | df.plot.scatter(x='size', y='time', c='LightBlue', ax=ax, marker=".") 178 | 179 | # linear regression 180 | x = df["size"].values.reshape((-1, 1)) 181 | y = df["time"].values 182 | linear = LinearRegression().fit(x, y) 183 | y_pred = linear.predict(x) 184 | 185 | mqe = mean_squared_error(y, y_pred) 186 | r2 = r2_score(y, y_pred) 187 | 188 | ax.plot(x, y_pred, color='DarkBlue', linewidth=2) 189 | 190 | ax.set_title( 191 | "Linear regression between size and time " 192 | f"\n$mse={mqe:.3f}$ - $R^2={r2:.3f}$") 193 | ax.set_xlabel("Size") 194 | ax.set_ylabel("Seconds") 195 | 196 | return ax 197 | 198 | 199 | # ============================================================================= 200 | # CLI MAIN 201 | # ============================================================================= 202 | 203 | class CLI: 204 | 
205 | def __init__(self): 206 | self._parser = argparse.ArgumentParser( 207 | description="Astroalign time benchmark tool based on timeit") 208 | self._parser.set_defaults( 209 | callback=lambda ns: self.parser.print_usage()) 210 | 211 | self._parser.add_argument( 212 | '--version', action='version', version='%(prog)s 2019.10') 213 | 214 | subparsers = self._parser.add_subparsers() 215 | 216 | # ===================================================================== 217 | # benchmark subparser 218 | # ===================================================================== 219 | 220 | benchmark = subparsers.add_parser( 221 | "benchmark", 222 | help="Execute and collect the regression benchmark of astroalign") 223 | benchmark.set_defaults(callback=self.benchmark_command) 224 | 225 | benchmark.add_argument( 226 | "--max", dest="max_size", type=int, default=max(SIZES), 227 | help=("The size in pixels of the bigger square image. " 228 | f"(defaults={max(SIZES)}).")) 229 | 230 | benchmark.add_argument( 231 | "--min", dest="min_size", type=int, default=min(SIZES), 232 | help=("The size in pixels of the smallest square image. 
" 233 | f"(defaults={max(SIZES)}).")) 234 | 235 | benchmark.add_argument( 236 | "--step", dest="step_size", type=int, default=STEP, 237 | help=f"The size between every image (defaults={STEP}).") 238 | 239 | benchmark.add_argument( 240 | "--stars", dest="stars", type=int, default=STARS, 241 | help=("The total numbers of stars in the image " 242 | f"(defaults={STARS}).")) 243 | 244 | benchmark.add_argument( 245 | "--noise", dest="noise", type=int, default=NOISE, 246 | help=f"lambda parameter for poisson noise (default={NOISE})") 247 | 248 | benchmark.add_argument( 249 | "--number", dest="comb_number", type=int, default=10, 250 | help=("How many random images pairs must be created for one " 251 | f"size (default={COMB_NUMBER}).")) 252 | 253 | benchmark.add_argument( 254 | "--seed", dest="seed", type=int, default=None, 255 | help=("Random seed used to initialize the pseudo-random number " 256 | "generator. if seed is None, then random-state will try to " 257 | "read data from /dev/urandom (or the Windows analogue) if " 258 | "available or seed from the clock otherwise " 259 | "(default=None).")) 260 | 261 | benchmark.add_argument( 262 | "--repeats", dest="repeats", type=int, default=REPEATS, 263 | help=("How many measurements must be taken for every image pair. " 264 | "The final 'time' is the lower bound of all the times. " 265 | "Docs: https://docs.python.org/3.7/library/timeit.html")) 266 | 267 | benchmark.add_argument( 268 | "--jobs", dest="n_jobs", type=int, default=-1, 269 | help=("The number of CPU to run the benchmars. " 270 | "-1 uses all the available CPUS (default=-1)")) 271 | 272 | benchmark.add_argument( 273 | "--out", "-o", dest="out", required=True, 274 | type=argparse.FileType('w'), 275 | help="Output file path. 
The data was stored in CSV format") 276 | 277 | # ===================================================================== 278 | # describe subparser 279 | # ===================================================================== 280 | 281 | describe = subparsers.add_parser( 282 | "describe", 283 | help="Show a resume and (optionally) of the benchmark results") 284 | describe.set_defaults(callback=self.describe_command) 285 | 286 | describe.add_argument( 287 | "--file", "-f", dest="file", required=True, 288 | type=argparse.FileType('r'), 289 | help="File path of the time benchmark data in CSV format") 290 | 291 | # ===================================================================== 292 | # plot subparser 293 | # ===================================================================== 294 | 295 | plot = subparsers.add_parser( 296 | "plot", help="Show three boxplots of a given results") 297 | plot.set_defaults(callback=self.plot_command) 298 | 299 | plot.add_argument( 300 | "--file", "-f", dest="file", required=True, 301 | type=argparse.FileType('r'), 302 | help="File path of the time benchmark data in CSV format") 303 | 304 | plot.add_argument( 305 | "--size", dest="size", nargs=2, type=float, 306 | help=("The size of the entire figure in inches in the format " 307 | f"'width height' (default={DEFAULT_SIZE}).")) 308 | 309 | plot.add_argument( 310 | "--out", "-o", dest="out", 311 | help=("A file to store the generated plot. 
" 312 | "By default the default matplotlib backend shows the plot")) 313 | 314 | def parse_and_run(self, *args, **kwargs): 315 | ns = self._parser.parse_args(*args, **kwargs) 316 | return ns.callback(ns) 317 | 318 | def plot_command(self, ns): 319 | import matplotlib.pyplot as plt 320 | 321 | results = pd.read_csv(ns.file) 322 | 323 | size = ns.size if ns.size else DEFAULT_SIZE 324 | 325 | fig, ax = plt.subplots() 326 | fig.set_size_inches(*size) 327 | 328 | plot(results, ax) 329 | 330 | fig.suptitle("") 331 | plt.tight_layout() 332 | if ns.out is None: 333 | print(f"Showing plot for data stored in '{ns.file.name}'...") 334 | fig.canvas.set_window_title(f"{self.parser.prog} - {ns.file.name}") 335 | plt.show() 336 | else: 337 | print( 338 | f"Storing plot for data in '{ns.file.name}' -> '{ns.out}'...") 339 | plt.savefig(ns.out) 340 | print("DONE!") 341 | 342 | def describe_command(self, ns): 343 | results = pd.read_csv(ns.file) 344 | 345 | repetitions, resume = describe(results) 346 | 347 | print(f"Executed: {len(results)} cases") 348 | 349 | print(f"\twith {repetitions} repetitions \n") 350 | print(">>>>> Resume <<<<<") 351 | print(resume) 352 | print("") 353 | 354 | def benchmark_command(self, ns): 355 | if ns.step_size <= 0: 356 | self._parser.error(f"'step' must be > 0. 
Found {ns.step_size}") 357 | 358 | now = dt.datetime.now 359 | 360 | print( 361 | f"[{now()}] Starting benchmark for astroalign {aa.__version__}...") 362 | print("") 363 | results = benchmark( 364 | max_size=ns.max_size, min_size=ns.min_size, step_size=ns.step_size, 365 | stars=ns.stars, noise=ns.noise, seed=ns.seed, 366 | repeats=ns.repeats, n_jobs=ns.n_jobs, comb_number=ns.comb_number) 367 | 368 | repetitions, resume = describe(results) 369 | 370 | print(f"[{now()}] Executed: {len(results)} cases") 371 | 372 | print(f"\twith {repetitions} repetitions \n") 373 | print(">>>>> Resume <<<<<") 374 | print(resume) 375 | print("") 376 | 377 | results.to_csv(ns.out, index=False) 378 | print(f"[{now()}] Data stored in '{ns.out.name}'") 379 | 380 | @property 381 | def parser(self): 382 | return self._parser 383 | 384 | 385 | # ============================================================================= 386 | # MAIN 387 | # ============================================================================= 388 | 389 | if __name__ == "__main__": 390 | parser = CLI() 391 | parser.parse_and_run() 392 | -------------------------------------------------------------------------------- /tests/test_align.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | # Copyright (c) 2016-2019 Martin Beroiz 4 | 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import unittest
import numpy as np
import astroalign as aa


def gauss(shape=(11, 11), center=None, sx=2, sy=2):
    "Returns a Gaussian of given shape, normalized to 1."
    h, w = shape
    if center is None:
        center = ((h - 1) / 2.0, (w - 1) / 2.0)
    x0, y0 = center
    x, y = np.meshgrid(range(w), range(h))
    krnl = np.exp(-0.5 * ((x - x0) ** 2 / sx ** 2 + (y - y0) ** 2 / sy ** 2))
    krnl /= krnl.sum()
    return krnl


def simulate_image_pair(
    shape=(512, 512),
    kshape=(10, 10),
    noise_level=500,
    gshape=(21, 21),
    gsigma=1.5,
    translation=(10, -20),
    rot_angle_deg=50.0,
    num_stars=1500,
    star_refx=None,
    star_refy=None,
    star_flux=None,
):
    """Simulate a reference image and a rotated/translated copy of it.

    Returns ``(image, image_ref, star_ref_pos, star_new_pos)`` where the
    position arrays hold the (col, row) pixel coordinates of the stars that
    landed inside each frame.
    """
    from scipy import signal

    h, w = shape  # image height and width
    kh, kw = kshape  # kernel height and width
    psf = gauss(shape=gshape, sx=gsigma, sy=gsigma)
    # Transformation parameters
    x_offset, y_offset = translation

    rot_angle = rot_angle_deg * np.pi / 180.0

    big_r = 0.5 * np.sqrt(h ** 2 + w ** 2) + max(abs(x_offset), abs(y_offset))

    image_ref = np.random.poisson(noise_level, size=(h + kh, w + kw)).astype(
        "float64"
    )
    image = np.random.poisson(noise_level, size=(h + kh, w + kw)).astype(
        "float64"
    )

    # x and y of stars in the ref frame (int's)
    if star_refx is None:
        star_refx = np.random.randint(
            low=int(-big_r) + w / 2, high=int(big_r) + w / 2, size=(num_stars,)
        )
    if star_refy is None:
        star_refy = np.random.randint(
            low=int(-big_r) + h / 2, high=int(big_r) + h / 2, size=(num_stars,)
        )
    # Fluxes of stars
    if star_flux is None:
        a, m = 0.8, 3.0 * image_ref.std()  # This are Pareto dist coeff's
        star_flux = (1.0 + np.random.pareto(a, num_stars)) * m

    # inframe will contain the stars in the reference image
    inframe = []
    ymax, xmax = image_ref.shape
    for x, y, f in zip(star_refx, star_refy, star_flux):
        if x > 0 and x < xmax and y > 0 and y < ymax:
            inframe.append((int(x), int(y), f))
    ref_cols, ref_rows, ref_flux = np.array(inframe).astype(int).T

    image_ref[ref_rows, ref_cols] += ref_flux
    image_ref = signal.convolve2d(image_ref, psf, mode="same")
    image_ref = image_ref[kh // 2: -kh // 2, kw // 2: -kw // 2]
    # Adjust here the positions of rows and cols after cropping image
    ref_cols -= kw // 2
    ref_rows -= kh // 2

    newx, newy = [], []
    for x, y in zip(star_refx, star_refy):
        x -= w / 2
        y -= h / 2
        xp = x * np.cos(rot_angle) - y * np.sin(rot_angle) + x_offset
        yp = x * np.sin(rot_angle) + y * np.cos(rot_angle) + y_offset
        xp += w / 2
        yp += h / 2
        newx.append(xp)
        newy.append(yp)
    # x and y of stars in the new frame (float's)
    star_newx = np.array(newx)
    star_newy = np.array(newy)

    inframe = []
    ymax, xmax = image.shape
    for x, y, f in zip(star_newx, star_newy, star_flux):
        # BUGFIX: the vertical bound previously compared y against xmax;
        # harmless for the square test images but wrong in general
        if x > 0 and x < xmax and y > 0 and y < ymax:
            inframe.append((int(x), int(y), f))
    new_cols, new_rows, new_flux = np.array(inframe).astype(int).T

    image[new_rows, new_cols] += new_flux
    image = signal.convolve2d(image, psf, mode="same")
    image = image[kh // 2: -kh // 2, kw // 2: -kw // 2]
    # Adjust here the positions of rows and cols after cropping image
    new_cols -= kw // 2
    new_rows -= kh // 2

    star_ref_pos = np.array(list(zip(ref_cols, ref_rows)))
    star_new_pos = np.array(list(zip(new_cols, new_rows)))

    return image, image_ref, star_ref_pos, star_new_pos


def simulate_image_single(
    shape=(512, 512),
    kshape=(10, 10),
    noise_level=500,
    gshape=(21, 21),
    gsigma=1.5,
    num_stars=1500,
    star_refx=None,
    star_refy=None,
    star_flux=None,
):
    """Simulate a single star field image; returns ``(image, star_pos)``."""
    from scipy import signal

    h, w = shape  # image height and width
    kh, kw = kshape  # kernel height and width
    psf = gauss(shape=gshape, sx=gsigma, sy=gsigma)

    big_r = 0.5 * np.sqrt(h ** 2 + w ** 2)

    # Sky background
    image = np.random.poisson(noise_level, size=(h + kh, w + kw)).astype(
        "float64"
    )

    # x and y of stars in the ref frame (int's)
    if star_refx is None:
        star_refx = np.random.randint(
            low=int(-big_r) + w / 2, high=int(big_r) + w / 2, size=(num_stars,)
        )
    if star_refy is None:
        star_refy = np.random.randint(
            low=int(-big_r) + h / 2, high=int(big_r) + h / 2, size=(num_stars,)
        )
    # Fluxes of stars
    if star_flux is None:
        a, m = 0.8, 3.0 * image.std()  # This are Pareto dist coeff's
        star_flux = (1.0 + np.random.pareto(a, num_stars)) * m

    # inframe will contain the stars in the reference image
    inframe = []
    ymax, xmax = image.shape
    for x, y, f in zip(star_refx, star_refy, star_flux):
        if x > 0 and x < xmax and y > 0 and y < ymax:
            inframe.append((int(x), int(y), f))
    cols, rows, flux = np.array(inframe).astype(int).T

    image[rows, cols] += flux
    image = signal.convolve2d(image, psf, mode="same")
    image = image[kh // 2: -kh // 2, kw // 2: -kw // 2]
    # Adjust here the positions of rows and cols after cropping image
    cols -= kw // 2
    rows -= kh // 2

    star_pos = np.array(list(zip(cols, rows)))

    return image, star_pos


class TestAlign(unittest.TestCase):
    """End-to-end tests of astroalign on simulated image pairs."""

    def setUp(self):
        self.h = 512  # image height
        self.w = 512  # image width
        self.x_offset = 10
        self.y_offset = -20
        self.rot_angle = 50.0 * np.pi / 180.0
        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
        )

    def test_find_transform_givensources(self):
        from skimage.transform import estimate_transform, matrix_transform

        source = np.array(
            [
                [1.4, 2.2],
                [5.3, 1.0],
                [3.7, 1.5],
                [10.1, 9.6],
                [1.3, 10.2],
                [7.1, 2.0],
            ]
        )
        nsrc = source.shape[0]
        scale = 1.5  # scaling parameter
        alpha = np.pi / 8.0  # rotation angle
        mm = scale * np.array(
            [[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]]
        )
        tx, ty = 2.0, 1.0  # translation parameters
        transl = np.array([nsrc * [tx], nsrc * [ty]])
        dest = (mm.dot(source.T) + transl).T
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(t_true.scale - t.scale, 1e-10)
        self.assertLess(t_true.rotation - t.rotation, 1e-10)
        self.assertLess(
            np.linalg.norm(t_true.translation - t.translation), 1e-10
        )
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1e-10)

    def test_register(self):
        def compare_image(the_image):
            """Return the fraction of sources found in the reference image"""
            # pixel comparison is not good, doesn't work. Compare catalogs.
            full_algn = the_image.astype("float32")
            import sep

            bkg = sep.Background(full_algn)
            thresh = 3.0 * bkg.globalrms
            allobjs = sep.extract(full_algn - bkg.back(), thresh)
            allxy = np.array([[obj["x"], obj["y"]] for obj in allobjs])

            from scipy.spatial import KDTree

            ref_coordtree = KDTree(self.star_ref_pos)

            # Compare here srcs list with self.star_ref_pos
            num_sources = 0
            for asrc in allxy:
                found_source = ref_coordtree.query_ball_point(asrc, 3)
                if found_source:
                    num_sources += 1
            fraction_found = float(num_sources) / float(len(allxy))
            return fraction_found

        registered_img, footp = aa.register(
            source=self.image, target=self.image_ref
        )
        self.assertIs(type(registered_img), np.ndarray)
        self.assertIs(type(footp), np.ndarray)
        self.assertIs(footp.dtype, np.dtype("bool"))
        fraction = compare_image(registered_img)
        self.assertGreater(fraction, 0.85)

    def test_register_nddata(self):
        from astropy.nddata import NDData
        from skimage.transform import SimilarityTransform

        transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))

        nd = NDData(
            [[0.0, 1.0], [2.0, 3.0]], mask=[[True, False], [False, False]]
        )
        registered_img, footp = aa.apply_transform(
            transf, nd, nd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, True], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

        # Test now if there is no assigned mask during creation
        nd = NDData([[0.0, 1.0], [2.0, 3.0]])
        registered_img, footp = aa.apply_transform(
            transf, nd, nd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, False], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

    def test_register_ccddata(self):
        from ccdproc import CCDData
        from skimage.transform import SimilarityTransform

        transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))

        cd = CCDData(
            [[0.0, 1.0], [2.0, 3.0]],
            mask=[[True, False], [False, False]],
            unit="adu",
        )
        registered_img, footp = aa.apply_transform(
            transf, cd, cd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, True], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

        cd = CCDData([[0.0, 1.0], [2.0, 3.0]], unit="adu")
        registered_img, footp = aa.apply_transform(
            transf, cd, cd, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, False], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

    def test_register_npma(self):
        from skimage.transform import SimilarityTransform

        transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))
        nparr = np.array([[0.0, 1.0], [2.0, 3.0]])
        mask = [[True, False], [False, False]]

        ma = np.ma.array(nparr, mask=mask)
        registered_img, footp = aa.apply_transform(
            transf, ma, ma, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, True], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

        ma = np.ma.array(nparr)
        registered_img, footp = aa.apply_transform(
            transf, ma, ma, propagate_mask=True
        )
        err = np.linalg.norm(
            registered_img - np.array([[2.0, 0.0], [3.0, 1.0]])
        )
        self.assertLess(err, 1e-6)
        err_mask = footp == np.array([[False, False], [False, False]])
        self.assertTrue(all(err_mask.flatten()))

    def test_fill_value(self):
        registered_img, footp = aa.register(
            source=self.image, target=self.image_ref, fill_value=-9999.99
        )
        self.assertTrue(all(registered_img[footp] == -9999.99))
        self.assertTrue(all(registered_img[~footp] != -9999.99))

    def test_find_sources(self):
        srcs = aa._find_sources(self.image_ref)

        from scipy.spatial import KDTree

        ref_coordtree = KDTree(self.star_ref_pos)

        # Compare here srcs list with self.star_ref_pos
        num_sources = 0
        for asrc in srcs:
            found_source = ref_coordtree.query_ball_point(asrc, 3)
            if found_source:
                num_sources += 1
        fraction_found = float(num_sources) / float(len(srcs))
        self.assertGreater(fraction_found, 0.85)

    def test_dtypes(self):
        # aa.register(self.image.astype('float16'), self.image_ref)
        aa.register(self.image.astype("float32"), self.image_ref)
        aa.register(self.image.astype("float64"), self.image_ref)
        aa.register(self.image.astype("int32"), self.image_ref)
        aa.register(self.image.astype("int64"), self.image_ref)

    def test_consistent_invert(self):
        t, __ = aa.find_transform(self.image, self.image_ref)
        tinv, __ = aa.find_transform(self.image_ref, self.image)
        rpoint = np.random.rand(3) * self.h
        rpoint[2] = 1.0
        rtransf = tinv.params.dot(t.params.dot(rpoint))
        err = np.linalg.norm(rpoint - rtransf) / np.linalg.norm(rpoint)
        self.assertLess(err, 1e-2)

    def test_unrepeated_sources(self):
        source = np.array(
            [[0.0, 2.0], [1.0, 3.0], [2.1, 1.75], [3.5, 1.0], [4.0, 2.0]]
        )
        R = np.array(
            [
                [np.cos(30.0 * np.pi / 180), np.sin(30.0 * np.pi / 180)],
                [-np.sin(30.0 * np.pi / 180), np.cos(30.0 * np.pi / 180)],
            ]
        )
        tr = np.array([-0.5, 2.5])
        target = R.dot(source.T).T + tr
        best_t, (s_list, t_list) = aa.find_transform(source, target)
        self.assertEqual(len(s_list), len(t_list))
        self.assertLessEqual(len(s_list), len(source))
        # Assert no repeated sources used
        source_set = set((x, y) for x, y in s_list)
        self.assertEqual(len(s_list), len(source_set))
        # Assert no repeated targets used
        target_set = set((x, y) for x, y in t_list)
        self.assertEqual(len(t_list), len(target_set))
        # Assert s_list is a subset of source
        self.assertTrue(source_set <= set((x, y) for x, y in source))
        # Assert t_list is a subset of target
        self.assertTrue(target_set <= set((x, y) for x, y in target))


class TestFewSources(unittest.TestCase):
    """Tests of find_transform with very few (3-6) stars per image."""

    def setUp(self):
        self.h = 512  # image height
        self.w = 512  # image width
        self.x_offset = 10
        self.y_offset = -20
        self.rot_angle = 50.0 * np.pi / 180.0

    def check_if_ok(self, numstars):
        "Helper function with common test code for 3, 4, 5, and 6 stars"
        from skimage.transform import estimate_transform, matrix_transform

        if numstars > 6:
            raise NotImplementedError

        # x and y of stars in the ref frame (int's)
        self.star_refx = np.array([100, 120, 400, 400, 200, 200])[:numstars]
        self.star_refy = np.array([150, 200, 200, 320, 210, 350])[:numstars]
        self.num_stars = numstars
        # Fluxes of stars
        self.star_f = np.array(numstars * [700.0])

        (
            self.image,
            self.image_ref,
            self.star_ref_pos,
            self.star_new_pos,
        ) = simulate_image_pair(
            shape=(self.h, self.w),
            translation=(self.x_offset, self.y_offset),
            rot_angle_deg=50.0,
            num_stars=self.num_stars,
            star_refx=self.star_refx,
            star_refy=self.star_refy,
            star_flux=self.star_f,
        )

        source = self.star_ref_pos
        dest = self.star_new_pos.copy()
        t_true = estimate_transform("similarity", source, dest)

        # disorder dest points so they don't match the order of source
        np.random.shuffle(dest)

        t, (src_pts, dst_pts) = aa.find_transform(source, dest)
        self.assertLess(t_true.scale - t.scale, 1e-10)
        self.assertLess(t_true.rotation - t.rotation, 1e-10)
        self.assertLess(
            np.linalg.norm(t_true.translation - t.translation), 1.0
        )
        self.assertEqual(src_pts.shape[0], dst_pts.shape[0])
        self.assertLessEqual(src_pts.shape[0], source.shape[0])
        self.assertEqual(src_pts.shape[1], 2)
        self.assertEqual(dst_pts.shape[1], 2)
        dst_pts_test = matrix_transform(src_pts, t.params)
        self.assertLess(np.linalg.norm(dst_pts_test - dst_pts), 1.0)

    def test_find_transform_twosources(self):
        with self.assertRaises(Exception):
            self.check_if_ok(2)

    def test_find_transform_threesources(self):
        self.check_if_ok(3)

    def test_find_transform_foursources(self):
        self.check_if_ok(4)

    def test_find_transform_fivesources(self):
        self.check_if_ok(5)

    def test_find_transform_sixsources(self):
        self.check_if_ok(6)

    # def test_register(self):
    #     ...
    # def test_consistent_invert(self):
    #     ...
if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------
/astroalign.py:
--------------------------------------------------------------------------------
# MIT License

# Copyright (c) 2016-2019 Martin Beroiz

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


"""
ASTROALIGN is a simple package that will try to align two stellar astronomical
images, especially when there is no WCS information available.

It does so by finding similar 3-point asterisms (triangles) in both images and
deducing the affine transformation between them.

General registration routines try to match feature points, using corner
detection routines to make the point correspondence.
These generally fail for stellar astronomical images, since stars have very
little stable structure and so, in general, indistinguishable from each other.

Asterism matching is more robust, and closer to the human way of matching
stellar images.

Astroalign can match images of very different field of view, point-spread
functions, seeing and atmospheric conditions.

(c) Martin Beroiz
"""


__version__ = "2.0.2"

__all__ = [
    "MAX_CONTROL_POINTS",
    "MIN_MATCHES_FRACTION",
    "MaxIterError",
    "NUM_NEAREST_NEIGHBORS",
    "PIXEL_TOL",
    "apply_transform",
    "estimate_transform",
    "find_transform",
    "matrix_transform",
    "register",
]


import numpy as _np

from skimage.transform import estimate_transform
from skimage.transform import matrix_transform  # noqa


MAX_CONTROL_POINTS = 50
"""The maximum control points (stars) to use to build the invariants.

Default: 50"""

PIXEL_TOL = 2
"""The pixel distance tolerance to assume two invariant points are the same.

Default: 2"""

MIN_MATCHES_FRACTION = 0.8
"""The minimum fraction of triangle matches to accept a transformation.

If the minimum fraction yields more than 10 triangles, 10 is used instead.

Default: 0.8
"""

NUM_NEAREST_NEIGHBORS = 5
"""
The number of nearest neighbors of a given star (including itself) to construct
the triangle invariants.

Default: 5
"""


def _invariantfeatures(x1, x2, x3):
    "Given 3 points x1, x2, x3, return the invariant features for the set."
    # Sort the three side lengths ascending: sides[0] <= sides[1] <= sides[2].
    sides = _np.sort(
        [
            _np.linalg.norm(x1 - x2),
            _np.linalg.norm(x2 - x3),
            _np.linalg.norm(x1 - x3),
        ]
    )
    # Side-length ratios are invariant under translation, rotation and
    # uniform scaling of the triangle.
    return [sides[2] / sides[1], sides[1] / sides[0]]


def _arrangetriplet(sources, vertex_indices):
    """Return vertex_indices ordered in an (a, b, c) form where:
  a is the vertex defined by L1 & L2
  b is the vertex defined by L2 & L3
  c is the vertex defined by L3 & L1
and L1 < L2 < L3 are the sides of the triangle defined by vertex_indices."""
    ind1, ind2, ind3 = vertex_indices
    x1, x2, x3 = sources[vertex_indices]

    # Each row pairs the two vertex indices that bound one side.
    side_ind = _np.array([(ind1, ind2), (ind2, ind3), (ind3, ind1)])
    side_lengths = list(map(_np.linalg.norm, (x1 - x2, x2 - x3, x3 - x1)))
    l1_ind, l2_ind, l3_ind = _np.argsort(side_lengths)

    # the most common vertex in the list of vertices for two sides is the
    # point at which they meet.
    from collections import Counter

    count = Counter(side_ind[[l1_ind, l2_ind]].flatten())
    a = count.most_common(1)[0][0]
    count = Counter(side_ind[[l2_ind, l3_ind]].flatten())
    b = count.most_common(1)[0][0]
    count = Counter(side_ind[[l3_ind, l1_ind]].flatten())
    c = count.most_common(1)[0][0]

    return _np.array([a, b, c])


def _generate_invariants(sources):
    """Return an array of (unique) invariants derived from the array `sources`.
Return an array of the indices of `sources` that correspond to each invariant,
arranged as described in _arrangetriplet.
"""
    from scipy.spatial import KDTree
    from itertools import combinations
    from functools import partial

    arrange = partial(_arrangetriplet, sources=sources)

    inv = []
    triang_vrtx = []
    coordtree = KDTree(sources)
    # The number of nearest neighbors to request (to work with few sources)
    knn = min(len(sources), NUM_NEAREST_NEIGHBORS)
    for asrc in sources:
        __, indx = coordtree.query(asrc, knn)

        # Generate all possible triangles with the 5 indx provided, and store
        # them with the order (a, b, c) defined in _arrangetriplet
        all_asterism_triang = [
            arrange(vertex_indices=list(cmb)) for cmb in combinations(indx, 3)
        ]
        triang_vrtx.extend(all_asterism_triang)

        inv.extend(
            [
                _invariantfeatures(*sources[triplet])
                for triplet in all_asterism_triang
            ]
        )

    # Remove here all possible duplicate triangles
    # NOTE(review): this membership test is quadratic in the number of
    # triangles; fine for MAX_CONTROL_POINTS-sized inputs, but worth
    # revisiting if the control-point cap is ever raised substantially.
    uniq_ind = [
        pos for (pos, elem) in enumerate(inv) if elem not in inv[pos + 1:]
    ]
    inv_uniq = _np.array(inv)[uniq_ind]
    triang_vrtx_uniq = _np.array(triang_vrtx)[uniq_ind]

    return inv_uniq, triang_vrtx_uniq


class _MatchTransform:
    # Model object used by the RANSAC-style fitting: holds the source and
    # target control points and exposes fit/get_error over triangle matches.
    def __init__(self, source, target):
        self.source = source
        self.target = target

    def fit(self, data):
        """
        Return the best 2D similarity transform from the points given in data.

        data: N sets of similar corresponding triangles.
            3 indices for a triangle in ref
            and the 3 indices for the corresponding triangle in target;
            arranged in a (N, 3, 2) array.
        """
        d1, d2, d3 = data.shape
        # Flatten the N triangles into 3*N (source, target) index pairs.
        s, d = data.reshape(d1 * d2, d3).T
        approx_t = estimate_transform(
            "similarity", self.source[s], self.target[d]
        )
        return approx_t

    def get_error(self, data, approx_t):
        # Per-triangle error: the worst residual among a triangle's 3 points.
        d1, d2, d3 = data.shape
        s, d = data.reshape(d1 * d2, d3).T
        resid = approx_t.residuals(self.source[s], self.target[d]).reshape(
            d1, d2
        )
        error = resid.max(axis=1)
        return error


def find_transform(source, target):
    """Estimate the transform between ``source`` and ``target``.

    Return a SimilarityTransform object ``T`` that maps pixel x, y indices from
    the source image s = (x, y) into the target (destination) image t = (x, y).
    T contains parameters of the tranformation: ``T.rotation``,
    ``T.translation``, ``T.scale``, ``T.params``.

    Args:
        source (array-like): Either a numpy array of the source image to be
            transformed or an interable of (x, y) coordinates of the target
            control points.
        target (array-like): Either a numpy array of the target (destination)
            image or an interable of (x, y) coordinates of the target
            control points.

    Returns:
        The transformation object and a tuple of corresponding star positions
        in source and target.::

            T, (source_pos_array, target_pos_array)

    Raises:
        TypeError: If input type of ``source`` or ``target`` is not supported.
        ValueError: If it cannot find more than 3 stars on any input.
233 | """ 234 | from scipy.spatial import KDTree 235 | 236 | try: 237 | if len(source[0]) == 2: 238 | # Assume it's a list of (x, y) pairs 239 | source_controlp = _np.array(source)[:MAX_CONTROL_POINTS] 240 | else: 241 | # Assume it's a 2D image 242 | source_controlp = _find_sources(source)[:MAX_CONTROL_POINTS] 243 | except Exception: 244 | raise TypeError("Input type for source not supported.") 245 | 246 | try: 247 | if len(target[0]) == 2: 248 | # Assume it's a list of (x, y) pairs 249 | target_controlp = _np.array(target)[:MAX_CONTROL_POINTS] 250 | else: 251 | # Assume it's a 2D image 252 | target_controlp = _find_sources(target)[:MAX_CONTROL_POINTS] 253 | except Exception: 254 | raise TypeError("Input type for target not supported.") 255 | 256 | # Check for low number of reference points 257 | if len(source_controlp) < 3: 258 | raise ValueError( 259 | "Reference stars in source image are less than the " 260 | "minimum value (3)." 261 | ) 262 | if len(target_controlp) < 3: 263 | raise ValueError( 264 | "Reference stars in target image are less than the " 265 | "minimum value (3)." 266 | ) 267 | 268 | source_invariants, source_asterisms = _generate_invariants(source_controlp) 269 | source_invariant_tree = KDTree(source_invariants) 270 | 271 | target_invariants, target_asterisms = _generate_invariants(target_controlp) 272 | target_invariant_tree = KDTree(target_invariants) 273 | 274 | # r = 0.1 is the maximum search distance, 0.1 is an empirical value that 275 | # returns about the same number of matches than inputs 276 | # matches_list is a list of lists such that for each element 277 | # source_invariant_tree.data[i], matches_list[i] is a list of the indices 278 | # of its neighbors in target_invariant_tree.data 279 | matches_list = source_invariant_tree.query_ball_tree( 280 | target_invariant_tree, r=0.1 281 | ) 282 | 283 | # matches unravels the previous list of matches into pairs of source and 284 | # target control point matches. 
285 | # matches is a (N, 3, 2) array. N sets of similar corresponding triangles. 286 | # 3 indices for a triangle in ref 287 | # and the 3 indices for the corresponding triangle in target; 288 | matches = [] 289 | # t1 is an asterism in source, t2 in target 290 | for t1, t2_list in zip(source_asterisms, matches_list): 291 | for t2 in target_asterisms[t2_list]: 292 | matches.append(list(zip(t1, t2))) 293 | matches = _np.array(matches) 294 | 295 | inv_model = _MatchTransform(source_controlp, target_controlp) 296 | n_invariants = len(matches) 297 | max_iter = n_invariants 298 | # Set the minimum matches to be between 1 and 10 asterisms 299 | min_matches = max(1, min(10, int(n_invariants * MIN_MATCHES_FRACTION))) 300 | if (len(source_controlp) == 3 or len(target_controlp) == 3) and len( 301 | matches 302 | ) == 1: 303 | best_t = inv_model.fit(matches) 304 | inlier_ind = _np.arange(len(matches)) # All of the indices 305 | else: 306 | best_t, inlier_ind = _ransac( 307 | matches, inv_model, 1, max_iter, PIXEL_TOL, min_matches 308 | ) 309 | triangle_inliers = matches[inlier_ind] 310 | d1, d2, d3 = triangle_inliers.shape 311 | inl_arr = triangle_inliers.reshape(d1 * d2, d3) 312 | inl_unique = set(tuple(pair) for pair in inl_arr) 313 | # In the next, multiple assignements to the same source point s are removed 314 | # We keep the pair (s, t) with the lowest reprojection error. 
315 | inl_dict = {} 316 | for s_i, t_i in inl_unique: 317 | # calculate error 318 | s_vertex = source_controlp[s_i] 319 | t_vertex = target_controlp[t_i] 320 | s_vertex_expanded = _np.append(s_vertex, [1]).reshape(3, 1) 321 | s_vertex_pred = best_t.params.dot(s_vertex_expanded)[:2].reshape(-1) 322 | error = _np.linalg.norm(s_vertex_pred - t_vertex) 323 | 324 | # if s_i not in dict, or if its error is smaller than previous error 325 | if s_i not in inl_dict or (error < inl_dict[s_i][1]): 326 | inl_dict[s_i] = (t_i, error) 327 | inl_arr_unique = _np.array( 328 | [[s_i, t_i] for s_i, (t_i, e) in inl_dict.items()] 329 | ) 330 | s, d = inl_arr_unique.T 331 | 332 | return best_t, (source_controlp[s], target_controlp[d]) 333 | 334 | 335 | def apply_transform( 336 | transform, source, target, fill_value=None, propagate_mask=False 337 | ): 338 | """Applies the transformation ``transform`` to ``source``. 339 | 340 | The output image will have the same shape as ``target``. 341 | 342 | Args: 343 | transform: A scikit-image ``SimilarityTransform`` object. 344 | source (numpy array): A 2D numpy array of the source image to be 345 | transformed. 346 | target (numpy array): A 2D numpy array of the target image. Only used 347 | to set the output image shape. 348 | fill_value (float): A value to fill in the areas of aligned_image 349 | where footprint == True. 350 | propagate_mask (bool): Wether to propagate the mask in source.mask 351 | onto footprint. 352 | 353 | Return: 354 | A tuple (aligned_image, footprint). 355 | aligned_image is a numpy 2D array of the transformed source 356 | footprint is a mask 2D array with True on the regions 357 | with no pixel information. 
358 | """ 359 | from skimage.transform import warp 360 | 361 | if hasattr(source, "data") and isinstance(source.data, _np.ndarray): 362 | source_data = source.data 363 | else: 364 | source_data = source 365 | if hasattr(target, "data") and isinstance(target.data, _np.ndarray): 366 | target_data = target.data 367 | else: 368 | target_data = target 369 | 370 | aligned_image = warp( 371 | source_data, 372 | inverse_map=transform.inverse, 373 | output_shape=target_data.shape, 374 | order=3, 375 | mode="constant", 376 | cval=_np.median(source_data), 377 | clip=False, 378 | preserve_range=True, 379 | ) 380 | footprint = warp( 381 | _np.zeros(source_data.shape, dtype="float32"), 382 | inverse_map=transform.inverse, 383 | output_shape=target_data.shape, 384 | cval=1.0, 385 | ) 386 | footprint = footprint > 0.4 387 | 388 | if hasattr(source, "mask") and propagate_mask: 389 | source_mask = _np.array(source.mask) 390 | if source_mask.shape == source_data.shape: 391 | source_mask_rot = warp( 392 | source_mask.astype("float32"), 393 | inverse_map=transform.inverse, 394 | output_shape=target_data.shape, 395 | cval=1.0, 396 | ) 397 | source_mask_rot = source_mask_rot > 0.4 398 | footprint = footprint | source_mask_rot 399 | if fill_value is not None: 400 | aligned_image[footprint] = fill_value 401 | 402 | return aligned_image, footprint 403 | 404 | 405 | def register(source, target, fill_value=None, propagate_mask=False): 406 | """Transform ``source`` to coincide pixel to pixel with ``target``. 407 | 408 | Args: 409 | source (numpy array): A 2D numpy array of the source image to be 410 | transformed. 411 | target (numpy array): A 2D numpy array of the target image. Only used 412 | to set the output image shape. 413 | fill_value (float): A value to fill in the areas of aligned_image 414 | where footprint == True. 415 | propagate_mask (bool): Wether to propagate the mask in source.mask 416 | onto footprint. 417 | 418 | Return: 419 | A tuple (aligned_image, footprint). 
420 | aligned_image is a numpy 2D array of the transformed source 421 | footprint is a mask 2D array with True on the regions 422 | with no pixel information. 423 | 424 | 425 | """ 426 | t, __ = find_transform(source=source, target=target) 427 | aligned_image, footprint = apply_transform( 428 | t, source, target, fill_value, propagate_mask 429 | ) 430 | return aligned_image, footprint 431 | 432 | 433 | def _find_sources(img): 434 | "Return sources (x, y) sorted by brightness." 435 | 436 | import sep 437 | 438 | if isinstance(img, _np.ma.MaskedArray): 439 | image = img.filled(fill_value=_np.median(img)).astype("float32") 440 | else: 441 | image = img.astype("float32") 442 | bkg = sep.Background(image) 443 | thresh = 3.0 * bkg.globalrms 444 | sources = sep.extract(image - bkg.back(), thresh) 445 | sources.sort(order="flux") 446 | return _np.array([[asrc["x"], asrc["y"]] for asrc in sources[::-1]]) 447 | 448 | 449 | # Copyright (c) 2004-2007, Andrew D. Straw. All rights reserved. 450 | 451 | # Redistribution and use in source and binary forms, with or without 452 | # modification, are permitted provided that the following conditions are 453 | # met: 454 | 455 | # * Redistributions of source code must retain the above copyright 456 | # notice, this list of conditions and the following disclaimer. 457 | 458 | # * Redistributions in binary form must reproduce the above 459 | # copyright notice, this list of conditions and the following 460 | # disclaimer in the documentation and/or other materials provided 461 | # with the distribution. 462 | 463 | # * Neither the name of the Andrew D. Straw nor the names of its 464 | # contributors may be used to endorse or promote products derived 465 | # from this software without specific prior written permission. 
466 | 467 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 468 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 469 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 470 | # a PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 471 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 472 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 473 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 474 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 475 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 476 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 477 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 478 | # 479 | # 480 | # Modified by Martin Beroiz 481 | 482 | 483 | class MaxIterError(RuntimeError): 484 | pass 485 | 486 | 487 | def _ransac(data, model, min_data_points, max_iter, thresh, min_matches): 488 | """fit model parameters to data using the RANSAC algorithm 489 | 490 | This implementation written from pseudocode found at 491 | http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182 492 | 493 | Given: 494 | data: a set of data points 495 | model: a model that can be fitted to data points 496 | min_data_points: the minimum number of data values required to fit the 497 | model 498 | max_iter: the maximum number of iterations allowed in the algorithm 499 | thresh: a threshold value to determine when a data point fits a model 500 | min_matches: the min number of matches required to assert that a model 501 | fits well to data 502 | Return: 503 | bestfit: model parameters which best fit the data (or nil if no good model 504 | is found) 505 | """ 506 | iterations = 0 507 | bestfit = None 508 | best_inlier_idxs = None 509 | n_data = data.shape[0] 510 | n = min_data_points 511 | all_idxs = _np.arange(n_data) 512 | 513 | 
while iterations < max_iter: 514 | # Partition indices into two random subsets 515 | _np.random.shuffle(all_idxs) 516 | maybe_idxs, test_idxs = all_idxs[:n], all_idxs[n:] 517 | maybeinliers = data[maybe_idxs, :] 518 | test_points = data[test_idxs, :] 519 | maybemodel = model.fit(maybeinliers) 520 | test_err = model.get_error(test_points, maybemodel) 521 | # select indices of rows with accepted points 522 | also_idxs = test_idxs[test_err < thresh] 523 | alsoinliers = data[also_idxs, :] 524 | if len(alsoinliers) >= min_matches: 525 | betterdata = _np.concatenate((maybeinliers, alsoinliers)) 526 | bestfit = model.fit(betterdata) 527 | best_inlier_idxs = _np.concatenate((maybe_idxs, also_idxs)) 528 | break 529 | iterations += 1 530 | if bestfit is None: 531 | raise MaxIterError( 532 | "Max iterations exceeded while trying to find " 533 | "acceptable transformation." 534 | ) 535 | 536 | return bestfit, best_inlier_idxs 537 | --------------------------------------------------------------------------------