├── .DS_Store ├── .flake8 ├── .github └── workflows │ ├── publish-to-pypi.yml │ └── python-package.yml ├── .gitignore ├── .gitmodules ├── .readthedocs.yml ├── CHANGES.rst ├── CITATION.cff ├── LICENSE.txt ├── MANIFEST ├── README.rst ├── binder └── requirements.txt ├── docs ├── Makefile ├── _templates │ └── autosummary │ │ ├── base.rst │ │ ├── class.rst │ │ └── module.rst ├── conf.py ├── eazy │ ├── api.rst │ ├── filters.rst │ ├── install.rst │ ├── photoz.rst │ ├── templates.rst │ └── zout_columns.rst ├── examples │ ├── HDFN-demo.ipynb │ ├── Riverside-demo.ipynb │ ├── compare_sps_algorithm.ipynb │ └── dash_viewer.png ├── index.rst ├── make.bat └── rtd-pip-requirements ├── eazy ├── __init__.py ├── data │ ├── DLAcoeff.txt │ ├── LAFcoeff.txt │ ├── alpha_lyr_stis_008.fits │ ├── emlines_info.dat │ ├── spectra_kc13_12_tweak.params │ └── zphot.param.default ├── filters.py ├── hdf5.py ├── igm.py ├── param.py ├── photoz.py ├── sps.py ├── templates.py ├── tests │ ├── __init__.py │ ├── test_filters.py │ ├── test_igm.py │ ├── test_param.py │ ├── test_photoz.py │ ├── test_templates.py │ └── test_utils.py ├── utils.py └── visualization.py ├── pyproject.toml ├── requirements.txt ├── scripts ├── README.rst ├── nmf.py ├── photoz.py ├── run.py └── sps.py └── setup.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gbrammer/eazy-py/292172c331b67f34358e9ff2500fe09a82757635/.DS_Store -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | count = true 3 | select = E9,F63,F7,F82 4 | show-source = true 5 | statistics = true 6 | exclude = .git,build,docs,scripts,binder -------------------------------------------------------------------------------- /.github/workflows/publish-to-pypi.yml: -------------------------------------------------------------------------------- 1 | name: 
Publish-to-pypi 2 | 3 | on: 4 | release: 5 | types: [released] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | build_sdist: 10 | name: source distribution 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | - name: Build sdist 18 | run: | 19 | pip install build 20 | python -m build --sdist 21 | - uses: actions/upload-artifact@v4 22 | with: 23 | path: dist/*.tar.gz 24 | 25 | upload_pypi: 26 | name: upload to PyPI 27 | needs: [build_sdist] 28 | runs-on: ubuntu-latest 29 | if: github.event_name == 'push' && 30 | github.event.ref_type == 'tag' && 31 | github.ref == 'refs/heads/${{ github.event.repository.default_branch }}' 32 | steps: 33 | - uses: actions/download-artifact@v4.1.7 34 | with: 35 | name: artifact 36 | path: dist 37 | - uses: pypa/gh-action-pypi-publish@v1.5.1 38 | with: 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: CI 5 | 6 | on: 7 | push: 8 | branches: 9 | - master 10 | pull_request: 11 | 12 | env: 13 | # Dummy to make prospector work 14 | SPS_HOME: xxxxdummyxxxx 15 | 16 | jobs: 17 | build: 18 | runs-on: ${{ matrix.os }} 19 | strategy: 20 | matrix: 21 | python-version: [3.8, 3.9, '3.10'] 22 | os: [ubuntu-latest] 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python ${{ matrix.python-version }} 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | - name: Build check 30 | run: | 31 | pip install build twine 32 | python -m build 33 | python -m twine check --strict dist/* 34 
| 35 | codestyle: 36 | runs-on: ${{ matrix.os }} 37 | strategy: 38 | matrix: 39 | python-version: ['3.9'] 40 | os: [ubuntu-latest] 41 | steps: 42 | - uses: actions/checkout@v3 43 | - name: Set up Python ${{ matrix.python-version }} 44 | uses: actions/setup-python@v3 45 | with: 46 | python-version: ${{ matrix.python-version }} 47 | - name: Install package 48 | run: | 49 | pip install flake8 50 | - name: Lint with flake8 51 | run: | 52 | flake8 . 53 | test: 54 | runs-on: ${{ matrix.os }} 55 | strategy: 56 | matrix: 57 | python-version: [3.9, '3.10', '3.11', '3.12'] 58 | os: [ubuntu-latest] 59 | steps: 60 | - uses: actions/checkout@v3 61 | - name: Set up Python ${{ matrix.python-version }} 62 | uses: actions/setup-python@v3 63 | with: 64 | python-version: ${{ matrix.python-version }} 65 | - name: Install package 66 | run: | 67 | pip install -e .[test] -r requirements.txt 68 | # - name: Test with pytest 69 | # run: | 70 | # pytest 71 | - name: Run tests with coverage 72 | run: | 73 | pip install pytest coverage 74 | python3 -c "import eazy; eazy.fetch_eazy_photoz()" 75 | coverage run -m pytest 76 | - name: Upload coverage reports to Codecov 77 | uses: codecov/codecov-action@v4.0.1 78 | with: 79 | token: ${{ secrets.CODECOV_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | eazy/version.py 2 | 3 | # now cloned by the pytest script 4 | eazy/data/eazy-photoz 5 | eazy/data/filters 6 | eazy/data/templates 7 | eazy/data/hdfn_fs99 8 | 9 | # created by pytest 10 | eazy/data/test.hdf5 11 | 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | *$py.class 16 | 17 | Test/* 18 | eazy/data/eazy_test* 19 | eazy/data/zphot.translate.test* 20 | 21 | # C extensions 22 | *.so 23 | 24 | # Demo things 25 | docs/examples/inputs 26 | docs/examples/templates 27 | docs/examples/hdfn* 28 | docs/examples/FILTER* 29 | 
docs/examples/zphot* 30 | docs/examples/zfourge.uds* 31 | 32 | *DS_Store 33 | 34 | # Distribution / packaging 35 | .Python 36 | env/ 37 | build/ 38 | develop-eggs/ 39 | dist/ 40 | downloads/ 41 | eggs/ 42 | .eggs/ 43 | lib/ 44 | lib64/ 45 | parts/ 46 | sdist/ 47 | var/ 48 | *.egg-info/ 49 | .installed.cfg 50 | *.egg 51 | 52 | # PyInstaller 53 | # Usually these files are written by a python script from a template 54 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 55 | *.manifest 56 | *.spec 57 | 58 | # Installer logs 59 | pip-log.txt 60 | pip-delete-this-directory.txt 61 | 62 | # Unit test / coverage reports 63 | htmlcov/ 64 | .tox/ 65 | .coverage 66 | .coverage.* 67 | .cache 68 | nosetests.xml 69 | coverage.xml 70 | *,cover 71 | .hypothesis/ 72 | 73 | # Translations 74 | *.mo 75 | *.pot 76 | 77 | # Django stuff: 78 | *.log 79 | local_settings.py 80 | 81 | # Flask stuff: 82 | instance/ 83 | .webassets-cache 84 | 85 | # Scrapy stuff: 86 | .scrapy 87 | 88 | # Sphinx documentation 89 | docs/_build/ 90 | docs/api/ 91 | 92 | # PyBuilder 93 | target/ 94 | 95 | # IPython Notebook 96 | .ipynb_checkpoints 97 | 98 | # pyenv 99 | .python-version 100 | 101 | # celery beat schedule file 102 | celerybeat-schedule 103 | 104 | # dotenv 105 | .env 106 | 107 | # virtualenv 108 | venv/ 109 | ENV/ 110 | 111 | # Spyder project settings 112 | .spyderproject 113 | 114 | # Rope project settings 115 | .ropeproject 116 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gbrammer/eazy-py/292172c331b67f34358e9ff2500fe09a82757635/.gitmodules -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See 
https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-20.04 11 | tools: 12 | python: "3.10" 13 | 14 | # Build documentation in the docs/ directory with Sphinx 15 | sphinx: 16 | configuration: docs/conf.py 17 | 18 | # Optionally build your docs in additional formats such as PDF and ePub 19 | formats: 20 | - htmlzip 21 | 22 | python: 23 | install: 24 | - method: pip 25 | path: . 26 | extra_requirements: 27 | - docs 28 | -------------------------------------------------------------------------------- /CHANGES.rst: -------------------------------------------------------------------------------- 1 | 0.6.2 2 | ----- 3 | 4 | - Minor updates for Github CI -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | title: eazy-py 3 | abstract: >- 4 | Pythonic photometric redshift tools based on EAZY (Brammer, van Dokkum & Coppi, 2008, 10.1086/591786). 5 | message: Please cite this software using these metadata. 
6 | version: 0.5.2 7 | doi: 10.5281/zenodo.5012704 8 | date-released: 2021-06-22 9 | url: https://github.com/gbrammer/eazy-py 10 | publisher: Zenodo 11 | keywords: 12 | - Astronomy software (1855) 13 | type: software 14 | authors: 15 | - given-names: Gabriel 16 | family-names: Brammer 17 | email: gbrammer@gmail.com 18 | affiliation: >- 19 | Cosmic Dawn Center, Niels Bohr Institute, 20 | University of Copenhagen 21 | orcid: 'https://orcid.org/0000-0003-2680-0055' 22 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016-2022 Gabriel Brammer 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | # file GENERATED by distutils, do NOT edit 2 | setup.cfg 3 | setup.py 4 | eazy/__init__.py 5 | eazy/filters.py 6 | eazy/igm.py 7 | eazy/nmf.py 8 | eazy/param.py 9 | eazy/photoz.py 10 | eazy/run.py 11 | eazy/templates.py 12 | eazy/utils.py 13 | eazy/data/DLAcoeff.txt 14 | eazy/data/LAFcoeff.txt 15 | eazy/data/spectra_kc13_12_tweak.params 16 | eazy/data/zphot.param.default 17 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://github.com/gbrammer/eazy-py/actions/workflows/python-package.yml/badge.svg 2 | :target: https://github.com/gbrammer/eazy-py/actions 3 | 4 | .. image:: https://badge.fury.io/py/eazy.svg 5 | :target: https://badge.fury.io/py/eazy 6 | 7 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.5012705.svg 8 | :target: https://doi.org/10.5281/zenodo.5012705 9 | 10 | 11 | eazy-py: Pythonic photometric redshift tools based on EAZY 12 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 | 14 | Under heavy construction.... 15 | 16 | Documentation will be here: https://eazy-py.readthedocs.io/, though it's essentially just the module API for now. 17 | 18 | Templates and filter files still here: https://github.com/gbrammer/eazy-photoz/. 19 | 20 | .. note:: 21 | Please submit any questions/comments/problems you have through the `Issues `_ interface. 22 | 23 | Installation instructions 24 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | 26 | .. 
code:: bash 27 | 28 | $ pip install eazy 29 | $ pip install eazy[vistool] # for the dash visualiation tool 30 | 31 | # Install extra dependency 32 | $ pip install git+https://github.com/karllark/dust_attenuation.git 33 | 34 | # Get templates and filters from https://github.com/gbrammer/eazy-photoz 35 | $ python -c "import eazy; eazy.fetch_eazy_photoz()" 36 | 37 | Demo 38 | ~~~~ 39 | 40 | .. image:: https://colab.research.google.com/assets/colab-badge.svg 41 | :target: https://colab.research.google.com/github/gbrammer/eazy-py/blob/master/docs/examples/HDFN-demo.ipynb 42 | 43 | .. image:: https://mybinder.org/badge_logo.svg 44 | :target: https://mybinder.org/v2/gh/gbrammer/eazy-py/HEAD?filepath=docs%2Fexamples%2FHDFN-demo.ipynb 45 | 46 | 47 | Citation 48 | ~~~~~~~~ 49 | Please cite both this repository and `Brammer et al. (2008) `_. A BiBTeX for this repository can be generated via the *Cite this repository* link in the upper left corner of the `GitHub page `_. 50 | -------------------------------------------------------------------------------- /binder/requirements.txt: -------------------------------------------------------------------------------- 1 | -r ../requirements.txt 2 | dash 3 | requests 4 | flask 5 | retrying 6 | ipython 7 | ipykernel 8 | ansi2html 9 | notebook 10 | jupyter_server_proxy 11 | pandas 12 | numpy 13 | jupyter-dash -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | #This is needed with git because git doesn't create a dir if it's empty 18 | $(shell [ -d "_static" ] || mkdir -p _static) 19 | 20 | help: 21 | @echo "Please use \`make ' where is one of" 22 | @echo " html to make standalone HTML files" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " singlehtml to make a single large HTML file" 25 | @echo " pickle to make pickle files" 26 | @echo " json to make JSON files" 27 | @echo " htmlhelp to make HTML files and a HTML help project" 28 | @echo " qthelp to make HTML files and a qthelp project" 29 | @echo " devhelp to make HTML files and a Devhelp project" 30 | @echo " epub to make an epub" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " text to make text files" 34 | @echo " man to make manual pages" 35 | @echo " changes to make an overview of all changed/added/deprecated items" 36 | @echo " linkcheck to check all external links for integrity" 37 | 38 | clean: 39 | -rm -rf $(BUILDDIR) 40 | -rm -rf api 41 | -rm -rf generated 42 | 43 | html: 44 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 45 | @echo 46 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 47 | 48 | dirhtml: 49 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 50 | @echo 51 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 52 | 53 | singlehtml: 54 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 55 | @echo 56 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 57 | 58 | pickle: 59 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 60 | @echo 61 | @echo "Build finished; now you can process the pickle files." 
62 | 63 | json: 64 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 65 | @echo 66 | @echo "Build finished; now you can process the JSON files." 67 | 68 | htmlhelp: 69 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 70 | @echo 71 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 72 | ".hhp project file in $(BUILDDIR)/htmlhelp." 73 | 74 | qthelp: 75 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 76 | @echo 77 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 78 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 79 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Astropy.qhcp" 80 | @echo "To view the help file:" 81 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Astropy.qhc" 82 | 83 | devhelp: 84 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 85 | @echo 86 | @echo "Build finished." 87 | @echo "To view the help file:" 88 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Astropy" 89 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Astropy" 90 | @echo "# devhelp" 91 | 92 | epub: 93 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 94 | @echo 95 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 96 | 97 | latex: 98 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 99 | @echo 100 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 101 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 102 | "(use \`make latexpdf' here to do that automatically)." 103 | 104 | latexpdf: 105 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 106 | @echo "Running LaTeX files through pdflatex..." 107 | make -C $(BUILDDIR)/latex all-pdf 108 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 109 | 110 | text: 111 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 112 | @echo 113 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 
114 | 115 | man: 116 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 117 | @echo 118 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 119 | 120 | changes: 121 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 122 | @echo 123 | @echo "The overview file is in $(BUILDDIR)/changes." 124 | 125 | linkcheck: 126 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 127 | @echo 128 | @echo "Link check complete; look for any errors in the above output " \ 129 | "or in $(BUILDDIR)/linkcheck/output.txt." 130 | 131 | doctest: 132 | @echo "Run 'python setup.py test' in the root directory to run doctests " \ 133 | @echo "in the documentation." 134 | -------------------------------------------------------------------------------- /docs/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/base.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/class.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/module.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. 
If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst 3 | # 4 | # Astropy documentation build configuration file. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this file. 9 | # 10 | # All configuration values have a default. Some values are defined in 11 | # the global Astropy configuration which is loaded here before anything else. 12 | # See astropy.sphinx.conf for which values are set there. 13 | 14 | # If extensions (or modules to document with autodoc) are in another directory, 15 | # add these directories to sys.path here. If the directory is relative to the 16 | # documentation root, use os.path.abspath to make it absolute, like shown here. 17 | # sys.path.insert(0, os.path.abspath('..')) 18 | # IMPORTANT: the above commented section was generated by sphinx-quickstart, but 19 | # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left 20 | # commented out with this explanation to make it clear why this should not be 21 | # done. If the sys.path entry above is added, when the astropy.sphinx.conf 22 | # import occurs, it will import the *source* version of astropy instead of the 23 | # version installed (if invoked as "make html" or directly with sphinx), or the 24 | # version in the build directory (if "python setup.py build_sphinx" is used). 25 | # Thus, any C-extensions that are needed to build the documentation will *not* 26 | # be accessible, and the documentation will not build correctly. 
27 | 28 | import os 29 | import sys 30 | import datetime 31 | from importlib import import_module 32 | 33 | import toml 34 | 35 | try: 36 | from sphinx_astropy.conf.v1 import * # noqa 37 | except ImportError: 38 | print('ERROR: the documentation requires the sphinx-astropy package to be installed') 39 | sys.exit(1) 40 | 41 | # Get configuration information from setup.cfg 42 | # from configparser import ConfigParser 43 | # conf = ConfigParser() 44 | # conf.read([os.path.join(os.path.dirname(__file__), '..', 'pyproject.toml')]) 45 | # setup_cfg = dict(conf.items('metadata')) 46 | 47 | setup_cfg = toml.load("../pyproject.toml")["project"] 48 | 49 | # -- General configuration ---------------------------------------------------- 50 | 51 | # By default, highlight as Python 3. 52 | highlight_language = 'python3' 53 | 54 | # If your documentation needs a minimal Sphinx version, state it here. 55 | #needs_sphinx = '1.2' 56 | 57 | # To perform a Sphinx version check that needs to be more specific than 58 | # major.minor, call `check_sphinx_version("x.y.z")` here. 59 | # check_sphinx_version("1.2.1") 60 | 61 | # List of patterns, relative to source directory, that match files and 62 | # directories to ignore when looking for source files. 63 | exclude_patterns.append('_templates') 64 | 65 | # This is added to the end of RST files - a good place to put substitutions to 66 | # be used globally. 67 | rst_epilog += """ 68 | """ 69 | 70 | # -- Project information ------------------------------------------------------ 71 | 72 | # This does not *have* to match the package name, but typically does 73 | project = setup_cfg['name'] 74 | author = setup_cfg['authors'][0]['name'] 75 | copyright = '{0}, {1}'.format( 76 | datetime.datetime.now().year, author) 77 | 78 | # The version info for the project you're documenting, acts as replacement for 79 | # |version| and |release|, also used in various other places throughout the 80 | # built documents. 
81 | 82 | import_module(setup_cfg['name']) 83 | package = sys.modules[setup_cfg['name']] 84 | 85 | # The short X.Y version. 86 | version = package.__version__.split('-', 1)[0] 87 | # The full version, including alpha/beta/rc tags. 88 | release = package.__version__ 89 | 90 | 91 | # -- Options for HTML output -------------------------------------------------- 92 | 93 | # A NOTE ON HTML THEMES 94 | # The global astropy configuration uses a custom theme, 'bootstrap-astropy', 95 | # which is installed along with astropy. A different theme can be used or 96 | # the options for this theme can be modified by overriding some of the 97 | # variables set in the global configuration. The variables set in the 98 | # global configuration are listed below, commented out. 99 | 100 | 101 | # Add any paths that contain custom themes here, relative to this directory. 102 | # To use a different custom theme, add the directory containing the theme. 103 | #html_theme_path = [] 104 | 105 | # The theme to use for HTML and HTML Help pages. See the documentation for 106 | # a list of builtin themes. To override the custom theme, set this to the 107 | # name of a builtin theme or the name of a custom theme in html_theme_path. 108 | #html_theme = None 109 | 110 | 111 | html_theme_options = { 112 | 'logotext1': 'eazy', # white, semi-bold 113 | 'logotext2': 'docs', # orange, light 114 | 'logotext3': '' # white, light 115 | } 116 | 117 | 118 | # Custom sidebar templates, maps document names to template names. 119 | #html_sidebars = {} 120 | 121 | # The name of an image file (relative to this directory) to place at the top 122 | # of the sidebar. 123 | #html_logo = '' 124 | 125 | # The name of an image file (within the static path) to use as favicon of the 126 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 127 | # pixels large. 
128 | html_favicon = '' 129 | 130 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 131 | # using the given strftime format. 132 | #html_last_updated_fmt = '' 133 | 134 | # The name for this set of Sphinx documents. If None, it defaults to 135 | # " v documentation". 136 | html_title = '{0} v{1}'.format(project, release) 137 | 138 | # Output file base name for HTML help builder. 139 | htmlhelp_basename = project + 'doc' 140 | 141 | 142 | # -- Options for LaTeX output ------------------------------------------------- 143 | 144 | # Grouping the document tree into LaTeX files. List of tuples 145 | # (source start file, target name, title, author, documentclass [howto/manual]). 146 | latex_documents = [('index', project + '.tex', project + u' Documentation', 147 | author, 'manual')] 148 | 149 | 150 | # -- Options for manual page output ------------------------------------------- 151 | 152 | # One entry per manual page. List of tuples 153 | # (source start file, name, description, authors, manual section). 154 | man_pages = [('index', project.lower(), project + u' Documentation', 155 | [author], 1)] 156 | 157 | # -- Resolving issue number to links in changelog ----------------------------- 158 | github_issues_url = setup_cfg['urls']['Tracker'] 159 | 160 | # -- Turn on nitpicky mode for sphinx (to warn about references not found) ---- 161 | # 162 | # nitpicky = True 163 | # nitpick_ignore = [] 164 | # 165 | # Some warnings are impossible to suppress, and you can list specific references 166 | # that should be ignored in a nitpick-exceptions file which should be inside 167 | # the docs/ directory. 
The format of the file should be: 168 | # 169 | # 170 | # 171 | # for example: 172 | # 173 | # py:class astropy.io.votable.tree.Element 174 | # py:class astropy.io.votable.tree.SimpleElement 175 | # py:class astropy.io.votable.tree.SimpleElementWithContent 176 | # 177 | # Uncomment the following lines to enable the exceptions: 178 | # 179 | # for line in open('nitpick-exceptions'): 180 | # if line.strip() == "" or line.startswith("#"): 181 | # continue 182 | # dtype, target = line.split(None, 1) 183 | # target = target.strip() 184 | # nitpick_ignore.append((dtype, six.u(target))) -------------------------------------------------------------------------------- /docs/eazy/api.rst: -------------------------------------------------------------------------------- 1 | Additional API 2 | ~~~~~~~~~~~~~~ 3 | .. automodapi:: eazy.utils 4 | :no-inheritance-diagram: 5 | 6 | .. automodapi:: eazy.sps 7 | :no-inheritance-diagram: 8 | 9 | .. automodapi:: eazy.hdf5 10 | :no-inheritance-diagram: 11 | 12 | .. automodapi:: eazy.visualization 13 | :no-inheritance-diagram: 14 | 15 | 16 | -------------------------------------------------------------------------------- /docs/eazy/install.rst: -------------------------------------------------------------------------------- 1 | Installation instructions 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | Install with `pip` 5 | ================== 6 | .. code:: bash 7 | 8 | pip install eazy 9 | 10 | Install from the repository 11 | =========================== 12 | .. code:: bash 13 | 14 | ### [OPTIONAL!] create a fresh conda environment 15 | conda create -n eazy39 python=3.9 16 | conda activate eazy39 17 | 18 | cd /usr/local/share/python # or some other location 19 | 20 | ### Fetch the eazy-py repo 21 | git clone https://github.com/gbrammer/eazy-py.git 22 | 23 | ### Build the python code 24 | cd eazy-py 25 | pip install . 
-r requirements.txt 26 | 27 | ### Install and run the test suite, which also downloads the templates and 28 | ### filters from the eazy-photoz repository if necessary 29 | pip install .[test] -r requirements.txt 30 | pytest 31 | 32 | 33 | Binder Demo 34 | ~~~~~~~~~~~ 35 | .. image:: https://mybinder.org/badge_logo.svg 36 | :target: https://mybinder.org/v2/gh/gbrammer/eazy-py/HEAD?filepath=docs%2Fexamples%2FHDFN-demo.ipynb 37 | -------------------------------------------------------------------------------- /docs/eazy/photoz.rst: -------------------------------------------------------------------------------- 1 | Running photozs 2 | =============== 3 | 4 | Parameters 5 | ~~~~~~~~~~ 6 | 7 | Photometric catalog 8 | ~~~~~~~~~~~~~~~~~~~ 9 | 10 | Execution 11 | ~~~~~~~~~ 12 | 13 | API 14 | ~~~ 15 | 16 | .. automodapi:: eazy.param 17 | :no-inheritance-diagram: 18 | 19 | .. automodapi:: eazy.igm 20 | :no-inheritance-diagram: 21 | 22 | .. automodapi:: eazy.photoz 23 | :no-inheritance-diagram: 24 | -------------------------------------------------------------------------------- /docs/eazy/templates.rst: -------------------------------------------------------------------------------- 1 | .. _template-objects: 2 | 3 | Templates 4 | ========= 5 | 6 | The `eazy.templates.Template` object provides a number of tools for working with 7 | population synthesis templates, e.g., from `FSPS `_. 8 | 9 | Templates integrated through filter bandpasses 10 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 | 12 | Redshift-dependent templates 13 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 14 | 15 | Additional functionality 16 | ~~~~~~~~~~~~~~~~~~~~~~~~ 17 | Smoothing and resampling... 18 | 19 | The "Template Error Function" 20 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 21 | 22 | API 23 | ~~~ 24 | .. 
automodapi:: eazy.templates 25 | :no-inheritance-diagram: 26 | -------------------------------------------------------------------------------- /docs/eazy/zout_columns.rst: -------------------------------------------------------------------------------- 1 | Columns in ``{MAIN_OUTPUT_FILE}.zout.fits``. 2 | ============================================ 3 | 4 | +-------------------+-----------------------------------------------------+ 5 | | Column | Description | 6 | +===================+=====================================================+ 7 | | ``id`` | Object identifier | 8 | +-------------------+-----------------------------------------------------+ 9 | | ``ra`` | Right Ascension [deg] | 10 | +-------------------+-----------------------------------------------------+ 11 | | ``dec`` | Declination [deg] | 12 | +-------------------+-----------------------------------------------------+ 13 | | ``z_spec`` | Spectroscopic redshift (if available) | 14 | +-------------------+-----------------------------------------------------+ 15 | | ``nusefilt`` | Number of filters used for photo-z | 16 | +-------------------+-----------------------------------------------------+ 17 | | ``z_ml`` | Maximum Likelihood redshift ``max( pdf(z) ))``). | 18 | +-------------------+-----------------------------------------------------+ 19 | | ``z_ml_chi2`` | chi-squared at ``z==z_ml`` | 20 | | | | 21 | +-------------------+-----------------------------------------------------+ 22 | | ``z_ml_risk`` | ``R(z_ml)``, "Risk" parameter defined by | 23 | | | `Tanaka et al. 
(2017) `_ | 24 | | | , evaluated at ``z_ml`` | 25 | +-------------------+-----------------------------------------------------+ 26 | | ``lc_min`` | Minimum effective wavelength of valid filters [Å] | 27 | +-------------------+-----------------------------------------------------+ 28 | | ``lc_max`` | Maximum effective wavelength of valid filters [Å] | 29 | +-------------------+-----------------------------------------------------+ 30 | | ``numpeaks`` | Number of separate peaks identified in ``pdf(z)``, | 31 | | | optional. | 32 | +-------------------+-----------------------------------------------------+ 33 | | ``z_phot`` | ``z_ml`` or, optionally, a user-specified redshift | 34 | | | Parameters below evaluated at ``z_phot``. | 35 | +-------------------+-----------------------------------------------------+ 36 | | ``z_phot_chi2`` | chi-squared at ``z==z_phot`` | 37 | | | | 38 | +-------------------+-----------------------------------------------------+ 39 | | ``z_phot_risk`` | Risk evaluated at ``z_phot`` | 40 | +-------------------+-----------------------------------------------------+ 41 | | ``z_min_risk`` | Redshift where ``R(z)`` minimized | 42 | +-------------------+-----------------------------------------------------+ 43 | | ``min_risk`` | ``R(z_min_risk)`` | 44 | +-------------------+-----------------------------------------------------+ 45 | | | Redshift where chi-squared maximized, before adding | 46 | | ``z_chi2_noprior``| the (optional) prior | 47 | +-------------------+-----------------------------------------------------+ 48 | | ``chi2_noprior`` | ``chi2(z_chi2_noprior)`` | 49 | +-------------------+-----------------------------------------------------+ 50 | | ``z025`` | 2.5 percentile of ``pdf(z)`` (2-:math:`\sigma`) | 51 | +-------------------+-----------------------------------------------------+ 52 | | ``z160`` | 16 percentile of ``pdf(z)`` (1-:math:`\sigma`) | 53 | +-------------------+-----------------------------------------------------+ 54 | | 
``z500`` | 50 percentile of ``pdf(z)`` | 55 | +-------------------+-----------------------------------------------------+ 56 | | ``z840`` | 84 percentile of ``pdf(z)`` (1-:math:`\sigma`) | 57 | +-------------------+-----------------------------------------------------+ 58 | | ``z975`` | 97.5 percentile of ``pdf(z)`` (2-:math:`\sigma`) | 59 | +-------------------+-----------------------------------------------------+ 60 | | ``restU`` | Rest-frame U-band flux (units of catalog fluxes) | 61 | +-------------------+-----------------------------------------------------+ 62 | | ``restU_err`` | Uncertainty | 63 | +-------------------+-----------------------------------------------------+ 64 | | ``restB`` | Rest-frame B-band flux (units of catalog fluxes) | 65 | +-------------------+-----------------------------------------------------+ 66 | | ``restB_err`` | Uncertainty | 67 | +-------------------+-----------------------------------------------------+ 68 | | ``restV`` | Rest-frame V-band flux (units of catalog fluxes) | 69 | +-------------------+-----------------------------------------------------+ 70 | | ``restV_err`` | Uncertainty | 71 | +-------------------+-----------------------------------------------------+ 72 | | ``restJ`` | Rest-frame J-band flux (units of catalog fluxes) | 73 | +-------------------+-----------------------------------------------------+ 74 | | ``restJ_err`` | Uncertainty | 75 | +-------------------+-----------------------------------------------------+ 76 | | ``dL`` | Luminosity distance at ``z_phot`` [``Mpc``] | 77 | +-------------------+-----------------------------------------------------+ 78 | | ``Lv`` | V-band luminosity [:math:`L_\odot`] | 79 | +-------------------+-----------------------------------------------------+ 80 | | ``MLv`` | Mass-to-light ratio in V-band | 81 | | | [:math:`M_\odot/L_\odot`] | 82 | +-------------------+-----------------------------------------------------+ 83 | | ``Av`` | Extinction in V-band [mag] | 84 | 
+-------------------+-----------------------------------------------------+ 85 | | ``mass`` | Stellar mass [:math:`M_\odot`] | 86 | +-------------------+-----------------------------------------------------+ 87 | | ``sfr`` | Star formation rate | 88 | | | [:math:`M_\odot\,\mathrm{yr}^{-1}`] | 89 | +-------------------+-----------------------------------------------------+ 90 | | ``LIR`` | Total 8--1000 µm luminosity [:math:`L_\odot`] | 91 | +-------------------+-----------------------------------------------------+ 92 | | ``energy_abs`` | Implied absorbed energy associated with :math:`A_V`,| 93 | | | [:math:`L_\odot`] | 94 | +-------------------+-----------------------------------------------------+ 95 | | ``massp`` | 2.5, 16, 50, 84, 97.5 Percentiles of stellar mass | 96 | +-------------------+-----------------------------------------------------+ 97 | | ``SFRp`` | Percentiles of SFR | 98 | +-------------------+-----------------------------------------------------+ 99 | | ``sSFRp`` | Percentiles of specific SFR (``SFR/mass``) | 100 | +-------------------+-----------------------------------------------------+ 101 | | ``LIRp`` | Percentiles of LIR | 102 | +-------------------+-----------------------------------------------------+ 103 | | ``line_flux_Ha`` | Template line flux, H-:math:`\alpha` [cgs] | 104 | +-------------------+-----------------------------------------------------+ 105 | | ``line_EW_Ha`` | Template line EW, H-:math:`\alpha` [Å] | 106 | +-------------------+-----------------------------------------------------+ 107 | | ``line_flux_O3`` | Template line flux, [OIII] | 108 | +-------------------+-----------------------------------------------------+ 109 | | ``line_EW_O3`` | Template line EW, [OIII] | 110 | +-------------------+-----------------------------------------------------+ 111 | | ``line_flux_Hb`` | Template line flux, H-:math:`\beta` | 112 | +-------------------+-----------------------------------------------------+ 113 | | ``line_EW_Hb`` | 
Template line EW, H-:math:`\beta` | 114 | +-------------------+-----------------------------------------------------+ 115 | | ``line_flux_O2`` | Template line flux, [OII] | 116 | +-------------------+-----------------------------------------------------+ 117 | | ``line_EW_O2`` | Template line EW, [OII] | 118 | +-------------------+-----------------------------------------------------+ 119 | | ``line_flux_Lya`` | Template line flux, Ly-:math:`\alpha` | 120 | +-------------------+-----------------------------------------------------+ 121 | | ``line_EW_Lya`` | Template line EW, Ly-:math:`\alpha` | 122 | +-------------------+-----------------------------------------------------+ 123 | -------------------------------------------------------------------------------- /docs/examples/dash_viewer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gbrammer/eazy-py/292172c331b67f34358e9ff2500fe09a82757635/docs/examples/dash_viewer.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | ********* 8 | eazy-py 9 | ********* 10 | 11 | Pythonic photometric redshift tools based on EAZY 12 | 13 | Under heavy construction.... 14 | 15 | Documentation will be here: https://eazy-py.readthedocs.io/, though it's essentially just the module API for now. 16 | 17 | Templates and filter files still here: https://github.com/gbrammer/eazy-photoz/. 18 | 19 | .. note:: 20 | Please submit any questions/comments/problems you have through the `Issues `_ interface. 21 | 22 | ~~~~~~~~~~~ 23 | Binder Demo 24 | ~~~~~~~~~~~ 25 | .. image:: https://mybinder.org/badge_logo.svg 26 | :target: https://mybinder.org/v2/gh/gbrammer/eazy-py/HEAD?filepath=docs%2Fexamples%2FHDFN-demo.ipynb 27 | 28 | 29 | ~~~~~~~~~~~~~ 30 | Documentation 31 | ~~~~~~~~~~~~~ 32 | 33 | .. 
toctree:: 34 | :maxdepth: 2 35 | 36 | eazy/install.rst 37 | eazy/filters.rst 38 | eazy/templates.rst 39 | eazy/photoz.rst 40 | eazy/zout_columns.rst 41 | 42 | ~~~ 43 | API 44 | ~~~ 45 | 46 | .. toctree:: 47 | :maxdepth: 2 48 | 49 | eazy/api.rst 50 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | if NOT "%PAPER%" == "" ( 11 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 12 | ) 13 | 14 | if "%1" == "" goto help 15 | 16 | if "%1" == "help" ( 17 | :help 18 | echo.Please use `make ^` where ^ is one of 19 | echo. html to make standalone HTML files 20 | echo. dirhtml to make HTML files named index.html in directories 21 | echo. singlehtml to make a single large HTML file 22 | echo. pickle to make pickle files 23 | echo. json to make JSON files 24 | echo. htmlhelp to make HTML files and a HTML help project 25 | echo. qthelp to make HTML files and a qthelp project 26 | echo. devhelp to make HTML files and a Devhelp project 27 | echo. epub to make an epub 28 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 29 | echo. text to make text files 30 | echo. man to make manual pages 31 | echo. changes to make an overview over all changed/added/deprecated items 32 | echo. linkcheck to check all external links for integrity 33 | echo. doctest to run all doctests embedded in the documentation if enabled 34 | goto end 35 | ) 36 | 37 | if "%1" == "clean" ( 38 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 39 | del /q /s %BUILDDIR%\* 40 | goto end 41 | ) 42 | 43 | if "%1" == "html" ( 44 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 45 | if errorlevel 1 exit /b 1 46 | echo. 
47 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 48 | goto end 49 | ) 50 | 51 | if "%1" == "dirhtml" ( 52 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 53 | if errorlevel 1 exit /b 1 54 | echo. 55 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 56 | goto end 57 | ) 58 | 59 | if "%1" == "singlehtml" ( 60 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 61 | if errorlevel 1 exit /b 1 62 | echo. 63 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 64 | goto end 65 | ) 66 | 67 | if "%1" == "pickle" ( 68 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 69 | if errorlevel 1 exit /b 1 70 | echo. 71 | echo.Build finished; now you can process the pickle files. 72 | goto end 73 | ) 74 | 75 | if "%1" == "json" ( 76 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished; now you can process the JSON files. 80 | goto end 81 | ) 82 | 83 | if "%1" == "htmlhelp" ( 84 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished; now you can run HTML Help Workshop with the ^ 88 | .hhp project file in %BUILDDIR%/htmlhelp. 89 | goto end 90 | ) 91 | 92 | if "%1" == "qthelp" ( 93 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 94 | if errorlevel 1 exit /b 1 95 | echo. 96 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 97 | .qhcp project file in %BUILDDIR%/qthelp, like this: 98 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Astropy.qhcp 99 | echo.To view the help file: 100 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Astropy.ghc 101 | goto end 102 | ) 103 | 104 | if "%1" == "devhelp" ( 105 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 106 | if errorlevel 1 exit /b 1 107 | echo. 108 | echo.Build finished. 
109 | goto end 110 | ) 111 | 112 | if "%1" == "epub" ( 113 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 114 | if errorlevel 1 exit /b 1 115 | echo. 116 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 117 | goto end 118 | ) 119 | 120 | if "%1" == "latex" ( 121 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 122 | if errorlevel 1 exit /b 1 123 | echo. 124 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 125 | goto end 126 | ) 127 | 128 | if "%1" == "text" ( 129 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 130 | if errorlevel 1 exit /b 1 131 | echo. 132 | echo.Build finished. The text files are in %BUILDDIR%/text. 133 | goto end 134 | ) 135 | 136 | if "%1" == "man" ( 137 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 141 | goto end 142 | ) 143 | 144 | if "%1" == "changes" ( 145 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.The overview file is in %BUILDDIR%/changes. 149 | goto end 150 | ) 151 | 152 | if "%1" == "linkcheck" ( 153 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Link check complete; look for any errors in the above output ^ 157 | or in %BUILDDIR%/linkcheck/output.txt. 158 | goto end 159 | ) 160 | 161 | if "%1" == "doctest" ( 162 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 163 | if errorlevel 1 exit /b 1 164 | echo. 165 | echo.Testing of doctests in the sources finished, look at the ^ 166 | results in %BUILDDIR%/doctest/output.txt. 
167 | goto end 168 | ) 169 | 170 | :end 171 | -------------------------------------------------------------------------------- /docs/rtd-pip-requirements: -------------------------------------------------------------------------------- 1 | Cython 2 | matplotlib 3 | numpy 4 | scikit-image 5 | scipy 6 | astropy 7 | sphinx_astropy 8 | tqdm 9 | 10 | dust_extinction 11 | git+https://github.com/karllark/dust_attenuation -------------------------------------------------------------------------------- /eazy/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | # from . import igm 5 | # from . import param 6 | # from . import templates 7 | # from . import filters 8 | # from . import photoz 9 | 10 | from .version import __version__ 11 | 12 | try: 13 | import dust_attenuation 14 | except ImportError: 15 | print('Failed to `import dust_attenuation`') 16 | print('Install from the repo with $ pip install ' + 17 | 'git+https://github.com/karllark/dust_attenuation.git') 18 | # print('Install my fork with $ pip install ' + 19 | # 'git+https://github.com/gbrammer/dust_attenuation.git') 20 | 21 | ### dust_extinction is now in dependencies and should be installed from PyPI 22 | 23 | # try: 24 | # import dust_extinction 25 | # except ImportError: 26 | # print('Failed to `import dust_extinction`') 27 | # print('Install from the forked repo with $ pip install ' + 28 | # 'git+https://github.com/gbrammer/dust_extinction.git') 29 | 30 | 31 | ## Hot fix for importing prospector without SPS_HOME variable set 32 | # try: 33 | # from prospect.utils.smoothing import smoothspec 34 | # except (FileNotFoundError, TypeError): 35 | # if 'SPS_HOME' not in os.environ: 36 | # sps_home = 'xxxxdummyxxxx' #os.path.dirname(__file__) 37 | # print(f'Warning: setting environment variable SPS_HOME={sps_home} ' 38 | # 'to be able to import prospect.') 39 | # os.environ['SPS_HOME'] = sps_home 40 | 41 | DATA_PATH = None 42 | 43 | def 
set_data_path(path='$EAZYCODE'): 44 | """ 45 | Make symbolic links to EAZY inputs 46 | 47 | Parameters 48 | ---------- 49 | path : str 50 | Full directory path or environment variable pointing to the old eazy 51 | C-code repository that provides the template and filter files. 52 | 53 | If `path.startswith('$')` then treat path as an environment variable. 54 | 55 | If you install from the repository that provides the eazy-photozy 56 | code as a submodule, then you should be able to run with `path=None` 57 | and retrieve the files directly from the repository. This should 58 | also work with the `pip` installation. 59 | 60 | Another safe way to ensure that the necessary files are avialable is 61 | to clone the `eazy-photoz` repository and set an environment variable 62 | to point to it (e.g, 'EAZYCODE'), which you then pass as the `path` 63 | argument. 64 | 65 | """ 66 | global DATA_PATH 67 | if path.startswith('$'): 68 | path = os.getenv(path[1:]) 69 | 70 | if path is None: 71 | # Use the code attached to the repository 72 | path = os.path.join(os.path.dirname(__file__), 'data') 73 | if not os.path.exists(os.path.join(path, 'templates')): 74 | path = os.path.join(path, 'eazy-photoz') 75 | 76 | if path.endswith("/"): 77 | DATA_PATH = path[:-1] 78 | else: 79 | DATA_PATH = path 80 | 81 | return path 82 | 83 | # Set the data path and download if necessary 84 | set_data_path() 85 | 86 | def fetch_eazy_photoz(): 87 | """ 88 | If necessary, clone the eazy-photoz repository to get templates and filters 89 | """ 90 | global DATA_PATH 91 | 92 | current_path = os.getcwd() 93 | 94 | # module_path = os.path.dirname(__file__) 95 | # data_path = os.path.join(module_path, 'data/') 96 | if DATA_PATH.endswith("eazy-photoz"): 97 | _data_path = os.path.split(DATA_PATH)[0] 98 | else: 99 | _data_path = DATA_PATH 100 | 101 | if not os.path.exists(_data_path): 102 | os.makedirs(_data_path) 103 | 104 | os.chdir(_data_path) 105 | 106 | eazy_photoz = os.path.join(_data_path, 'eazy-photoz') 
107 | git_url = 'https://github.com/gbrammer/eazy-photoz.git' 108 | 109 | if not os.path.exists(eazy_photoz): 110 | os.system(f'git clone {git_url}') 111 | print(f'cloning {git_url} to {_data_path}') 112 | 113 | # Back to working directory 114 | os.chdir(current_path) 115 | 116 | 117 | def symlink_eazy_inputs(path='$EAZYCODE', get_hdfn_test_catalog=False, copy=False): 118 | """ 119 | Make symbolic links to EAZY inputs 120 | 121 | Parameters 122 | ---------- 123 | path : str 124 | Full directory path or environment variable pointing to the old eazy 125 | C-code repository that provides the template and filter files. 126 | 127 | If `path.startswith('$')` then treat path as an environment variable. 128 | 129 | If you install from the repository that provides the eazy-photozy 130 | code as a submodule, then you should be able to run with `path=None` 131 | and retrieve the files directly from the repository. This should 132 | also work with the `pip` installation. 133 | 134 | Another safe way to ensure that the necessary files are avialable is 135 | to clone the `eazy-photoz` repository and set an environment variable 136 | to point to it (e.g, 'EAZYCODE'), which you then pass as the `path` 137 | argument. 138 | 139 | copy : bool 140 | Copy ``templates`` directory and ``FILTER.RES.latest`` file, rather 141 | than symlink 142 | 143 | Returns 144 | ------- 145 | Symbolic links to the `FILTER.RES.latest` file and `templates` 146 | directory are created in the current working directory (`./`). 
147 | 148 | """ 149 | from .utils import DATA_PATH 150 | 151 | if path.startswith('$'): 152 | path = os.getenv(path) 153 | 154 | current_path = os.getcwd() 155 | 156 | if path is None: 157 | # Use the code attached to the repository 158 | # path = os.path.join(os.path.dirname(__file__), 'data/') 159 | path = DATA_PATH 160 | if not os.path.exists(os.path.join(path, 'templates')): 161 | fetch_eazy_photoz() 162 | 163 | os.chdir(current_path) 164 | 165 | if not os.path.exists(path): 166 | print('Couldn\'t find path {0}'.format(path)) 167 | return False 168 | 169 | # Templates directory 170 | if os.path.exists('./templates'): 171 | try: 172 | shutil.rmtree('./templates') 173 | except PermissionError: 174 | os.system('rm -rf templates') 175 | 176 | t_path = os.path.join(path, 'templates') 177 | if copy: 178 | os.system('cp -R {0} .'.format(t_path)) 179 | else: 180 | os.symlink(t_path, './templates') 181 | 182 | print('{0} -> {1}'.format(t_path, './templates')) 183 | 184 | # Filter file 185 | if os.path.exists('./FILTER.RES.latest'): 186 | shutil.rmtree('./FILTER.RES.latest') 187 | 188 | res_path = os.path.join(path, 'filters/FILTER.RES.latest') 189 | if copy: 190 | os.system(f'cp {0} .'.format(res_path)) 191 | else: 192 | os.symlink(res_path, './FILTER.RES.latest') 193 | 194 | print('{0} -> {1}'.format(res_path, './FILTER.RES.latest')) 195 | 196 | if get_hdfn_test_catalog: 197 | for cat_path in ['inputs', 'hdfn_fs99']: 198 | parent = os.path.join(path, cat_path, 'hdfn_fs99_eazy.cat') 199 | translate = os.path.join(path, cat_path, 'zphot.translate') 200 | if os.path.exists(parent): 201 | for file in [parent, translate]: 202 | os.symlink(file, os.path.basename(file)) 203 | print('{0} -> {1}'.format(file, os.path.basename(file))) 204 | 205 | 206 | def get_test_catalog(path=None, path_is_env=True): 207 | """ 208 | Make symbolic links to EAZY inputs 209 | 210 | Parameters 211 | ---------- 212 | path : str 213 | Full directory path or environment variable pointing to the old 
eazy 214 | C-code repository that provides the template and filter files. 215 | 216 | path_is_env : bool 217 | If True, then `path` is an environment variable pointing to the Eazy 218 | repository. If False, then treat as a directory path. 219 | 220 | Returns 221 | ------- 222 | Symbolic links in `./`. 223 | 224 | """ 225 | if path_is_env: 226 | path = os.getenv(path) 227 | 228 | if not os.path.exists(path): 229 | print('Couldn\'t find path {0}'.format(path)) 230 | return False 231 | -------------------------------------------------------------------------------- /eazy/data/DLAcoeff.txt: -------------------------------------------------------------------------------- 1 | 2 1215.670 1.61698E-04 5.38995E-05 2 | 3 1025.720 1.54539E-04 5.15129E-05 3 | 4 972.537 1.49767E-04 4.99222E-05 4 | 5 949.743 1.46031E-04 4.86769E-05 5 | 6 937.803 1.42893E-04 4.76312E-05 6 | 7 930.748 1.40159E-04 4.67196E-05 7 | 8 926.226 1.37714E-04 4.59048E-05 8 | 9 923.150 1.35495E-04 4.51650E-05 9 | 10 920.963 1.33452E-04 4.44841E-05 10 | 11 919.352 1.31561E-04 4.38536E-05 11 | 12 918.129 1.29785E-04 4.32617E-05 12 | 13 917.181 1.28117E-04 4.27056E-05 13 | 14 916.429 1.26540E-04 4.21799E-05 14 | 15 915.824 1.25041E-04 4.16804E-05 15 | 16 915.329 1.23614E-04 4.12046E-05 16 | 17 914.919 1.22248E-04 4.07494E-05 17 | 18 914.576 1.20938E-04 4.03127E-05 18 | 19 914.286 1.19681E-04 3.98938E-05 19 | 20 914.039 1.18469E-04 3.94896E-05 20 | 21 913.826 1.17298E-04 3.90995E-05 21 | 22 913.641 1.16167E-04 3.87225E-05 22 | 23 913.480 1.15071E-04 3.83572E-05 23 | 24 913.339 1.14011E-04 3.80037E-05 24 | 25 913.215 1.12983E-04 3.76609E-05 25 | 26 913.104 1.11972E-04 3.73241E-05 26 | 27 913.006 1.11002E-04 3.70005E-05 27 | 28 912.918 1.10051E-04 3.66836E-05 28 | 29 912.839 1.09125E-04 3.63749E-05 29 | 30 912.768 1.08220E-04 3.60734E-05 30 | 31 912.703 1.07337E-04 3.57789E-05 31 | 32 912.645 1.06473E-04 3.54909E-05 32 | 33 912.592 1.05629E-04 3.52096E-05 33 | 34 912.543 1.04802E-04 3.49340E-05 34 | 35 912.499 
1.03991E-04 3.46636E-05 35 | 36 912.458 1.03198E-04 3.43994E-05 36 | 37 912.420 1.02420E-04 3.41402E-05 37 | 38 912.385 1.01657E-04 3.38856E-05 38 | 39 912.353 1.00908E-04 3.36359E-05 39 | 40 912.324 1.00168E-04 3.33895E-05 40 | -------------------------------------------------------------------------------- /eazy/data/LAFcoeff.txt: -------------------------------------------------------------------------------- 1 | 2 1215.670 1.68976E-02 2.35379E-03 1.02611E-04 2 | 3 1025.720 4.69229E-03 6.53625E-04 2.84940E-05 3 | 4 972.537 2.23898E-03 3.11884E-04 1.35962E-05 4 | 5 949.743 1.31901E-03 1.83735E-04 8.00974E-06 5 | 6 937.803 8.70656E-04 1.21280E-04 5.28707E-06 6 | 7 930.748 6.17843E-04 8.60640E-05 3.75186E-06 7 | 8 926.226 4.60924E-04 6.42055E-05 2.79897E-06 8 | 9 923.150 3.56887E-04 4.97135E-05 2.16720E-06 9 | 10 920.963 2.84278E-04 3.95992E-05 1.72628E-06 10 | 11 919.352 2.31771E-04 3.22851E-05 1.40743E-06 11 | 12 918.129 1.92348E-04 2.67936E-05 1.16804E-06 12 | 13 917.181 1.62155E-04 2.25878E-05 9.84689E-07 13 | 14 916.429 1.38498E-04 1.92925E-05 8.41033E-07 14 | 15 915.824 1.19611E-04 1.66615E-05 7.26340E-07 15 | 16 915.329 1.04314E-04 1.45306E-05 6.33446E-07 16 | 17 914.919 9.17397E-05 1.27791E-05 5.57091E-07 17 | 18 914.576 8.12784E-05 1.13219E-05 4.93564E-07 18 | 19 914.286 7.25069E-05 1.01000E-05 4.40299E-07 19 | 20 914.039 6.50549E-05 9.06198E-06 3.95047E-07 20 | 21 913.826 5.86816E-05 8.17421E-06 3.56345E-07 21 | 22 913.641 5.31918E-05 7.40949E-06 3.23008E-07 22 | 23 913.480 4.84261E-05 6.74563E-06 2.94068E-07 23 | 24 913.339 4.42740E-05 6.16726E-06 2.68854E-07 24 | 25 913.215 4.06311E-05 5.65981E-06 2.46733E-07 25 | 26 913.104 3.73821E-05 5.20723E-06 2.27003E-07 26 | 27 913.006 3.45377E-05 4.81102E-06 2.09731E-07 27 | 28 912.918 3.19891E-05 4.45601E-06 1.94255E-07 28 | 29 912.839 2.97110E-05 4.13867E-06 1.80421E-07 29 | 30 912.768 2.76635E-05 3.85346E-06 1.67987E-07 30 | 31 912.703 2.58178E-05 3.59636E-06 1.56779E-07 31 | 32 912.645 2.41479E-05 
3.36374E-06 1.46638E-07 32 | 33 912.592 2.26347E-05 3.15296E-06 1.37450E-07 33 | 34 912.543 2.12567E-05 2.96100E-06 1.29081E-07 34 | 35 912.499 1.99967E-05 2.78549E-06 1.21430E-07 35 | 36 912.458 1.88476E-05 2.62543E-06 1.14452E-07 36 | 37 912.420 1.77928E-05 2.47850E-06 1.08047E-07 37 | 38 912.385 1.68222E-05 2.34330E-06 1.02153E-07 38 | 39 912.353 1.59286E-05 2.21882E-06 9.67268E-08 39 | 40 912.324 1.50996E-05 2.10334E-06 9.16925E-08 40 | -------------------------------------------------------------------------------- /eazy/data/alpha_lyr_stis_008.fits: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gbrammer/eazy-py/292172c331b67f34358e9ff2500fe09a82757635/eazy/data/alpha_lyr_stis_008.fits -------------------------------------------------------------------------------- /eazy/data/emlines_info.dat: -------------------------------------------------------------------------------- 1 | 923.148,Ly 923 2 | 926.249,Ly 926 3 | 930.751,Ly 930 4 | 937.814,Ly 937 5 | 949.742,Ly delta 949 6 | 972.517,Ly gamma 972 7 | 1025.728,Ly beta 1025 8 | 1215.6701,Ly alpha 1216 9 | 1640.42,HeII 1640 10 | 1661.241,OIII]1661 11 | 1666.15,OIII]1666 12 | 1812.205,[NeIII]1815 13 | 1854.716,[AlIII] 1855 14 | 1862.7895,[AlIII] 1863 15 | 1906.68,[CIII] 16 | 1908.73,[CIII] 17 | 2142.3,NII]2141 18 | 2321.664,[OIII]2321 19 | 2324.21,CII]2326 20 | 2325.4,CII]2326 21 | 2326.11,CII]2326 22 | 2327.64,CII]2326 23 | 2328.83,CII]2326 24 | 2471.088,[OII]2471 25 | 2661.146,[AlII] 2660 26 | 2669.951,[AlII] 2670 27 | 2796.352,MgII 2800 28 | 2803.53,MgII 2800 29 | 3109.98,[ArIII]3110 30 | 3343.5,[NeIII]3343 31 | 3722.75,[SIII]3723 32 | 3727.1,[OII]3726 33 | 3729.86,[OII]3729 34 | 3798.987,H 3798 35 | 3836.485,H 3835 36 | 3869.86,[NeIII]3870 37 | 3889.75,HeI 3889 38 | 3890.166,H 3889 39 | 3968.59,[NeIII]3968 40 | 3971.198,H 3970 41 | 4069.75,[SII]4070 42 | 4077.5,[SII]4078 43 | 4102.892,H delta 4102 44 | 4341.692,H gamma 4340 45 | 
4364.435,[OIII]4364 46 | 4472.735,HeI 4472 47 | 4622.864,[CI]4621 48 | 4725.47,[NeIV]4720 49 | 4862.71,H beta 4861 50 | 4960.295,[OIII]4960 51 | 5008.24,[OIII]5007 52 | 5193.27,[ArIII]5193 53 | 5201.705,[NI]5200 54 | 5519.242,[ClIII]5518 55 | 5539.411,[ClIII] 5538 56 | 5578.89,[OI]5578 57 | 5756.19,[NII]5756 58 | 5877.249,HeI 5877 59 | 6302.046,[OI]6302 60 | 6313.81,[SIII]6314 61 | 6365.535,[OI]6365 62 | 6549.86,[NII]6549 63 | 6564.6,H alpha 6563 64 | 6585.27,[NII]6585 65 | 6679.995,HeI 6680 66 | 6718.294,[SII]6717 67 | 6732.673,[SII]6732 68 | 7067.138,HeI 7065 69 | 7137.77,[ArIII]7138 70 | 7321.94,[OII]7323 71 | 7332.21,[OII]7332 72 | 7334.17,[ArIV]7330 73 | 7753.19,[ArIII]7753 74 | 8581.06,[ClII] 8579 75 | 8729.53,[CI]8727 76 | 9017.8,Pa 9015 77 | 9071.1,[SIII]9071 78 | 9126.1,[ClII] 9124 79 | 9232.2,Pa 9229 80 | 9533.2,[SIII]9533 81 | 9548.8,Pa 9546 82 | 9852.96,[CI]9850 83 | 10052.6,Pa delta 10050 84 | 10323.32,[SII]10331 85 | 10832.057,HeI 10829 86 | 10833.306,HeI 10833 87 | 10941.17,Pa gamma 10939 88 | 12570.21,[FeII] 1.26m 89 | 12821.578,Pa beta 12819 90 | 17366.885,Br 17363 91 | 18179.2,Br 18175 92 | 18756.4,Pa alpha 18752 93 | 19450.89,Br delta 19447 94 | 21661.178,Br gamma 21657 95 | 26258.71,Br beta 26254 96 | 30392.02,Pf 30386 97 | 32969.8,Pf delta 32964 98 | 37405.76,Pf gamma 37398 99 | 40522.79,Br alpha 40515 100 | 46537.8,Pf beta 46529 101 | 51286.5,Hu delta 51277 102 | 59082.2,Hu gamma 59071 103 | 69852.74,[ArII]7m 104 | 74599.0,Pf alpha 74585 105 | 75024.4,Hu beta 75011 106 | 89913.8,[ArIII]9m 107 | 105105.0,[SIV]10.5m 108 | 123719.12,Hu alpha 12.4m 109 | 128135.48,[NeII]12.8m 110 | 143678.0,[ClII] 14.4m 111 | 155551.0,[NeIII]15.5m 112 | 187130.0,[SIII]18.7m 113 | 218302.0,[ArIII]22m 114 | 328709.0,[PII] 32m 115 | 334800.0,[SIII]33.5m 116 | 348140.0,[SiII]35m 117 | 360135.0,[NeIII]36m 118 | 518145.0,[OIII]52m 119 | 573300.0,[NIII]57m 120 | 606420.0,[PII] 60m 121 | 631852.0,[OI]63m 122 | 883564.0,[OIII]88m 123 | 1218000.0,[NII]122m 124 | 
1455350.0,[OI]145m 125 | 1576429.62,[CII]157.7m 126 | 2053000.0,[NII]205m 127 | 3703700.0,[CI]369m 128 | 6097000.0,[CI]610m 129 | -------------------------------------------------------------------------------- /eazy/data/spectra_kc13_12_tweak.params: -------------------------------------------------------------------------------- 1 | # MLv SFRv 2 | 0.115 3.925e-10 3 | 0.221 1.204e-09 4 | 0.362 7.458e-12 5 | 0.739 1.235e-09 6 | 1.507 2.922e-08 7 | 1.814 6.716e-13 8 | 2.146 8.509e-13 9 | 4.302 1.726e-13 10 | 5.614 3.933e-13 11 | 7.483 2.046e-10 12 | 9.508 4.568e-12 13 | 56.966 3.392e-11 14 | -------------------------------------------------------------------------------- /eazy/data/zphot.param.default: -------------------------------------------------------------------------------- 1 | #### EAZY Default parameters 2 | 3 | VERBOSITY 1 # Print information 4 | 5 | ## Filters 6 | FILTERS_RES FILTER.RES.latest # Filter transmission data 7 | FILTER_FORMAT 1 # Format of FILTERS_RES file -- 0: energy- 1: photon-counting detector (deprecated in eazypy) 8 | SMOOTH_FILTERS n # Smooth filter curves with Gaussian (deprecated in eazypy) 9 | SMOOTH_SIGMA 100. 
# Gaussian sigma (in Angstroms) to smooth filters 10 | 11 | ## Templates 12 | TEMPLATES_FILE templates/fsps_full/tweak_fsps_QSF_12_v3.param # Template definition file 13 | TEMPLATE_COMBOS a # Template combination options: 14 | # 1 : one template at a time 15 | # 2 : two templates, read allowed combinations from TEMPLATES_FILE 16 | # -2 : two templates, all permutations 17 | # a 99 : all templates simultaneously 18 | NMF_TOLERANCE 1.e-4 # Tolerance for non-negative combinations (TEMPLATE_COMBOS=a) (deprecated in eazypy) 19 | WAVELENGTH_FILE templates/uvista_nmf/lambda.def # Wavelength grid definition file 20 | TEMP_ERR_FILE templates/uvista_nmf/template_error_10.def # Template error definition file 21 | TEMP_ERR_A2 0.20 # Template error amplitude 22 | SYS_ERR 0.01 # Systematic flux error (% of flux) 23 | APPLY_IGM y # Apply Madau 1995 IGM absorption 24 | IGM_SCALE_TAU 1.0 # Scale factor times Inoue14 IGM tau 25 | ADD_CGM y # Add Asada24 CGM damping wing absorption 26 | SIGMOID_PARAM1 3.5918 # Sigmoid func parameter (A) for the N_HI-z relation in Asada24 27 | SIGMOID_PARAM2 1.8414 # Sigmoid func parameter (a) for the N_HI-z relation in Asada24 28 | SIGMOID_PARAM3 18.001 # Sigmoid func parameter (C) for the N_HI-z relation in Asada24 29 | 30 | 31 | FITTER nnls # Template fit mode 32 | RENORM_TEMPLATES y # Renormalize templates before fitting 33 | HESS_THRESHOLD 1 # Threshold for removing degenerate templates using the Hessian matrix 34 | 35 | SCALE_2175_BUMP 0.00 # Scaling of 2175A bump. Values 0.13 (0.27) absorb ~10 (20) % at peak. 
36 | TEMPLATE_SMOOTH 0.0 # Velocity smoothing (km/s) for templates, < 0 for no smoothing 37 | RESAMPLE_WAVE None 38 | 39 | MW_EBV 0.00 # Galactic reddening (http://irsa.ipac.caltech.edu/applications/DUST/) 40 | CAT_HAS_EXTCORR y # Catalog already corrected for reddening 41 | 42 | DUMP_TEMPLATE_CACHE n # Write binary template cache (deprecated in eazypy) 43 | USE_TEMPLATE_CACHE n # Load in template cache (deprecated in eazypy) 44 | CACHE_FILE photz.tempfilt # Template cache file (in OUTPUT_DIRECTORY) (deprecated in eazypy) 45 | 46 | ## Input Files 47 | CATALOG_FILE hdfn_fs99_eazy.cat # Catalog data file 48 | CATALOG_FORMAT ascii.commented_header # Format if not FITS 49 | MAGNITUDES n # Catalog photometry in magnitudes rather than f_nu fluxes 50 | NOT_OBS_THRESHOLD -90 # Ignore flux point if 6.e4): 134 | Alambda = 0. 135 | else: 136 | f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv) 137 | Alambda = f99(self.wave) 138 | 139 | src_red = trapz(self.throughput*src*10**(-0.4*Alambda)/self.wave, 140 | self.wave) 141 | src_nored = trapz(self.throughput*src/self.wave, self.wave) 142 | 143 | delta = src_red/src_nored 144 | 145 | if mag: 146 | return 2.5*np.log10(delta) 147 | else: 148 | return 1./delta 149 | 150 | 151 | @property 152 | def ABVega(self): 153 | """ 154 | Compute AB-Vega conversion 155 | """ 156 | from astropy.constants import c 157 | import astropy.units as u 158 | try: 159 | import grizli.utils_c 160 | interp = grizli.utils_c.interp.interp_conserve_c 161 | except ImportError: 162 | interp = utils.interp_conserve 163 | 164 | # Union of throughput and Vega spectrum arrays 165 | full_x = np.hstack([self.wave, VEGA['WAVELENGTH']]) 166 | full_x = full_x[np.argsort(full_x)] 167 | 168 | # Vega spectrum, units of f-lambda flux density, cgs 169 | # Interpolate to wavelength grid, no extrapolation 170 | vega_full = interp(full_x, VEGA['WAVELENGTH'], VEGA['FLUX'], 171 | left=0, right=0) 172 | 173 | thru_full = interp(full_x, self.wave, self.throughput, 174 | left=0, 
right=0) 175 | 176 | # AB = 0, same units 177 | absp = 3631*1e-23*c.to(u.m/u.s).value*1.e10/full_x**2 178 | 179 | # Integrate over the bandpass, flam dlam 180 | num = trapz(vega_full*thru_full, full_x) 181 | den = trapz(absp*thru_full, full_x) 182 | 183 | return -2.5*np.log10(num/den) 184 | 185 | 186 | @property 187 | def pivot(self): 188 | """ 189 | Pivot wavelength 190 | 191 | http://pysynphot.readthedocs.io/en/latest/properties.html 192 | """ 193 | integrator = trapz 194 | 195 | num = integrator(self.wave*self.throughput, self.wave) 196 | den = integrator(self.throughput/self.wave, self.wave) 197 | pivot = np.sqrt(num/den) 198 | return pivot 199 | 200 | @property 201 | def equivwidth(self): 202 | """ 203 | Filter equivalent width 204 | 205 | http://pysynphot.readthedocs.io/en/latest/properties.html 206 | """ 207 | return trapz(self.throughput, self.wave) 208 | 209 | 210 | @property 211 | def rectwidth(self): 212 | """ 213 | Filter rectangular width 214 | 215 | http://pysynphot.readthedocs.io/en/latest/properties.html 216 | """ 217 | 218 | rect = self.equivwidth / self.throughput.max() 219 | return rect 220 | 221 | 222 | @property 223 | def ctw95(self): 224 | """ 225 | 95% cumulative throughput width 226 | 227 | http://www.stsci.edu/hst/acs/analysis/bandwidths/#keywords 228 | 229 | """ 230 | 231 | dl = np.diff(self.wave) 232 | filt = np.cumsum((self.wave*self.throughput)[1:]*dl) 233 | ctw95 = np.interp([0.025, 0.975], filt/filt.max(), self.wave[1:]) 234 | return np.diff(ctw95)[0] 235 | 236 | 237 | def for_filter_file(self, row_str='{i:6} {wave:.5e} {thru:.5e}'): 238 | """ 239 | Return a string that can be put in the EAZY filter file 240 | """ 241 | header = '{0} {1} lambda_c= {2:.4e} AB-Vega= {3:.3f} w95={4:.1f}' 242 | N = len(self.wave) 243 | lines = [header.format(N, self.name.split('lambda_c')[0], 244 | self.pivot, self.ABVega, self.ctw95)] 245 | 246 | lines += [row_str.format(i=i+1, wave=w, thru=t) 247 | for i, (w, t) in enumerate(zip(self.wave, 
self.throughput))] 248 | 249 | return '\n'.join(lines) 250 | 251 | 252 | class FilterFile: 253 | def __init__(self, file='FILTER.RES.latest', path=None): 254 | """ 255 | Read a EAZY filter file. 256 | 257 | .. plot:: 258 | :include-source: 259 | 260 | import matplotlib.pyplot as plt 261 | from eazy.filters import FilterFile 262 | 263 | res = FilterFile(path=None) 264 | print(len(res.filters)) 265 | 266 | bp = res[205] 267 | print(bp) 268 | 269 | fig, ax = plt.subplots(1,1,figsize=(6,4)) 270 | 271 | ax.plot(bp.wave, bp.throughput, label=bp.name.split()[0]) 272 | 273 | ax.set_xlabel('wavelength, Angstroms') 274 | ax.set_ylabel('throughput') 275 | ax.legend() 276 | ax.grid() 277 | 278 | fig.tight_layout(pad=0.5) 279 | 280 | 281 | """ 282 | if os.path.exists(file): 283 | file_path = file 284 | elif path is None: 285 | file_path = os.path.join(utils.DATA_PATH, 'filters', file) 286 | else: 287 | file_path = os.path.join(path, file) 288 | 289 | with open(file_path, 'r') as fp: 290 | lines = fp.readlines() 291 | 292 | self.filename = file_path 293 | 294 | filters = [] 295 | wave = [] 296 | trans = [] 297 | header = '' 298 | 299 | for line in lines: 300 | if 'lambda_c' in line: 301 | if len(wave) > 0: 302 | # Make filter from lines already read in 303 | new_filter = FilterDefinition(name=header, 304 | wave=np.asarray(wave,dtype=float), 305 | throughput=np.asarray(trans,dtype=float)) 306 | # new_filter.name = header 307 | # new_filter.wave = np.asarray(wave,dtype=float) 308 | # new_filter.throughput = np.asarray(trans,dtype=float) 309 | filters.append(new_filter) 310 | 311 | # Initialize filter 312 | header = ' '.join(line.split()[1:]) 313 | wave = [] 314 | trans = [] 315 | else: 316 | lspl = np.asarray(line.split(),dtype=float) 317 | wave.append(lspl[1]) 318 | trans.append(lspl[2]) 319 | 320 | # last one 321 | # new_filter = FilterDefinition() 322 | # new_filter.name = header 323 | # new_filter.wave = np.asarray(wave,dtype=float) 324 | # new_filter.throughput = 
np.asarray(trans,dtype=float) 325 | new_filter = FilterDefinition(name=header, 326 | wave=np.asarray(wave,dtype=float), 327 | throughput=np.asarray(trans,dtype=float)) 328 | 329 | filters.append(new_filter) 330 | 331 | self.filters = filters 332 | 333 | 334 | @property 335 | def NFILT(self): 336 | """ 337 | Number of filters in the list 338 | """ 339 | return len(self.filters) 340 | 341 | 342 | def __getitem__(self, i1): 343 | """ 344 | Return unit-indexed filter, e.g., 161 = 2mass-j 345 | """ 346 | return self.filters[i1-1] 347 | 348 | 349 | def names(self, verbose=True): 350 | """ 351 | Print the filter names. 352 | """ 353 | if verbose: 354 | for i in range(len(self.filters)): 355 | print('{0:5d} {1}'.format(i+1, self.filters[i].name)) 356 | else: 357 | string_list = ['{0:5d} {1}\n'.format(i+1, self.filters[i].name) for i in range(len(self.filters))] 358 | return string_list 359 | 360 | 361 | def write(self, file='xxx.res', verbose=True): 362 | """ 363 | Dump the filter information to a filter file. 364 | """ 365 | fp = open(file,'w') 366 | for filter in self.filters: 367 | fp.write('{0:6d} {1}\n'.format(len(filter.wave), filter.name)) 368 | for i in range(len(filter.wave)): 369 | fp.write('{0:6d} {1:.5e} {2:.5e}\n'.format(i+1, filter.wave[i], filter.throughput[i])) 370 | 371 | fp.close() 372 | 373 | string_list = self.names(verbose=False) 374 | fp = open(file+'.info', 'w') 375 | fp.writelines(string_list) 376 | fp.close() 377 | 378 | if verbose: 379 | print('Wrote <{0}[.info]>'.format(file)) 380 | 381 | 382 | def search(self, search_string, case=False, verbose=True): 383 | """ 384 | Search filter names for ``search_string``. If ``case`` is True, then 385 | match case. 
386 | """ 387 | import re 388 | 389 | if not case: 390 | search_string = search_string.upper() 391 | 392 | matched = [] 393 | 394 | for i in range(len(self.filters)): 395 | filt_name = self.filters[i].name 396 | if not case: 397 | filt_name = filt_name.upper() 398 | 399 | if re.search(search_string, filt_name) is not None: 400 | if verbose: 401 | print('{0:5d} {1}'.format(i+1, self.filters[i].name)) 402 | matched.append(i) 403 | 404 | return np.array(matched) 405 | 406 | 407 | class ParamFilter(FilterDefinition): 408 | def __init__(self, line='# Filter #20, RES#78: COSMOS/SUBARU_filter_B.txt - lambda_c=4458.276253'): 409 | 410 | self.lambda_c = float(line.split('lambda_c=')[1]) 411 | self.name = line.split()[4] 412 | self.fnumber = int(line.split('RES#')[1].split(':')[0]) 413 | self.cnumber = int(line.split('Filter #')[1].split(',')[0]) 414 | -------------------------------------------------------------------------------- /eazy/hdf5.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools for saving/recoving state from HDF5 files 3 | """ 4 | import numpy as np 5 | 6 | try: 7 | import h5py 8 | except: 9 | pass 10 | 11 | from . import photoz 12 | from . import param 13 | from . import utils 14 | from . import templates as templates_code 15 | 16 | def write_hdf5(pzobj, h5file='test.hdf5', include_fit_coeffs=False, include_templates=True, verbose=True): 17 | """ 18 | Write self-contained HDF5 file 19 | 20 | Parameters 21 | ---------- 22 | pzobj : `~eazy.photoz.PhotoZ` 23 | Original code object with computed redshifts, coeffs, etc. 24 | 25 | h5file : str 26 | HDF5 filename 27 | 28 | include_fit_coeffs : bool 29 | Inlude full `fit_coeffs` array with ``(NOBJ, NZ, NTEMP)`` fit 30 | coefficients. This can make the file very large, and it's really only 31 | needed if you want to use `~eazy.photoz.PhotoZ.prior_beta`. 
32 | 33 | include_templates : bool 34 | Include template arrays 35 | 36 | """ 37 | 38 | if verbose: 39 | print(f'h5: create file {h5file}') 40 | 41 | with h5py.File(h5file,'w') as f: 42 | 43 | grp = f.create_group("cat") 44 | dset = grp.create_dataset('id', data=pzobj.OBJID) 45 | dset = grp.create_dataset('ra', data=pzobj.RA) 46 | dset = grp.create_dataset('dec', data=pzobj.DEC) 47 | dset = grp.create_dataset('z_spec', data=pzobj.ZSPEC) 48 | 49 | for k in pzobj.cat.meta: 50 | if verbose: 51 | print('h5: cat meta: ', k, pzobj.cat.meta[k]) 52 | grp.attrs[k] = pzobj.cat.meta[k] 53 | 54 | for name in ['flux_columns','err_columns']: 55 | if verbose: 56 | print(f'h5: cat/{name}') 57 | 58 | attr = getattr(pzobj, name) 59 | dset = grp.create_dataset(name, data=[a.encode('utf8') 60 | for a in attr]) 61 | 62 | for name in ['f_numbers','fnu','efnu_orig','ok_data','zp', 63 | 'ext_corr','ext_redden','pivot']: 64 | if verbose: 65 | print(f'h5: cat/{name}') 66 | 67 | attr = getattr(pzobj, name) 68 | dset = grp.create_dataset(name, data=attr) 69 | 70 | grp.attrs['MW_EBV'] = pzobj.param['MW_EBV'] 71 | 72 | grp = f.create_group("fit") 73 | for name in ['zml','zbest','chi2_fit','coeffs_best']: 74 | if verbose: 75 | print(f'h5: fit/{name}') 76 | 77 | attr = getattr(pzobj, name) 78 | dset = grp.create_dataset(name, data=attr) 79 | 80 | dset = grp.create_dataset('tef_x', data=pzobj.TEF.te_x) 81 | dset = grp.create_dataset('tef_y', data=pzobj.TEF.te_y) 82 | 83 | f['fit/zml'].attrs['ZML_WITH_PRIOR'] = pzobj.ZML_WITH_PRIOR 84 | f['fit/zml'].attrs['ZML_WITH_BETA_PRIOR'] = pzobj.ZML_WITH_BETA_PRIOR 85 | f['fit/zbest'].attrs['ZPHOT_USER'] = pzobj.ZPHOT_USER 86 | 87 | if include_fit_coeffs | pzobj.ZML_WITH_BETA_PRIOR: 88 | name = 'fit_coeffs' 89 | if verbose: 90 | print(f'h5: fit/{name}') 91 | 92 | attr = getattr(pzobj, name) 93 | dset = grp.create_dataset(name, data=attr) 94 | 95 | # Parameters 96 | for k in pzobj.param.params: 97 | grp.attrs[k] = pzobj.param.params[k] 98 | 99 | dset = 
grp.create_dataset('tempfilt', data=pzobj.tempfilt.tempfilt) 100 | dset = grp.create_dataset('tempfilt_scale', data=pzobj.tempfilt.scale) 101 | func = pzobj.tempfilt.interpolator_function 102 | dset.attrs['interpolator_function'] = func.__name__ 103 | 104 | # Templates 105 | if include_templates: 106 | grp = f.create_group("templates") 107 | grp.attrs['NTEMP'] = pzobj.NTEMP 108 | for i, templ in enumerate(pzobj.templates): 109 | grp.attrs[f'TEMPL{i:03d}'] = templ.name 110 | if verbose: 111 | print(f'h5: templates/{templ.name}') 112 | 113 | dset = grp.create_dataset(f'wave {templ.name}', 114 | data=templ.wave.astype(pzobj.ARRAY_DTYPE)) 115 | dset = grp.create_dataset(f'flux {templ.name}', 116 | data=templ.flux.astype(pzobj.ARRAY_DTYPE)) 117 | dset = grp.create_dataset(f'z {templ.name}', 118 | data=templ.redshifts) 119 | 120 | 121 | def cat_from_hdf5(h5file): 122 | """ 123 | Parameters 124 | ---------- 125 | h5file : str 126 | HDF5 filename 127 | 128 | Returns 129 | ------- 130 | cat : `~astropy.table.Table` 131 | Catalog table generated from HDF5 data 132 | 133 | trans : `eazy.param.TranslateFile` 134 | TranslateFile object 135 | 136 | """ 137 | 138 | with h5py.File(h5file,'r') as f: 139 | cat, trans = photoz.PhotoZ._csv_from_arrays(f['cat/id'][:], 140 | f['cat/ra'][:], f['cat/dec'][:], 141 | f['cat/z_spec'][:], 142 | f['cat/fnu'], f['cat/efnu_orig'], 143 | f['cat/ok_data'], 144 | f['cat/flux_columns'].asstr(), 145 | f['cat/err_columns'].asstr(), 146 | f['cat/zp'][:]**0, 147 | f['cat/f_numbers'][:]) 148 | 149 | return cat, trans 150 | 151 | 152 | def param_from_hdf5(h5file): 153 | """ 154 | Read full parameters from HDF5 file 155 | 156 | Parameters 157 | ---------- 158 | h5file : str 159 | HDF5 filename 160 | 161 | Returns 162 | ------- 163 | params : `eazy.param.EazyParam` 164 | Parameter object 165 | """ 166 | params = param.EazyParam(verbose=False) 167 | 168 | with h5py.File(h5file,'r') as f: 169 | dset = f['fit'] 170 | for k in dset.attrs: 171 | print(f'set 
param: {k} {dset.attrs[k]}') 172 | params[k] = dset.attrs[k] 173 | 174 | return params 175 | 176 | 177 | def templates_from_hdf5(h5file, igm_kwargs={}, verbose=False): 178 | """ 179 | Read list of templates 180 | 181 | Parameters 182 | ---------- 183 | h5file : str 184 | HDF5 filename 185 | 186 | igm_kwargs : dict 187 | Keyword arguments passed to `eazy.templates.Template`, e.g., to initialize the IGM absorption model of each template. 188 | Returns 189 | ------- 190 | templates : list 191 | List of `eazy.templates.Template` objects read from ``h5file``. 192 | 193 | """ 194 | templates = [] 195 | with h5py.File(h5file,'r') as f: 196 | NTEMP = f['templates'].attrs['NTEMP'] 197 | for i in range(NTEMP): 198 | 199 | name = f['templates'].attrs[f'TEMPL{i:03d}'] 200 | if verbose: 201 | print(f'h5: read {name}') 202 | 203 | wave = f[f'templates/wave {name}'][:] 204 | flux = f[f'templates/flux {name}'][:] 205 | redshifts = f[f'templates/z {name}'][:] 206 | 207 | templ_i = templates_code.Template( 208 | arrays=(wave, flux), 209 | name=name, 210 | redshifts=redshifts, 211 | **igm_kwargs, 212 | ) 213 | templates.append(templ_i) 214 | 215 | return templates 216 | 217 | 218 | def show_info(h5file): 219 | """ 220 | Print summary info of contents of ``h5file`` to the screen 221 | """ 222 | 223 | with h5py.File(h5file,'r') as f: 224 | def show_size(name): 225 | if hasattr(f[name], 'shape'): 226 | print(name, f[name].shape) 227 | else: 228 | comment = '-'*(len(name)+3) 229 | print(f'\n{comment}\n {name}/\n{comment}') 230 | 231 | f.visit(show_size) 232 | 233 | 234 | def get_dataset_shape(h5file, dataset): 235 | """ 236 | Return the shape of a dataset in ``h5file`` 237 | """ 238 | 239 | with h5py.File(h5file,'r') as f: 240 | if dataset not in f: 241 | raise ValueError(f'Dataset {dataset} not found in {h5file}') 242 | 243 | return f[dataset].shape 244 | 245 | 246 | def get_dataset_slice(h5file, dataset, sl=None): 247 | """ 248 | Get a slice of a dataset in ``h5file``. 
If no ``sl`` slice provided, then 249 | return the full array 250 | """ 251 | 252 | with h5py.File(h5file,'r') as f: 253 | if dataset not in f: 254 | raise ValueError(f'Dataset {dataset} not found in {h5file}') 255 | 256 | if sl is None: 257 | return f[dataset][:] 258 | else: 259 | return f[dataset][sl] 260 | 261 | 262 | def initialize_from_hdf5(h5file='test.hdf5', verbose=True): 263 | """ 264 | Initialize a `~eazy.photoz.PhotoZ` object from HDF5 data 265 | 266 | Parameters 267 | ---------- 268 | h5file : str 269 | HDF5 filename from `eazy.hdf5.write_hdf5` 270 | 271 | Returns 272 | ------- 273 | pzobj : `~eazy.photoz.PhotoZ` 274 | Photoz object 275 | 276 | """ 277 | 278 | # Parameter dictionary 279 | param = param_from_hdf5(h5file) 280 | 281 | # Generate a catalog table from H5 data 282 | cat, trans = cat_from_hdf5(h5file) 283 | 284 | # Put catalog in CATALOG_FILE parameter 285 | param['CATALOG_FILE'] = cat 286 | 287 | with h5py.File(h5file, 'r') as f: 288 | pzobj = photoz.PhotoZ(param_file=None, translate_file=trans, 289 | zeropoint_file=None, 290 | params=param.params, load_prior=True, 291 | load_products=False, 292 | tempfilt_data=f['fit/tempfilt'][:]) 293 | 294 | pzobj.tempfilt.scale = f['fit/tempfilt_scale'][:] 295 | pzobj.chi2_fit = f['fit/chi2_fit'][:] 296 | pzobj.zp = f['cat/zp'][:] 297 | 298 | if 'templates' in f: 299 | pzobj.templates = templates_from_hdf5( 300 | h5file, 301 | igm_kwargs=param.igm_kwargs, 302 | verbose=verbose 303 | ) 304 | 305 | if 'fit/fit_coeffs' in f: 306 | pzobj.fit_coeffs = f['fit/fit_coeffs'][:] 307 | 308 | pzobj.compute_lnp(prior=f['fit/zml'].attrs['ZML_WITH_PRIOR'], 309 | beta_prior=f['fit/zml'].attrs['ZML_WITH_BETA_PRIOR']) 310 | 311 | pzobj.evaluate_zml(prior=f['fit/zml'].attrs['ZML_WITH_PRIOR'], 312 | beta_prior=f['fit/zml'].attrs['ZML_WITH_BETA_PRIOR']) 313 | 314 | if f['fit/zbest'].attrs['ZPHOT_USER']: 315 | pzobj.fit_at_zbest(zbest=f['fit/zbest'], 316 | prior=f['fit/zml'].attrs['ZML_WITH_PRIOR'], 317 | 
beta_prior=f['fit/zml'].attrs['ZML_WITH_BETA_PRIOR']) 318 | else: 319 | pzobj.fit_at_zbest(zbest=None, 320 | prior=f['fit/zml'].attrs['ZML_WITH_PRIOR'], 321 | beta_prior=f['fit/zml'].attrs['ZML_WITH_BETA_PRIOR']) 322 | 323 | return pzobj 324 | 325 | 326 | class Viewer(object): 327 | def __init__(self, h5file, verbose=True): 328 | """ 329 | Tool to replicate functionality of `PhotoZ.show_fit` but with 330 | data read from a stored HDF5 file rather than a "live" object 331 | """ 332 | from astropy.cosmology import LambdaCDM 333 | 334 | self.h5file = h5file 335 | 336 | self.param = param_from_hdf5(h5file) 337 | 338 | photoz.PhotoZ.set_zgrid(self) 339 | self.NZ = len(self.zgrid) 340 | 341 | self.templates = templates_from_hdf5( 342 | h5file, 343 | igm_kwargs=self.param.igm_kwargs, 344 | verbose=verbose 345 | ) 346 | self.NTEMP = len(self.templates) 347 | 348 | self.set_attrs_from_hdf5() 349 | 350 | self.set_template_error() 351 | 352 | self.cosmology = LambdaCDM(H0=self.param['H0'], 353 | Om0=self.param['OMEGA_M'], 354 | Ode0=self.param['OMEGA_L'], 355 | Tcmb0=2.725, Ob0=0.048) 356 | 357 | self.set_tempfilt() 358 | 359 | 360 | def info(self): 361 | """ 362 | Print file contents 363 | """ 364 | show_info(self.h5file) 365 | 366 | 367 | def get_catalog(self): 368 | """ 369 | Create a full catalog table from the photometry data in the 370 | HDF5 file 371 | """ 372 | cat, trans = cat_from_hdf5(self.h5file) 373 | return cat 374 | 375 | 376 | def get_table(self, names, sl=None, verbose=True, strip_slash=True, as_table=True): 377 | """ 378 | Build a table from multiple dataset names 379 | 380 | Parameters 381 | ---------- 382 | names : str 383 | Dataset names in `h5file` 384 | 385 | sl : slice-like 386 | Array slice 387 | 388 | verbose : bool 389 | Print some status info 390 | 391 | Returns 392 | ------- 393 | data : array-like 394 | Sliced dataset data 395 | 396 | """ 397 | from astropy.table import Table 398 | 399 | tab = {} 400 | for name in names: 401 | if strip_slash: 402 
| key = name.split('/')[-1] 403 | else: 404 | key = name 405 | 406 | tab[key] = self.get_dataset(name, sl=sl, verbose=verbose) 407 | 408 | if as_table: 409 | tab = Table(tab) 410 | tab.meta['to_ujy'] = self.to_uJy 411 | return tab 412 | else: 413 | return tab 414 | 415 | 416 | def get_dataset(self, name, sl=None, verbose=True): 417 | """ 418 | Get a single column of data from the HDF5 file 419 | 420 | If ``name`` is in the `self.flux_columns` or `self.err_columns` lists, 421 | then compute the appropriate index of the `fit/fnu` or `fit/efnu_orig` 422 | datasets, respectively. 423 | 424 | Parameters 425 | ---------- 426 | name : str 427 | Dataset name in `h5file` 428 | 429 | sl : slice-like 430 | Array slice 431 | 432 | verbose : bool 433 | Print some status info 434 | 435 | Returns 436 | ------- 437 | data : array-like 438 | Sliced dataset data 439 | 440 | """ 441 | if name in self.flux_columns: 442 | c_i = list(self.flux_columns).index(name) 443 | if verbose: 444 | print(f'{name} = cat/fnu[:,{c_i}]') 445 | 446 | data = get_dataset_slice(self.h5file, 'cat/fnu', sl) 447 | data = np.atleast_2d(data)[:,c_i] 448 | 449 | elif name in self.err_columns: 450 | c_i = list(self.err_columns).index(name) 451 | print(f'{name} = cat/efnu_orig[:,{c_i}]') 452 | 453 | data = get_dataset_slice(self.h5file, 'cat/efnu_orig', sl) 454 | data = np.atleast_2d(data)[:,c_i] 455 | 456 | else: 457 | data = get_dataset_slice(self.h5file, name, sl) 458 | 459 | if data.size == 1: 460 | return data[0] 461 | else: 462 | return data 463 | 464 | 465 | def set_tempfilt(self): 466 | """ 467 | Generate the `eazy.photoz.TemlpateGrid` object needed for plotting 468 | SEDs and refitting at other redshifts. 
469 | """ 470 | 471 | with h5py.File(self.h5file, 'r') as f: 472 | self.tempfilt = photoz.TemplateGrid(self.zgrid, self.templates, 473 | RES=self.param['FILTERS_RES'], 474 | f_numbers=self.f_numbers, 475 | add_igm=self.param['IGM_SCALE_TAU'], 476 | galactic_ebv=self.MW_EBV, 477 | Eb=self.param['SCALE_2175_BUMP'], 478 | n_proc=1, cosmology=self.cosmology, 479 | array_dtype=self.ARRAY_DTYPE, 480 | tempfilt_data=f['fit/tempfilt'][:], 481 | verbose=False) 482 | 483 | self.tempfilt.scale = f['fit/tempfilt_scale'][:] 484 | 485 | 486 | def set_template_error(self): 487 | """ 488 | Set the Template Error Function from `fit/tef_x` and `fit/tef_y` data 489 | in the HDF5 file 490 | 491 | Returns 492 | ------- 493 | Sets `TEF` attributes 494 | 495 | """ 496 | 497 | with h5py.File(self.h5file, 'r') as f: 498 | arrays = (f['fit/tef_x'][:], f['fit/tef_y'][:]) 499 | 500 | self.TEF = templates_code.TemplateError(arrays=arrays, 501 | filter_wavelengths=self.pivot, 502 | scale=self.param['TEMP_ERR_A2']) 503 | 504 | 505 | def set_attrs_from_hdf5(self): 506 | """ 507 | Set fixed attributes needed for `show_fit` 508 | """ 509 | 510 | with h5py.File(self.h5file,'r') as f: 511 | self.NOBJ, self.NFILT = f['cat/fnu'].shape 512 | self.pivot = f['cat/pivot'][:] 513 | self.OBJID = f['cat/id'][:] 514 | self.zp = f['cat/zp'][:] 515 | self.ext_corr = f['cat/ext_corr'][:] 516 | self.ext_redden = f['cat/ext_redden'][:] 517 | self.f_numbers = f['cat/f_numbers'][:] 518 | self.flux_columns = f['cat/flux_columns'].asstr()[:] 519 | self.err_columns = f['cat/err_columns'].asstr()[:] 520 | 521 | self.idx = np.arange(self.NOBJ, dtype=int) 522 | 523 | 524 | def set_attrs_for_dash(self): 525 | """ 526 | Set additional attributes needed for the Dash/Plotly tool. They're 527 | not set in `set_attrs_from_hdf5` as they may be memory-intensive for 528 | very large catalogs and aren't explicitly necessary for just 529 | `show_fit` showing SEDs. 
530 | """ 531 | with h5py.File(self.h5file,'r') as f: 532 | self.RA = f['cat/ra'][:] 533 | self.DEC = f['cat/dec'][:] 534 | self.ZSPEC = f['cat/z_spec'][:] 535 | self.nusefilt = f['cat/ok_data'][:].sum(axis=1) 536 | 537 | if not hasattr(self, 'cat'): 538 | self.cat = self.get_catalog() 539 | 540 | 541 | def get_object_data(self, ix): 542 | """ 543 | Pull out data for a given array index (not id!) corresponding 544 | to a particular object 545 | 546 | Returns 547 | ------- 548 | z_i, fnu_i, efnu_i, ra_i, dec_i, chi2_i, zspec_i, ok_i 549 | 550 | """ 551 | 552 | with h5py.File(self.h5file,'r') as f: 553 | fnu_i = f['cat/fnu'][ix,:] 554 | efnu_orig = f['cat/efnu_orig'][ix,:] 555 | ra_i = f['cat/ra'][ix] 556 | dec_i = f['cat/dec'][ix] 557 | z_i = f['fit/zbest'][ix] 558 | chi2_i = f['fit/chi2_fit'][ix,:] 559 | zspec_i = f['cat/z_spec'][ix] 560 | ok_i = f['cat/ok_data'][ix,:] 561 | 562 | efnu_i = np.sqrt( 563 | efnu_orig**2 + (self.param['SYS_ERR'] * np.maximum(fnu_i, 0.))**2 564 | ) 565 | 566 | return z_i, fnu_i, efnu_i, ra_i, dec_i, chi2_i, zspec_i, ok_i 567 | 568 | 569 | def get_lnp(self, ix): 570 | """ 571 | Compute ln P(z) directly from chi2. 572 | 573 | N.B. No prior implemented yet! 
574 | 575 | """ 576 | 577 | with h5py.File(self.h5file,'r') as f: 578 | chi2_i = f['fit/chi2_fit'][ix,:] 579 | 580 | return -0.5*(chi2_i - np.nanmin(chi2_i)) 581 | 582 | 583 | def show_fit(self, id, **kwargs): 584 | """ 585 | Wrapper around `eazy.photoz.PhotoZ.show_fit` 586 | """ 587 | _ = photoz.PhotoZ.show_fit(self, id, **kwargs) 588 | return _ 589 | 590 | 591 | def show_fit_plotly(self, id, **kwargs): 592 | """ 593 | Wrapper around `eazy.photoz.PhotoZ.show_fit_plotly` 594 | """ 595 | _ = photoz.PhotoZ.show_fit_plotly(self, id, **kwargs) 596 | return _ 597 | 598 | 599 | @property 600 | def to_flam(self): 601 | """ 602 | Conversion factor to :math:`10^{-19} erg/s/cm^2/Å` 603 | """ 604 | to_flam = 10**(-0.4 * (self.param['PRIOR_ABZP'] + 48.6)) 605 | to_flam *= utils.CLIGHT*1.e10/1.e-19/self.pivot**2/self.ext_corr 606 | return to_flam 607 | 608 | 609 | @property 610 | def to_uJy(self): 611 | """ 612 | Conversion of observed fluxes to `~astropy.units.microJansky` 613 | """ 614 | return 10**(-0.4*(self.param['PRIOR_ABZP']-23.9)) 615 | 616 | 617 | @property 618 | def lc(self): 619 | """ 620 | Filter pivot wavelengths (deprecated, use `pivot`) 621 | """ 622 | return self.pivot 623 | 624 | 625 | @property 626 | def ARRAY_DTYPE(self): 627 | """ 628 | Array data type from `ARRAY_NBITS` parameter 629 | """ 630 | if 'ARRAY_NBITS' in self.param.params: 631 | if self.param['ARRAY_NBITS'] == 64: 632 | ARRAY_DTYPE = np.float64 633 | else: 634 | ARRAY_DTYPE = np.float32 635 | else: 636 | ARRAY_DTYPE = np.float32 637 | 638 | return ARRAY_DTYPE 639 | 640 | 641 | @property 642 | def MW_EBV(self): 643 | """ 644 | Galactic extinction E(B-V) 645 | """ 646 | if 'MW_EBV' not in self.param.params: 647 | return 0. 
# 0.0354 # MACS0416 648 | else: 649 | return self.param['MW_EBV'] 650 | 651 | -------------------------------------------------------------------------------- /eazy/igm.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | # from math import pow as _pow 5 | 6 | from . import __file__ as filepath 7 | 8 | __all__ = ["Asada24", "Inoue14"] 9 | 10 | 11 | class Asada24(object): 12 | def __init__( 13 | self, 14 | sigmoid_params=(3.5918, 1.8414, 18.001), 15 | scale_tau=1.0, 16 | add_cgm=True, 17 | **kwargs, 18 | ): 19 | """ 20 | Compute IGM+CGM transmission from Asada et al. 2024, in prep. 21 | The IGM model is from Inoue+ (2014). 22 | 23 | Parameters 24 | ---------- 25 | sigmoid_params : 3-tuple of float 26 | Parameters that controll the redshift evolution of the CGM HI gas column 27 | density. The defaul values are from Asada et al. (2024). 28 | 29 | scale_tau : float 30 | Scalar multiplied to tau_igm 31 | 32 | add_cgm : bool 33 | Add the additional LyA damping absorption at z>6 as described in Asada+24. 34 | If False, the transmission will be identical to Inoue+2014 35 | 36 | .. plot:: 37 | :include-source: 38 | 39 | # Compare two IGM transmissions 40 | 41 | import numpy as np 42 | import matplotlib.pyplot as plt 43 | from eazy import igm as igm_module 44 | 45 | igm_A24 = igm_module.Asada24() 46 | igm_I14 = igm_module.Inoue14() 47 | 48 | redshifts = [6., 7., 8., 9., 10.] 
49 | colors = ['b', 'c', 'purple', 'orange', 'red'] 50 | 51 | wave = np.linspace(100,2000,1901) ## wavelength array in the rest-frame 52 | lyman = wave < 2000 53 | 54 | 55 | fig = plt.figure(figsize=(6,5)) 56 | for z, c in zip(redshifts, colors): 57 | igmz_A24 = wave*0.+1 58 | igmz_A24[lyman] = igm_A24.full_IGM(z, (wave*(1+z))[lyman]) 59 | 60 | igmz_I14 = wave*0.+1 61 | igmz_I14[lyman] = igm_I14.full_IGM(z, (wave*(1+z))[lyman]) 62 | 63 | plt.plot(wave*(1+z), igmz_I14, color=c, ls='dashed') 64 | plt.plot(wave*(1+z), igmz_A24, color=c, label=r'$z={}$'.format(int(z))) 65 | 66 | plt.xlabel('Observed wavelength [A]') 67 | plt.ylabel('Transmission') 68 | 69 | plt.legend() 70 | 71 | plt.xlim(5000,17000) 72 | """ 73 | self._load_data() 74 | self.sigmoid_params = sigmoid_params 75 | self.scale_tau = scale_tau 76 | self.add_cgm = add_cgm 77 | 78 | def __repr__(self): 79 | attrs = ["sigmoid_params", "add_cgm", "scale_tau"] 80 | return ( 81 | "(" 82 | + ", ".join([f"{k}={self.__dict__[k]}" for k in attrs]) 83 | + ")" 84 | ) 85 | 86 | @property 87 | def max_fuv_wave(self): 88 | """ 89 | Maximum FUV wavelength (Angstroms) where IGM model will have an effect 90 | """ 91 | if self.add_cgm: 92 | return 2000.0 93 | else: 94 | return 1300.0 95 | 96 | def tau_cgm(self, N_HI, lam, z): 97 | """ 98 | CGM optical depth given by Totani+06, eqn (1) 99 | 100 | Parameters 101 | ---------- 102 | N_HI : float 103 | HI column density [cm-2] 104 | 105 | lam : 1D array 106 | wavelength array in the observed frame [AA] 107 | 108 | z : float 109 | Redshift of the source 110 | 111 | Returns 112 | ------- 113 | 1D array of tau_CGM 114 | """ 115 | 116 | lam_rest = lam / (1 + z) 117 | nu_rest = 3e18 / lam_rest 118 | 119 | tau = np.zeros(len(lam)) 120 | 121 | for i, nu in enumerate(nu_rest): 122 | tau[i] = N_HI * sigma_a(nu) 123 | 124 | return tau 125 | 126 | def lgNHI_z(self, z): 127 | """ 128 | HI column density as a function of redshift, calibrated in Asada+ 2024. 
129 | Only valid at z>=6 130 | 131 | Parameters 132 | ---------- 133 | z : float 134 | Redshift of the source 135 | 136 | Returns 137 | ------- 138 | log10(HI column density [cm-2]) 139 | """ 140 | 141 | lgN_HI = sigmoid( 142 | z, self.sigmoid_params[0], self.sigmoid_params[1], self.sigmoid_params[2] 143 | ) 144 | 145 | return lgN_HI 146 | 147 | ### IGM part -- identical to Inoue+14 148 | def _load_data(self): 149 | path = os.path.join(os.path.dirname(filepath), "data") 150 | # print path 151 | 152 | LAF_file = os.path.join(path, "LAFcoeff.txt") 153 | DLA_file = os.path.join(path, "DLAcoeff.txt") 154 | 155 | data = np.loadtxt(LAF_file, unpack=True) 156 | ix, lam, ALAF1, ALAF2, ALAF3 = data 157 | self.lam = lam[:, np.newaxis] 158 | self.ALAF1 = ALAF1[:, np.newaxis] 159 | self.ALAF2 = ALAF2[:, np.newaxis] 160 | self.ALAF3 = ALAF3[:, np.newaxis] 161 | 162 | data = np.loadtxt(DLA_file, unpack=True) 163 | ix, lam, ADLA1, ADLA2 = data 164 | self.ADLA1 = ADLA1[:, np.newaxis] 165 | self.ADLA2 = ADLA2[:, np.newaxis] 166 | 167 | return True 168 | 169 | @property 170 | def NA(self): 171 | """ 172 | Number of Lyman-series lines 173 | """ 174 | return self.lam.shape[0] 175 | 176 | def tLSLAF(self, zS, lobs): 177 | """ 178 | Lyman series, Lyman-alpha forest 179 | """ 180 | z1LAF = 1.2 181 | z2LAF = 4.7 182 | 183 | l2 = self.lam # [:, np.newaxis] 184 | tLSLAF_value = np.zeros_like(lobs * l2).T 185 | 186 | x0 = lobs < l2 * (1 + zS) 187 | x1 = x0 & (lobs < l2 * (1 + z1LAF)) 188 | x2 = x0 & ((lobs >= l2 * (1 + z1LAF)) & (lobs < l2 * (1 + z2LAF))) 189 | x3 = x0 & (lobs >= l2 * (1 + z2LAF)) 190 | 191 | tLSLAF_value = np.zeros_like(lobs * l2) 192 | tLSLAF_value[x1] += ((self.ALAF1 / l2**1.2) * lobs**1.2)[x1] 193 | tLSLAF_value[x2] += ((self.ALAF2 / l2**3.7) * lobs**3.7)[x2] 194 | tLSLAF_value[x3] += ((self.ALAF3 / l2**5.5) * lobs**5.5)[x3] 195 | 196 | return tLSLAF_value.sum(axis=0) 197 | 198 | def tLSDLA(self, zS, lobs): 199 | """ 200 | Lyman Series, DLA 201 | """ 202 | z1DLA = 2.0 
203 | 204 | l2 = self.lam # [:, np.newaxis] 205 | tLSDLA_value = np.zeros_like(lobs * l2) 206 | 207 | x0 = (lobs < l2 * (1 + zS)) & (lobs < l2 * (1.0 + z1DLA)) 208 | x1 = (lobs < l2 * (1 + zS)) & ~(lobs < l2 * (1.0 + z1DLA)) 209 | 210 | tLSDLA_value[x0] += ((self.ADLA1 / l2**2) * lobs**2)[x0] 211 | tLSDLA_value[x1] += ((self.ADLA2 / l2**3) * lobs**3)[x1] 212 | 213 | return tLSDLA_value.sum(axis=0) 214 | 215 | def tLCDLA(self, zS, lobs): 216 | """ 217 | Lyman continuum, DLA 218 | """ 219 | z1DLA = 2.0 220 | lamL = 911.8 221 | 222 | tLCDLA_value = np.zeros_like(lobs) 223 | 224 | x0 = lobs < lamL * (1.0 + zS) 225 | if zS < z1DLA: 226 | tLCDLA_value[x0] = ( 227 | 0.2113 * (1.0 + zS) ** 2.0 228 | - 0.07661 * (1.0 + zS) ** 2.3 * (lobs[x0] / lamL) ** (-3e-1) 229 | - 0.1347 * (lobs[x0] / lamL) ** 2.0 230 | ) 231 | else: 232 | x1 = lobs >= lamL * (1.0 + z1DLA) 233 | 234 | tLCDLA_value[x0 & x1] = ( 235 | 0.04696 * (1.0 + zS) ** 3.0 236 | - 0.01779 * (1.0 + zS) ** 3.3 * (lobs[x0 & x1] / lamL) ** (-3e-1) 237 | - 0.02916 * (lobs[x0 & x1] / lamL) ** 3.0 238 | ) 239 | tLCDLA_value[x0 & ~x1] = ( 240 | 0.6340 241 | + 0.04696 * (1.0 + zS) ** 3.0 242 | - 0.01779 * (1.0 + zS) ** 3.3 * (lobs[x0 & ~x1] / lamL) ** (-3e-1) 243 | - 0.1347 * (lobs[x0 & ~x1] / lamL) ** 2.0 244 | - 0.2905 * (lobs[x0 & ~x1] / lamL) ** (-3e-1) 245 | ) 246 | 247 | return tLCDLA_value 248 | 249 | def tLCLAF(self, zS, lobs): 250 | """ 251 | Lyman continuum, LAF 252 | """ 253 | z1LAF = 1.2 254 | z2LAF = 4.7 255 | lamL = 911.8 256 | 257 | tLCLAF_value = np.zeros_like(lobs) 258 | 259 | x0 = lobs < lamL * (1.0 + zS) 260 | 261 | if zS < z1LAF: 262 | tLCLAF_value[x0] = 0.3248 * ( 263 | (lobs[x0] / lamL) ** 1.2 264 | - (1.0 + zS) ** (-9e-1) * (lobs[x0] / lamL) ** 2.1 265 | ) 266 | elif zS < z2LAF: 267 | x1 = lobs >= lamL * (1 + z1LAF) 268 | tLCLAF_value[x0 & x1] = 2.545e-2 * ( 269 | (1.0 + zS) ** 1.6 * (lobs[x0 & x1] / lamL) ** 2.1 270 | - (lobs[x0 & x1] / lamL) ** 3.7 271 | ) 272 | tLCLAF_value[x0 & ~x1] = ( 273 | 
class Inoue14(object):
    r"""
    IGM absorption from Inoue et al. (2014)

    Analytic model for the mean intergalactic-medium opacity, split into
    Lyman-series and Lyman-continuum contributions from the Lyman-alpha
    forest (LAF) and damped Lyman-alpha systems (DLA).

    Note: all docstrings here are raw strings; previously they were not,
    so sequences like ``\tau`` and ``\nu`` were interpreted as string
    escapes (tab / newline) and corrupted the rendered documentation.
    """

    # Maximum FUV wavelength (Angstroms) where the IGM model has an effect
    max_fuv_wave = 1300.0

    def __init__(self, scale_tau=1.0, **kwargs):
        r"""
        Parameters
        ----------
        scale_tau : float
            Parameter multiplied to the IGM :math:`\tau` values (exponential
            in the linear absorption fraction).
            I.e., :math:`f_\mathrm{igm} = e^{-\mathrm{scale\_tau} \tau}`.

        Attributes
        ----------
        max_fuv_wave : float
            Maximum FUV wavelength (Angstroms) where IGM model will have an
            effect
        """
        self._load_data()
        self.scale_tau = scale_tau

    def __repr__(self):
        attrs = ["scale_tau"]
        return (
            "("
            + ", ".join([f"{k}={self.__dict__[k]}" for k in attrs])
            + ")"
        )

    def _load_data(self):
        """
        Read the LAF and DLA coefficient tables shipped with the package.

        Coefficients are stored with a trailing ``newaxis`` so they
        broadcast against 1D observed-wavelength arrays.
        """
        # ``filepath`` is a module-level name defined earlier in this
        # module (presumably the path of igm.py) -- TODO confirm.
        path = os.path.join(os.path.dirname(filepath), "data")

        LAF_file = os.path.join(path, "LAFcoeff.txt")
        DLA_file = os.path.join(path, "DLAcoeff.txt")

        data = np.loadtxt(LAF_file, unpack=True)
        ix, lam, ALAF1, ALAF2, ALAF3 = data
        self.lam = lam[:, np.newaxis]
        self.ALAF1 = ALAF1[:, np.newaxis]
        self.ALAF2 = ALAF2[:, np.newaxis]
        self.ALAF3 = ALAF3[:, np.newaxis]

        data = np.loadtxt(DLA_file, unpack=True)
        ix, lam, ADLA1, ADLA2 = data
        self.ADLA1 = ADLA1[:, np.newaxis]
        self.ADLA2 = ADLA2[:, np.newaxis]

        return True

    @property
    def NA(self):
        """
        Number of Lyman-series lines
        """
        return self.lam.shape[0]

    def tLSLAF(self, zS, lobs):
        """
        Lyman series, Lyman-alpha forest

        Parameters
        ----------
        zS : float
            Source redshift.

        lobs : array-like
            Observed-frame wavelength(s), Angstroms.

        Returns
        -------
        tau : array-like
            LAF Lyman-series optical depth summed over all lines.
        """
        z1LAF = 1.2
        z2LAF = 4.7

        l2 = self.lam  # (NA, 1) line wavelengths, broadcast against lobs

        # Redshift regimes for each (line, wavelength) pair
        x0 = lobs < l2 * (1 + zS)
        x1 = x0 & (lobs < l2 * (1 + z1LAF))
        x2 = x0 & ((lobs >= l2 * (1 + z1LAF)) & (lobs < l2 * (1 + z2LAF)))
        x3 = x0 & (lobs >= l2 * (1 + z2LAF))

        # (Removed a dead duplicate allocation of the transposed array here)
        tLSLAF_value = np.zeros_like(lobs * l2)
        tLSLAF_value[x1] += ((self.ALAF1 / l2**1.2) * lobs**1.2)[x1]
        tLSLAF_value[x2] += ((self.ALAF2 / l2**3.7) * lobs**3.7)[x2]
        tLSLAF_value[x3] += ((self.ALAF3 / l2**5.5) * lobs**5.5)[x3]

        return tLSLAF_value.sum(axis=0)

    def tLSDLA(self, zS, lobs):
        """
        Lyman Series, DLA

        Parameters
        ----------
        zS : float
            Source redshift.

        lobs : array-like
            Observed-frame wavelength(s), Angstroms.

        Returns
        -------
        tau : array-like
            DLA Lyman-series optical depth summed over all lines.
        """
        z1DLA = 2.0

        l2 = self.lam  # (NA, 1) line wavelengths
        tLSDLA_value = np.zeros_like(lobs * l2)

        x0 = (lobs < l2 * (1 + zS)) & (lobs < l2 * (1.0 + z1DLA))
        x1 = (lobs < l2 * (1 + zS)) & ~(lobs < l2 * (1.0 + z1DLA))

        tLSDLA_value[x0] += ((self.ADLA1 / l2**2) * lobs**2)[x0]
        tLSDLA_value[x1] += ((self.ADLA2 / l2**3) * lobs**3)[x1]

        return tLSDLA_value.sum(axis=0)

    def tLCDLA(self, zS, lobs):
        """
        Lyman continuum, DLA

        Parameters
        ----------
        zS : float
            Source redshift.

        lobs : array-like
            Observed-frame wavelength(s), Angstroms.

        Returns
        -------
        tau : array-like
            DLA Lyman-continuum optical depth; zero redward of the
            redshifted Lyman limit.
        """
        z1DLA = 2.0
        lamL = 911.8  # Lyman-limit wavelength, Angstroms

        tLCDLA_value = np.zeros_like(lobs)

        # Only wavelengths blueward of the redshifted Lyman limit absorb
        x0 = lobs < lamL * (1.0 + zS)
        if zS < z1DLA:
            tLCDLA_value[x0] = (
                0.2113 * (1.0 + zS) ** 2.0
                - 0.07661 * (1.0 + zS) ** 2.3 * (lobs[x0] / lamL) ** (-3e-1)
                - 0.1347 * (lobs[x0] / lamL) ** 2.0
            )
        else:
            x1 = lobs >= lamL * (1.0 + z1DLA)

            tLCDLA_value[x0 & x1] = (
                0.04696 * (1.0 + zS) ** 3.0
                - 0.01779 * (1.0 + zS) ** 3.3 * (lobs[x0 & x1] / lamL) ** (-3e-1)
                - 0.02916 * (lobs[x0 & x1] / lamL) ** 3.0
            )
            tLCDLA_value[x0 & ~x1] = (
                0.6340
                + 0.04696 * (1.0 + zS) ** 3.0
                - 0.01779 * (1.0 + zS) ** 3.3 * (lobs[x0 & ~x1] / lamL) ** (-3e-1)
                - 0.1347 * (lobs[x0 & ~x1] / lamL) ** 2.0
                - 0.2905 * (lobs[x0 & ~x1] / lamL) ** (-3e-1)
            )

        return tLCDLA_value

    def tLCLAF(self, zS, lobs):
        """
        Lyman continuum, LAF

        Parameters
        ----------
        zS : float
            Source redshift.

        lobs : array-like
            Observed-frame wavelength(s), Angstroms.

        Returns
        -------
        tau : array-like
            LAF Lyman-continuum optical depth; zero redward of the
            redshifted Lyman limit.
        """
        z1LAF = 1.2
        z2LAF = 4.7
        lamL = 911.8  # Lyman-limit wavelength, Angstroms

        tLCLAF_value = np.zeros_like(lobs)

        x0 = lobs < lamL * (1.0 + zS)

        if zS < z1LAF:
            tLCLAF_value[x0] = 0.3248 * (
                (lobs[x0] / lamL) ** 1.2
                - (1.0 + zS) ** (-9e-1) * (lobs[x0] / lamL) ** 2.1
            )
        elif zS < z2LAF:
            x1 = lobs >= lamL * (1 + z1LAF)
            tLCLAF_value[x0 & x1] = 2.545e-2 * (
                (1.0 + zS) ** 1.6 * (lobs[x0 & x1] / lamL) ** 2.1
                - (lobs[x0 & x1] / lamL) ** 3.7
            )
            tLCLAF_value[x0 & ~x1] = (
                2.545e-2 * (1.0 + zS) ** 1.6 * (lobs[x0 & ~x1] / lamL) ** 2.1
                + 0.3248 * (lobs[x0 & ~x1] / lamL) ** 1.2
                - 0.2496 * (lobs[x0 & ~x1] / lamL) ** 2.1
            )
        else:
            x1 = lobs > lamL * (1.0 + z2LAF)
            x2 = (lobs >= lamL * (1.0 + z1LAF)) & (lobs < lamL * (1.0 + z2LAF))
            x3 = lobs < lamL * (1.0 + z1LAF)

            tLCLAF_value[x0 & x1] = 5.221e-4 * (
                (1.0 + zS) ** 3.4 * (lobs[x0 & x1] / lamL) ** 2.1
                - (lobs[x0 & x1] / lamL) ** 5.5
            )
            tLCLAF_value[x0 & x2] = (
                5.221e-4 * (1.0 + zS) ** 3.4 * (lobs[x0 & x2] / lamL) ** 2.1
                + 0.2182 * (lobs[x0 & x2] / lamL) ** 2.1
                - 2.545e-2 * (lobs[x0 & x2] / lamL) ** 3.7
            )
            tLCLAF_value[x0 & x3] = (
                5.221e-4 * (1.0 + zS) ** 3.4 * (lobs[x0 & x3] / lamL) ** 2.1
                + 0.3248 * (lobs[x0 & x3] / lamL) ** 1.2
                - 3.140e-2 * (lobs[x0 & x3] / lamL) ** 2.1
            )

        return tLCLAF_value

    def full_IGM(self, z, lobs):
        """Get full Inoue IGM absorption

        Parameters
        ----------
        z : float
            Redshift to evaluate IGM absorption

        lobs : array
            Observed-frame wavelength(s) in Angstroms.

        Returns
        -------
        abs : array
            IGM absorption

        """
        tau_LS = self.tLSLAF(z, lobs) + self.tLSDLA(z, lobs)
        tau_LC = self.tLCLAF(z, lobs) + self.tLCDLA(z, lobs)

        # NOTE: a sigmoid "upturn" correction at short rest-frame
        # wavelengths (< ~600 A) at low z was prototyped here but is
        # currently disabled (tau_clip = 0).
        tau_clip = 0.0

        return np.exp(-self.scale_tau * (tau_LC + tau_LS + tau_clip))

    def build_grid(self, zgrid, lrest):
        """Build a spline interpolation object for fast IGM models

        Parameters
        ----------
        zgrid : array-like
            Redshift grid.

        lrest : array-like
            Rest-frame wavelength grid, Angstroms.

        Returns
        -------
        Sets the ``interpolate`` attribute (a `scipy.interpolate.CubicSpline`
        over redshift returning the IGM transmission on ``lrest``).
        """

        from scipy.interpolate import CubicSpline

        igm_grid = np.zeros((len(zgrid), len(lrest)))
        for iz in range(len(zgrid)):
            igm_grid[iz, :] = self.full_IGM(zgrid[iz], lrest * (1 + zgrid[iz]))

        self.interpolate = CubicSpline(zgrid, igm_grid)


def sigmoid(x, A, a, c):
    """
    Sigmoid function centered at x=6

    Returns ``A / (1 + exp(-a * (x - 6))) + c``.
    """

    return A / (1 + np.exp(-a * (x - 6))) + c


def sigma_a(nu_rest):
    r"""
    Ly\alpha absorption cross section for the restframe frequency
    :math:`\nu` given by Totani+06, eqn (1), for CGM damping wing
    calculation

    Parameters
    ----------
    nu_rest : float
        rest-frame frequency [Hz]

    Returns
    -------
    Ly\alpha absorption cross section at the restframe frequency
    :math:`\nu_\mathrm{rest}` [cm2]
    """

    Lam_a = 6.255486e8  ## Hz
    nu_lya = 2.46607e15  ## Hz

    C = 6.9029528e22  ## Constant factor [Ang2/s2]

    sig = (
        C
        * (nu_rest / nu_lya) ** 4
        / (
            4 * np.pi**2 * (nu_rest - nu_lya) ** 2
            + Lam_a**2 * (nu_rest / nu_lya) ** 6 / 4
        )
    )

    s = sig * 1e-16  ## convert AA-2 to cm-2

    return s
import utils 6 | 7 | __all__ = ["EazyParam", "TranslateFile", "read_param_file"] 8 | 9 | 10 | def read_param_file(param_file=None, verbose=True): 11 | """ 12 | Load a param file and add default parameters if any missing 13 | """ 14 | param = EazyParam(param_file, verbose=True) 15 | if param_file is not None: 16 | # Read defaults 17 | defaults = EazyParam(None, verbose=False) 18 | for k in defaults.param_names: 19 | if k not in param.param_names: 20 | param[k] = defaults[k] 21 | if verbose: 22 | print(f"Parameter default: {k} = {defaults[k]}") 23 | 24 | return param 25 | 26 | 27 | class EazyParam(object): 28 | def __init__(self, PARAM_FILE=None, verbose=True): 29 | """ 30 | Read an Eazy zphot.param file. 31 | 32 | Example: 33 | 34 | >>> if os.path.exists('zphot.param'): 35 | ... params = EazyParam(PARAM_FILE='zphot.param') 36 | ... print(params['Z_STEP']) 37 | 38 | Defaults are in `eazy/data/zphot.param.default `_ 39 | 40 | Parameters 41 | ---------- 42 | param_file : str 43 | Name of parameter file. If None, then will get 44 | `data/zphot.param.default` from within the module. 45 | 46 | Attributes 47 | ---------- 48 | params : `collections.OrderedDict` 49 | Parameter dictionary. Don't modify this directly but rather use 50 | `__getitem__` and `__setitem__` methods. 51 | 52 | param_names 53 | 54 | formats : list 55 | List indicating if parameters are interpreted as string ('s') or 56 | scalar ('f') values. 
57 | 58 | """ 59 | if PARAM_FILE is None: 60 | PARAM_FILE = os.path.join( 61 | os.path.dirname(__file__), "data/zphot.param.default" 62 | ) 63 | 64 | if verbose: 65 | print("Read default param file: " + PARAM_FILE) 66 | 67 | self.filename = PARAM_FILE 68 | self.param_path = os.path.dirname(PARAM_FILE) 69 | 70 | f = open(PARAM_FILE, "r") 71 | self.lines = f.readlines() 72 | f.close() 73 | 74 | self.params = collections.OrderedDict() 75 | self.formats = collections.OrderedDict() 76 | 77 | self._process_params() 78 | 79 | @property 80 | def param_names(self): 81 | """ 82 | Keywords of the `params` dictionary 83 | """ 84 | return list(self.params.keys()) 85 | 86 | def _process_params(self): 87 | """ 88 | Process parameter dictionary 89 | """ 90 | params = collections.OrderedDict() 91 | formats = collections.OrderedDict() 92 | 93 | # self.param_names = [] 94 | for line in self.lines: 95 | if not line.strip().startswith("#"): 96 | lsplit = line.split() 97 | if lsplit.__len__() >= 2: 98 | params[lsplit[0]] = lsplit[1] 99 | # self.param_names.append(lsplit[0]) 100 | try: 101 | flt = float(lsplit[1]) 102 | formats[lsplit[0]] = "f" 103 | params[lsplit[0]] = flt 104 | except: 105 | formats[lsplit[0]] = "s" 106 | 107 | self.params = params 108 | self.formats = formats 109 | 110 | @property 111 | def to_mJy(self): 112 | """ 113 | Return catalog conversion factor to mJy based on ``PRIOR_ABZP``. 114 | """ 115 | return 10 ** (-0.4 * (self.params["PRIOR_ABZP"] - 23.9)) / 1000.0 116 | 117 | def write(self, file=None): 118 | """ 119 | Write to an ascii file 120 | """ 121 | if file == None: 122 | print("No output file specified...") 123 | else: 124 | fp = open(file, "w") 125 | for param in self.param_names: 126 | fp.write("{0:25s} {1}\n".format(param, self.params[param])) 127 | 128 | fp.close() 129 | 130 | def __getitem__(self, param_name): 131 | """ 132 | Get item from ``params`` dict and return None if parameter not found. 
133 | """ 134 | if param_name.upper() not in self.param_names: 135 | print(f"Parameter {param_name} not found. Check `param_names` attribute.") 136 | return None 137 | else: 138 | return self.params[param_name.upper()] 139 | 140 | def __setitem__(self, param_name, value): 141 | """ 142 | Set item in ``params`` dict. 143 | """ 144 | self.params[param_name.upper()] = value 145 | 146 | def verify_params(self): 147 | """ 148 | Some checks on the parameters 149 | """ 150 | 151 | assert self["Z_MAX"] > self["Z_MIN"] 152 | 153 | test_paths = ["./", utils.DATA_PATH, os.path.join(utils.DATA_PATH, "filters")] 154 | 155 | for k in ["TEMPLATES_FILE", "TEMP_ERR_FILE", "CATALOG_FILE", "FILTERS_RES"]: 156 | if isinstance(self[k], str): 157 | filename = self[k] 158 | file_found = False 159 | for path in test_paths: 160 | if os.path.exists(os.path.join(path, filename)): 161 | file_found = True 162 | break 163 | 164 | if not file_found: 165 | raise FileNotFoundError( 166 | f"{k} ({self[k]}) not found in {test_paths}" 167 | ) 168 | 169 | assert int(self["ARRAY_NBITS"]) in [32, 64] 170 | 171 | # Positive 172 | for k in [ 173 | "TEMP_ERR_A2", 174 | "SYS_ERR", 175 | "IGM_SCALE_TAU", 176 | "MW_EBV", 177 | "OMEGA_M", 178 | "OMEGA_L", 179 | ]: 180 | if self[k] < 0: 181 | raise ValueError(f"{k} ({self[k]}) must be >= 0") 182 | 183 | # Positive nonzero 184 | for k in ["Z_STEP", "H0", "RF_PADDING"]: 185 | if self[k] < 0: 186 | raise ValueError(f"{k} ({self[k]}) must be > 0") 187 | 188 | @property 189 | def kwargs(self): 190 | """ 191 | Dictionary with lower-case parameter names for passing as ``**kwargs`` 192 | """ 193 | kws = collections.OrderedDict() 194 | for k in self.param_names: 195 | kws[k.lower()] = self.params[k] 196 | 197 | return kws 198 | 199 | @property 200 | def igm_kwargs(self): 201 | """ 202 | Keywords for initializing the IGM attenuation model 203 | """ 204 | igm_kwargs = { 205 | "scale_tau": 1.0, 206 | "add_cgm": self.__getitem__("ADD_CGM") in utils.TRUE_VALUES, 207 | } 208 
| 209 | if "IGM_SCALE_TAU" in self.params: 210 | igm_kwargs["scale_tau"] = self.params["IGM_SCALE_TAU"] 211 | 212 | igm_kwargs["sigmoid_params"] = ( 213 | self.params["SIGMOID_PARAM1"], 214 | self.params["SIGMOID_PARAM2"], 215 | self.params["SIGMOID_PARAM3"], 216 | ) 217 | 218 | return igm_kwargs 219 | 220 | 221 | class TranslateFile: 222 | def __init__(self, file="zphot.translate"): 223 | """ 224 | File for translating catalog columns to associate bandbasses to them 225 | 226 | The `file` has format 227 | 228 | .. code-block:: 229 | 230 | flux_irac_ch1 F18 231 | err_irac_ch1 E18 232 | ... 233 | 234 | or a CSV table with format 235 | 236 | .. code-block:: 237 | 238 | column, trans, error 239 | flux_irac_ch1, F18 240 | err_irac_ch1, E18, 1.0 241 | ... 242 | 243 | where `flux_irac_ch1` is a column in the catalog table corresponding 244 | to the IRAC 3.6 µm flux density. ``F18`` indicates that this is a 245 | *flux density* column and is associated with filter number 18 in the 246 | `~eazy.params.filters.FilterFile`. 247 | 248 | ``E18`` indicates an uncertainty column, and filters must have both 249 | flux density and uncertainty columns to be considered. 250 | 251 | The original catalog could have had column names ``F18`` and ``E18`` 252 | and not needed a translate file but it is generally preferable to have 253 | more descriptive column names that aren't necessarily tied to a 254 | particular `eazy` filter file. 255 | 256 | Note, similarly, that columns like `F{N}` and `E{N}` are treated as 257 | these types of flux and uncertainty columns. 
If they correspond to 258 | something else, they should be "translated" to avoid confusion 259 | 260 | """ 261 | from astropy.table import Table 262 | 263 | self.file = file 264 | self.trans = collections.OrderedDict() 265 | self.error = collections.OrderedDict() 266 | 267 | if hasattr(file, "colnames"): 268 | tr = file 269 | self.file = "input_table.translate" 270 | 271 | if "error" not in tr.colnames: 272 | tr["error"] = 1.0 273 | 274 | if tr.colnames != ["column", "trans", "error"]: 275 | msg = f"table translate_file file must have columns" 276 | msg += f" 'column', 'trans' [, 'error']. The file {file}" 277 | msg += f" has columns {tr.colnames}." 278 | raise ValueError(msg) 279 | 280 | for i, k in enumerate(tr["column"]): 281 | self.trans[k] = tr["trans"][i] 282 | self.error[k] = tr["error"][i] 283 | 284 | elif file.endswith("csv"): 285 | tr = Table.read(file) 286 | 287 | if "error" not in tr.colnames: 288 | tr["error"] = 1.0 289 | 290 | if tr.colnames != ["column", "trans", "error"]: 291 | msg = f"csv translate_file file must have columns" 292 | msg += f" 'column', 'trans' [, 'error']. The file {file}" 293 | msg += f" has columns {tr.colnames}." 
294 | raise ValueError(msg) 295 | 296 | for i, k in enumerate(tr["column"]): 297 | self.trans[k] = tr["trans"][i] 298 | self.error[k] = tr["error"][i] 299 | 300 | else: 301 | lines = open(file).readlines() 302 | 303 | for line in lines: 304 | spl = line.split() 305 | if (line.strip() == "") | (len(spl) < 2): 306 | continue 307 | 308 | key = spl[0] 309 | self.trans[key] = spl[1] 310 | if len(spl) == 3: 311 | self.error[key] = float(spl[2]) 312 | else: 313 | self.error[key] = 1.0 314 | 315 | def change_error(self, filter=88, value=1.0e8): 316 | """ 317 | Modify uncertainties based on error scaling factors in translate file 318 | """ 319 | if isinstance(filter, str): 320 | if "f_" in filter: 321 | err_filt = filter.replace("f_", "e_") 322 | else: 323 | err_filt = "e" + filter 324 | 325 | if err_filt in self.error: 326 | self.error[err_filt] = value 327 | return True 328 | 329 | if isinstance(filter, int): 330 | for key in self.trans.keys(): 331 | if self.trans[key] == "E{0:0d}".format(filter): 332 | self.error[key] = value 333 | return True 334 | 335 | print("Filter {0} not found in list.".format(str(filter))) 336 | 337 | def write(self, file=None, show_ones=False): 338 | """ 339 | Write to an ascii file 340 | """ 341 | lines = [] 342 | for key in self.error: 343 | line = "{0} {1}".format(key, self.trans[key]) 344 | if self.trans[key].startswith("E") & ((self.error[key] != 1.0) | show_ones): 345 | line += " {0:.1f}".format(self.error[key]) 346 | 347 | lines.append(line + "\n") 348 | 349 | if file is None: 350 | file = self.file 351 | 352 | if file: 353 | fp = open(file, "w") 354 | fp.writelines(lines) 355 | fp.close() 356 | else: 357 | for line in lines: 358 | print(line[:-1]) 359 | 360 | def to_csv(self): 361 | """ 362 | Generate CSV string 363 | """ 364 | rows = "column,trans,error\n" 365 | for k in self.error: 366 | rows += f"{k},{self.trans[k]},{self.error[k]}\n" 367 | return rows 368 | 
import os
import warnings

import numpy as np

from astropy.utils.exceptions import AstropyWarning

from .. import filters, fetch_eazy_photoz, utils

# Populated by test_read_filter_res for reuse
FILTER_RES = None

def test_data_path():
    """
    Data path, download data files if needed
    """
    from .. import filters, fetch_eazy_photoz, utils

    latest = os.path.join(utils.DATA_PATH, "filters", "FILTER.RES.latest")
    if not os.path.exists(latest):
        fetch_eazy_photoz()


def test_array_filter():
    """
    Generate filter from arrays
    """
    wave = np.arange(5400, 5600.0, 1)
    thru = wave * 0.0
    thru[10:-10] = 1

    tophat = filters.FilterDefinition(
        wave=wave, throughput=thru, name="Tophat 5500"
    )

    assert np.allclose(tophat.pivot, 5500, rtol=1.0e-3)
    assert np.allclose(tophat.ABVega, 0.016, atol=0.03)
    assert np.allclose(tophat.equivwidth, 180)
    assert np.allclose(tophat.rectwidth, 180)


def test_pysynphot_filter():
    """
    PySynphot filter bandpass
    """
    try:
        import pysynphot as S
    except:
        # pysynphot is optional; nothing to compare against
        return None

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", AstropyWarning)
        v_pysyn = S.ObsBandpass("v")

    v_eazy = filters.FilterDefinition(bp=v_pysyn)

    assert np.allclose(v_pysyn.pivot(), v_eazy.pivot, rtol=0.001)


def test_read_filter_res():
    """
    Read FILTER.RES
    """
    global FILTER_RES

    res = filters.FilterFile(
        os.path.join(utils.DATA_PATH, "filters/FILTER.RES.latest")
    )

    assert res[155].name.startswith("REST_FRAME/maiz-apellaniz_Johnson_V")
    assert np.allclose(res[155].pivot, 5479.35, rtol=0.001)

    FILTER_RES = res


# ---- eazy/tests/test_igm.py ----
import numpy as np

from .. import igm

# More comprehensive tests in test_templates


def test_igm():
    """
    Test IGM module (Inoue14)
    """

    model = igm.Inoue14()

    # Reference transmission from commit
    # db97f839cf8afe4a22c31c5d6195fd707ba4de32
    z = 3.0
    rest_wave = np.arange(850, 1251, 50)
    expected = np.array(
        [
            0.33537573,
            0.54634578,
            0.74207249,
            0.74194787,
            0.79182545,
            0.75792504,
            0.72135181,
            0.68233589,
            1.0,
        ]
    )

    obs_wave = rest_wave * (1 + z)
    assert np.allclose(model.full_IGM(z, obs_wave), expected, rtol=1.0e-2)

    # Doubling scale_tau squares the transmitted fraction
    scale_tau = 2.0
    scaled = np.exp(2 * np.log(expected))

    model.scale_tau = scale_tau
    assert np.allclose(model.full_IGM(z, obs_wave), scaled, rtol=1.0e-2)

    model = igm.Inoue14(scale_tau=scale_tau)
    assert np.allclose(model.full_IGM(z, obs_wave), scaled, rtol=1.0e-2)
import os

from .. import param
from .. import utils


def test_param_file():
    """
    Read Param file
    """
    # Default parameters (no filename given)
    defaults = param.EazyParam(PARAM_FILE=None)

    # Same file, read via an explicit path
    default_path = os.path.join(
        os.path.dirname(param.__file__), "data", "zphot.param.default"
    )
    explicit = param.EazyParam(PARAM_FILE=default_path)

    assert defaults["Z_MIN"] == explicit["Z_MIN"]

    # Assigning a brand-new keyword registers it
    defaults["XPARAM"] = 1.0
    assert "XPARAM" in defaults.param_names
    assert defaults["XPARAM"] == 1


# ---- eazy/tests/test_photoz.py ----
import os
import warnings
import pytest

import numpy as np

# Deterministic catalog noise; silence numpy floating warnings
np.random.seed(0)
np.seterr(all="ignore")

from astropy.utils.exceptions import AstropyWarning

from .. import utils
from .. import filters
from .. import photoz
from .. import param

from .test_templates import read_template_ascii

# Shared PhotoZ instance: built in test_full_photoz and reused (and
# mutated) by the tests below, so the test functions in this module are
# order-dependent.
ez = None

# Single redshift for testing
z_spec = 1.0

# Additional catalog objects with random noise
NRND = 16

# Test catalog S/N
SN = 20

# S/N constant in F-lambda from F160W
# If None, then constant S/N across all bands
uncertainty_power = 2


@pytest.fixture
def make_fake_catalog(read_template_ascii):
    """
    Make a fake photometric catalog

    Synthesizes NRND+1 objects from a single template at ``z_spec``
    observed through WFC3 + K + IRAC bandpasses, writes ASCII and FITS
    catalogs plus a translate file, and returns
    ``(table, cat_file, translate_file)``.
    """
    os.chdir(utils.DATA_PATH)

    #### Generate data
    res = filters.FilterFile("filters/FILTER.RES.latest")
    templ = read_template_ascii

    ### WFC3 SED + K + IRAC
    f_numbers = [209, 211, 214, 217, 202, 203, 205, 269, 18, 19]
    f_list = [res[f] for f in f_numbers]

    ### Photometry from a single template
    fnu = templ.integrate_filter(f_list, z=z_spec)

    ### Norm to F160W
    i_f160 = -4
    flux_f160 = 1.0  # microJy
    fnu *= flux_f160 / fnu[i_f160]

    ### Add noise
    # SN = 20
    efnu_f160w = (fnu / SN)[i_f160]
    lc = np.array([f.pivot for f in f_list])

    # Constant S/N in f-lambda
    if uncertainty_power is None:
        efnu = fnu / SN
    else:
        efnu = efnu_f160w * (lc / lc[i_f160]) ** uncertainty_power

    ### Make table
    tab = photoz.Table()
    tab["id"] = np.arange(NRND + 1, dtype=int) + 1
    tab["z_spec"] = z_spec
    tab["ra"] = 150.1
    tab["dec"] = 2.5

    ### Simpler filter names for catalog
    f_names = []
    for f in f_list:
        f_name = f.name.split(" ")[0].split("/")[-1].split(".dat")[0]
        f_name = f_name.replace("irac_tr", "ch")
        f_name = f_name.replace("hawki_k", "k").split("_")[0]
        f_names.append(f_name)

    ### Translate file
    translate_file = "zphot.translate.test"

    with open(translate_file, "w") as fp:
        for i, f in enumerate(f_names):
            # First object is noise-free; the other NRND get Gaussian noise
            tab[f"f_{f}"] = fnu[i] + np.append(0, np.random.normal(size=NRND) * efnu[i])
            tab[f"e_{f}"] = efnu[i]

            fp.write(f"f_{f} F{f_numbers[i]}\n")
            fp.write(f"e_{f} E{f_numbers[i]}\n")

    # Round-trip the translate file through its CSV representation
    tr = param.TranslateFile(translate_file)
    with open(translate_file + ".csv", "w") as fp:
        fp.write(tr.to_csv())

    tr = param.TranslateFile(translate_file + ".csv")

    ### ASCII catalog
    cat_file = "eazy_test.cat"
    tab.write(cat_file, overwrite=True, format="ascii.commented_header")
    tab.write(cat_file + ".fits", overwrite=True, format="fits")

    return tab, cat_file, translate_file


def test_full_photoz(make_fake_catalog):
    """
    End-to-end test

    Initializes the module-global ``ez`` PhotoZ object used by the
    subsequent tests, once from the ASCII catalog and once from FITS.
    """
    global ez

    tab, cat_file, translate_file = make_fake_catalog  # (SN=20)

    os.chdir(utils.DATA_PATH)

    ### Parameters
    params = {}
    params["CATALOG_FILE"] = cat_file
    params["MAIN_OUTPUT_FILE"] = "eazy_test"

    # Galactic extinction
    params["MW_EBV"] = 0.0

    params["Z_STEP"] = 0.01
    params["Z_MIN"] = z_spec - 0.5 * (1 + z_spec)
    params["Z_MAX"] = z_spec + 0.5 * (1 + z_spec)

    params["SYS_ERR"] = 0.02

    params["PRIOR_ABZP"] = 23.9  # uJy
    params["PRIOR_FILTER"] = 205  # f160W
    params["PRIOR_FILE"] = "templates/prior_F160W_TAO.dat"

    params["FILTERS_RES"] = "filters/FILTER.RES.latest"
    params["TEMPLATES_FILE"] = "templates/fsps_full/fsps_QSF_12_v3.param"
    params["VERBOSITY"] = 1
    params["FIX_ZSPEC"] = False

    ### Initialize object
    ez = photoz.PhotoZ(
        param_file=None,
        translate_file=translate_file,
        zeropoint_file=None,
        params=params,
        load_prior=True,
        load_products=False,
    )

    # FITS catalog
    params["CATALOG_FILE"] = cat_file + ".fits"
    ez = photoz.PhotoZ(
        param_file=None,
        translate_file=translate_file,
        zeropoint_file=None,
        params=params,
        load_prior=True,
        load_products=False,
    )


def test_photoz_methods():
    """
    Test methods on `~eazy.photoz.PhotoZ` object.
    """
    global ez

    ### Catalog subset
    ez.fit_catalog(idx=np.where(ez.cat["id"] < 2)[0], fitter="nnls")

    ### Full catalog, fitting methods
    ez.fit_catalog(fitter="lstsq")
    ez.fit_catalog(fitter="bounded")

    # Serial
    ez.fit_catalog(fitter="nnls", n_proc=0)
    chi2_serial = ez.chi2_fit
    coeffs_serial = ez.fit_coeffs

    # Parallel run must reproduce the serial result
    ez.fit_catalog(fitter="nnls", n_proc=4)
    assert np.allclose(ez.chi2_fit, chi2_serial)
    assert np.allclose(ez.fit_coeffs, coeffs_serial)

    ###### Methods

    # Specified zbest
    ez.fit_at_zbest(
        zbest=np.full(NRND + 1, z_spec),
        prior=False,
        beta_prior=False,
        get_err=False,
        clip_wavelength=1100,
        fitter="nnls",
        selection=None,
        n_proc=0,
        par_skip=10000,
    )

    # default zbest
    ez.fit_at_zbest(
        zbest=None,
        prior=False,
        beta_prior=False,
        get_err=False,
        clip_wavelength=1100,
        fitter="nnls",
        selection=None,
        n_proc=0,
        par_skip=10000,
    )

    # priors: all four prior/beta_prior combinations
    for prior in [True, False]:
        for beta_prior in [True, False]:
            ez.fit_at_zbest(
                zbest=None,
                prior=prior,
                beta_prior=beta_prior,
                get_err=False,
                clip_wavelength=1100,
                fitter="nnls",
                selection=None,
                n_proc=0,
                par_skip=10000,
            )

    # Peak-finder: single p(z) peak expected at z_spec
    peaks, numpeaks = ez.find_peaks()
    assert np.allclose(numpeaks, 1)
    assert np.allclose(ez.zgrid[peaks[0][0]], z_spec, atol=0.01 * (1 + z_spec))

    # return ez


def test_sps_parameters():
    """
    Derived parameters
    """
    global ez

    import astropy.units as u

    ### Run all photo-zs
    ez.fit_catalog(fitter="nnls")

    ### SPS parameters

    # Parameters normalized by V band
    sps = ez.sps_parameters(template_fnu_units=None, simple=True)

    fnu = 1 * u.solLum / u.Hz
    sps = ez.sps_parameters(template_fnu_units=fnu, simple=True)

    # Full RF-colors with filter weighting
    zout, hdu = ez.standard_output(
        zbest=None,
        rf_pad_width=0.5,
        rf_max_err=2,
        prior=True,
        beta_prior=True,
        simple=False,
        save_fits=False,
    )

    # "Simple" best-fit template RF colors
    zout, hdu = ez.standard_output(
        zbest=None,
        rf_pad_width=0.5,
        rf_max_err=2,
        prior=True,
        beta_prior=True,
        simple=True,
        save_fits=False,
    )

    assert np.allclose(zout["z_phot"][0], z_spec, atol=0.1 * (1 + z_spec))

    # Noise-free first object should be dominated by the input template
    coeffs_norm = ez.coeffs_best[0, :] / ez.coeffs_best[0, :].max()
    assert np.argmax(coeffs_norm) == 0
    assert np.sum(coeffs_norm) < 1.1

    # All zout data
    # zdict = {}
    # for k in zout.colnames:
    #     zdict[k] = zout[k][0]

    # Reference outputs for the first (noise-free) object
    zdict = {
        "nusefilt": 10,
        "z_ml": 0.99616235,
        "z_ml_chi2": 0.013447836,
        "z_ml_risk": 0.0105553605,
        "lc_min": 3353.6304006459895,
        "lc_max": 45020.33785230743,
        "z_phot": 0.99616235,
        "z_phot_chi2": 0.013447836,
        "z_phot_risk": 0.0105553605,
        "z_min_risk": 0.9937155,
        "min_risk": 0.010250151,
        "z_raw_chi2": 0.9937155,
        # 'raw_chi2': 0.035614725,
        "z025": 0.92501247,
        "z160": 0.9604295,
        "z500": 0.99208033,
        "z840": 1.0187114,
        "z975": 1.0420052,
        "restU": 0.41460526,
        "restU_err": 0.01217702,
        "restB": 0.8223915,
        "restB_err": 0.027577162,
        "restV": 0.92202765,
        "restV_err": 0.017819434,
        "restJ": 1.024555,
        "restJ_err": 0.05461645,
        "dL": 6575.8372348364455,
        "mass": 1338132577.7487125,
        "sfr": 0.026515421690098212,
        "Lv": 3418389791.239653,
        "LIR": 438179193.31513166,
        "MLv": 0.39145113912344354,
        "Av": 0.06295947926487588,
        # 'rest270': 0.11133574,
        # 'rest270_err': 0.007641867,
        # 'rest274': 0.23238972,
        # 'rest274_err': 0.008679345,
        # 'rest120': 0.12516989,
        # 'rest120_err': 0.005393833,
        # 'rest121': 0.1816069,
        # 'rest121_err': 0.00364957,
        # 'rest156': 0.3724664,
        # 'rest156_err': 0.014633045,
        # 'rest157': 0.86651146,
        # 'rest157_err': 0.018754214,
        # 'rest158': 0.94490474,
        # 'rest158_err': 0.027536243,
        # 'rest159': 0.997915,
        # 'rest159_err': 0.023829281,
        # 'rest160': 1.0238949,
        # 'rest160_err': 0.0475851,
        # 'rest161': 1.024555,
        # 'rest161_err': 0.05461645,
        # 'rest162': 1.010895,
        # 'rest162_err': 0.06887752,
        # 'rest163': 0.7563232,
        # 'rest163_err': 0.06583378,
        "DISTMOD": 43.33921454218198,
    }

    # NOTE(review): both branches below are identical (rtol=1.0, i.e. a
    # 100% tolerance) -- the _err split looks vestigial; confirm whether a
    # tighter tolerance was intended for the non-error columns.
    for k in zdict:
        if "_err" in k:
            assert np.allclose(zout[k][0], zdict[k], rtol=1.0)
        else:
            assert np.allclose(zout[k][0], zdict[k], rtol=1.0)

    # confirm that zout['z_phot'] == zout['z_ml']
    assert np.all(zout["z_ml"] == zout["z_phot"])

    ### user-specified zbest
    zuser = np.full(NRND + 1, z_spec)
    z2, _ = ez.standard_output(
        zbest=zuser,
        rf_pad_width=0.5,
        rf_max_err=2,
        prior=True,
        beta_prior=True,
        simple=True,
        save_fits=False,
    )

    # confirm that z2 has 'z_ml' and 'z_phot' columns and they're different
    assert np.all(z2["z_ml"] != z2["z_phot"])

    # confirm that z2['z_ml'] == zout['z_phot']
    assert np.all(z2["z_ml"] == zout["z_phot"])

    # zphot is now the user-specified redshift
    assert np.allclose(z2["z_phot"], zuser, rtol=1.0e-2)

    # SPS parameters are different, as calculated for zuser
    assert np.all(z2["mass"] != zout["mass"])
    assert np.all(z2["sfr"] != zout["sfr"])


def test_load_products():
    """
    Save and read products

    Writes the standard output files and checks a fresh PhotoZ built from
    them matches the in-memory state.
    """
    global ez

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", AstropyWarning)

        zout, hdu = ez.standard_output(
            zbest=None,
            rf_pad_width=0.5,
            rf_max_err=2,
            prior=True,
            beta_prior=True,
            simple=True,
            save_fits=2,
        )

    new = photoz.PhotoZ(
        param_file="eazy_test.zphot.param",
        translate_file="eazy_test.zphot.translate",
        zeropoint_file="eazy_test.zphot.zeropoint",
        load_prior=True,
        load_products=False,
    )

    assert len(new.param.params) == len(ez.param.params)

    bool_param = utils.bool_param
    for k in ez.param.params:
        assert bool_param(ez.param.params[k]) == bool_param(new.param.params[k])

    new.load_products()
    assert new.ZML_WITH_PRIOR == ez.ZML_WITH_PRIOR
    assert new.ZML_WITH_BETA_PRIOR == ez.ZML_WITH_BETA_PRIOR

    has_chi2_init = (ez.chi2_fit != 0).sum(axis=1) > 0
    has_chi2 = (new.chi2_fit != 0).sum(axis=1) > 0
    assert has_chi2_init.sum() == has_chi2.sum()

    rtol = 1.e-3

    assert np.allclose(new.coeffs_best, ez.coeffs_best, rtol=rtol)

    assert np.allclose(new.fit_coeffs, ez.fit_coeffs, rtol=rtol)

    assert np.allclose(new.prior_data, ez.prior_data, rtol=rtol)

    assert np.allclose(ez.lnp[has_chi2, :], new.lnp[has_chi2, :], rtol=rtol)


def test_fit_stars():
    """
    Fit phoenix star library for Star/Galaxy separation
    """
    global ez
    res = ez.fit_phoenix_stars()
    assert np.allclose(ez.star_chi2[0, 0], 3191.3662)

    # Subset selection: both integer-index and boolean-mask forms
    sub_index = np.array([0, 1])
    sub_bool = np.zeros(ez.NOBJ, dtype=bool)
    sub_bool[:2] = True

    for subset in [sub_index, sub_bool]:
        res = ez.fit_phoenix_stars(subset=subset)
        assert "subset" in res
        assert res["star_min_ix"].shape == (2,)
        assert res["star_tnorm"].shape == (2, ez.NSTAR)
        assert np.allclose(res["star_chi2"][0, 0], 3191.3662)


def test_photoz_figures():
    """
    Figures generated with PhotoZ object
    """
    import matplotlib.pyplot as plt

    global ez

    ### SED figure
    fig, data = ez.show_fit(id=0, id_is_idx=True, show_fnu=False)
    fig.savefig("eazy_test.sed.png", dpi=72)

    assert isinstance(fig, plt.Figure)
    assert isinstance(data, dict)

    fig = ez.show_fit(id=1, show_fnu=False)
    fig = ez.show_fit(id=1, show_fnu=True)
    fig = ez.show_fit(id=1, show_fnu=2)

    fig = ez.show_fit(id=1, show_fnu=False, zshow=z_spec)

    fig = ez.show_fit(id=1, show_components=True)

    fig = ez.zphot_zspec()
    fig = ez.zphot_zspec(zmin=0, zmax=2)
    fig.savefig("eazy_test.zphot_zspec.png", dpi=72)

    plt.close("all")


def test_zeropoint_residuals():
    """
    Zeropoint residuals after a full nnls fit
    """
    global ez

    ez.fit_catalog(fitter="nnls")
    res = ez.residuals()


def test_hdf5():
    """
    Test HDF5 save / recover state
    """
    import matplotlib.pyplot as plt

    plt.ioff()

    global ez

    from .. import hdf5

    try:
        import h5py
    except ImportError:
        # h5py is optional; nothing to test without it
        return None

    hdf5.write_hdf5(
        ez, h5file="test.hdf5", include_fit_coeffs=False, include_templates=True
    )

    new_ez = hdf5.initialize_from_hdf5("test.hdf5")

    assert np.allclose(ez.fnu, new_ez.fnu)
    assert np.allclose(ez.efnu, new_ez.efnu)
    assert np.allclose(ez.prior_mags, new_ez.prior_mags)
    assert np.allclose(ez.zml, new_ez.zml)
    assert np.allclose(ez.zbest, new_ez.zbest)
    assert np.allclose(ez.lnp, new_ez.lnp, rtol=1.0e-4)

    _ = new_ez.show_fit(10)
    plt.close("all")

    # Compact viewer
    h5 = hdf5.Viewer("test.hdf5")

    assert h5.NOBJ == new_ez.NOBJ
    assert h5.NTEMP == new_ez.NTEMP
    assert h5.NFILT == new_ez.NFILT
    assert h5.NZ == new_ez.NZ
    assert np.allclose(h5.zp, new_ez.zp)

    _cat = h5.get_catalog()
    _ = h5.show_fit(10)
    plt.close("all")


def test_cleanup():
    # Remove the cached numpy version of the filter file, if present
    if os.path.exists("filters/FILTER.RES.latest.npy"):
        os.remove("filters/FILTER.RES.latest.npy")
import filters 14 | 15 | 16 | @pytest.fixture 17 | def read_template_ascii(): 18 | path = os.path.join(utils.DATA_PATH, "templates") 19 | ascii_file = os.path.join(path, "fsps_full/fsps_QSF_12_v3_001.dat") 20 | templ = templates.Template(file=ascii_file) 21 | return templ 22 | 23 | 24 | def test_read_template_ascii(read_template_ascii): 25 | """ 26 | Test interpolation function 27 | """ 28 | templ = read_template_ascii 29 | assert templ.name == "fsps_QSF_12_v3_001.dat" 30 | assert np.allclose(templ.flux.shape, [1, 5994]) 31 | 32 | 33 | @pytest.fixture 34 | def read_template_fits(): 35 | path = os.path.join(utils.DATA_PATH, "templates") 36 | fits_file = os.path.join(path, "spline_templates_v2/spline_age0.01_av0.0.fits") 37 | 38 | templ = templates.Template(file=fits_file) 39 | return templ 40 | 41 | 42 | def test_read_template_fits(read_template_fits): 43 | """ 44 | Read template FITS file 45 | """ 46 | templ = read_template_fits 47 | assert np.allclose(templ.flux.shape, [templ.NZ, 12603]) 48 | assert templ.name == "spline_age0.01_av0.0.fits" 49 | 50 | 51 | def test_zscale(): 52 | """ 53 | Test zscale method 54 | """ 55 | wave = np.arange(5000.0, 6000.0) 56 | flux = np.zeros_like(wave) 57 | flux[500] = 1.0 58 | 59 | templ = templates.Template(arrays=(wave, flux)) 60 | 61 | z = 1.0 62 | zsc = templ.zscale(z=z, scalar=1.0) 63 | 64 | # wave = wave*(1+z) 65 | assert np.allclose(zsc.wave, templ.wave * (1 + z)) 66 | 67 | # max index unchanged 68 | assert np.argmax(zsc.flux.flatten()) == 500 69 | 70 | # Still just one non-zero value 71 | assert (zsc.flux > 0).sum() == (flux > 0).sum() 72 | 73 | # Flux scaled by 1/(1+z) 74 | assert zsc.flux.max() == 1 / (1 + z) 75 | 76 | # Float and array scale 77 | for scalar in [2, wave * 0.0 + 2]: 78 | zsc = templ.zscale(z=z, scalar=scalar) 79 | assert zsc.flux.max() == np.max(scalar) / (1 + z) 80 | 81 | 82 | def test_gaussian_templates(): 83 | """ 84 | Test templates.gaussian_templates 85 | """ 86 | wave = np.arange(5000.0, 6000.0) 87 | 
centers = np.arange(5100.0, 5901.0, 100) 88 | 89 | width = 10 90 | widths = centers * 0 + width 91 | 92 | NW = len(wave) 93 | NG = len(centers) 94 | norm = np.sqrt(2 * np.pi * width**2) 95 | 96 | n0 = templates.gaussian_templates(wave, centers=centers, widths=widths, norm=False) 97 | 98 | assert np.allclose(n0.shape, (NW, NG)) 99 | assert np.allclose(n0.max(), 1.0, rtol=1.0e-4) 100 | assert np.allclose(n0.sum(), norm * NG, rtol=1.0e-4) 101 | 102 | # Normalized 103 | n1 = templates.gaussian_templates(wave, centers=centers, widths=widths, norm=True) 104 | 105 | assert np.allclose(n1.shape, (NW, NG)) 106 | assert np.allclose(n1.max(), 1.0 / norm, rtol=1.0e-4) 107 | assert np.allclose(n1.sum(), NG, rtol=1.0e-4) 108 | 109 | 110 | def test_bspline_templates(): 111 | """ 112 | templates.bspline_templates 113 | """ 114 | wave = np.arange(5000.0, 6000.0) 115 | NW = len(wave) 116 | 117 | df = 6 118 | 119 | for df in [6, 8, 12]: 120 | for log in [True, False]: 121 | spl = templates.bspline_templates( 122 | wave, 123 | degree=3, 124 | df=df, 125 | get_matrix=True, 126 | log=log, 127 | clip=0.0001, 128 | minmax=None, 129 | ) 130 | 131 | assert np.allclose(spl.shape, (NW, df)) 132 | assert np.allclose(spl.sum(axis=1), 1.0, rtol=1.0e-4) 133 | 134 | spt = templates.bspline_templates( 135 | wave, degree=3, df=df, get_matrix=False, log=log, clip=0.0001, minmax=None 136 | ) 137 | 138 | assert len(spt) == df 139 | keys = list(spt.keys()) 140 | for i, k in enumerate(keys): 141 | templ = spt[k] 142 | assert np.allclose(templ.wave, wave) 143 | assert np.allclose(spl[:, i], np.squeeze(templ.flux)) 144 | 145 | 146 | def test_redshift_dependent(): 147 | """ 148 | Redshift-dependent templates 149 | """ 150 | wave = np.arange(5000.0, 6000.0) 151 | 152 | # No dependence 153 | flux = np.ones(len(wave)) 154 | templ = templates.Template(arrays=(wave, flux), redshifts=[0]) 155 | assert templ.zindex(-0.1, redshift_type="nearest") == 0 156 | assert templ.zindex(0.3, redshift_type="nearest") == 0 157 
| 158 | assert templ.zindex(-0.1, redshift_type="floor") == 0 159 | assert templ.zindex(0.3, redshift_type="floor") == 0 160 | 161 | assert np.allclose(templ.zindex(-0.1, redshift_type="interp"), (0, 1.0)) 162 | assert np.allclose(templ.zindex(0.1, redshift_type="interp"), (0, 1.0)) 163 | 164 | # Redshift-dependent 165 | flux = np.ones((2, len(wave))) 166 | flux[1, :] = 2 167 | 168 | templ = templates.Template(arrays=(wave, flux), redshifts=[0, 1]) 169 | 170 | assert templ.zindex(-0.1, redshift_type="nearest") == 0 171 | assert templ.zindex(0.3, redshift_type="nearest") == 0 172 | assert templ.zindex(0.6, redshift_type="nearest") == 1 173 | assert templ.zindex(2.6, redshift_type="nearest") == 1 174 | 175 | assert templ.zindex(-0.1, redshift_type="floor") == 0 176 | assert templ.zindex(0.3, redshift_type="floor") == 0 177 | assert templ.zindex(0.6, redshift_type="floor") == 0 178 | assert templ.zindex(2.6, redshift_type="floor") == 1 179 | 180 | assert np.allclose(templ.zindex(-0.1, redshift_type="interp"), (0, 1.0)) 181 | assert np.allclose(templ.zindex(0.1, redshift_type="interp"), (0, 0.9)) 182 | assert np.allclose(templ.zindex(0.9, redshift_type="interp"), (0, 0.1)) 183 | assert np.allclose(templ.zindex(1.1, redshift_type="interp"), (1, 1.0)) 184 | 185 | assert np.allclose(templ.flux_flam(iz=0, redshift_type="nearest"), 1.0) 186 | assert np.allclose(templ.flux_flam(iz=1, redshift_type="nearest"), 2.0) 187 | 188 | assert np.allclose(templ.flux_flam(z=-1.0, redshift_type="nearest"), 1.0) 189 | assert np.allclose(templ.flux_flam(z=0.0, redshift_type="nearest"), 1.0) 190 | assert np.allclose(templ.flux_flam(z=0.3, redshift_type="nearest"), 1.0) 191 | assert np.allclose(templ.flux_flam(z=1.5, redshift_type="nearest"), 2.0) 192 | 193 | assert np.allclose(templ.flux_flam(z=-1.0, redshift_type="interp"), 1.0) 194 | assert np.allclose(templ.flux_flam(z=0.0, redshift_type="interp"), 1.0) 195 | assert np.allclose(templ.flux_flam(z=0.3, redshift_type="interp"), 1.3) 196 | 
assert np.allclose(templ.flux_flam(z=1.5, redshift_type="interp"), 2.0) 197 | 198 | 199 | def test_integrate_filter(): 200 | """ 201 | Integrating templates through filter throughput 202 | """ 203 | import astropy.units as u 204 | 205 | # Tophat filter 206 | wx = np.arange(5400, 5600.0, 1) 207 | wy = wx * 0.0 208 | wy[10:-10] = 1 209 | 210 | f1 = filters.FilterDefinition(wave=wx, throughput=wy) 211 | 212 | # Flat-fnu spectrum 213 | wave = np.arange(1000.0, 9000.0) 214 | fnu = np.ones((2, len(wave))) * u.microJansky 215 | fnu[1, :] *= 2 216 | 217 | flam = fnu.to( 218 | utils.FLAM_CGS, 219 | equivalencies=u.equivalencies.spectral_density(wave * u.Angstrom), 220 | ) 221 | 222 | templ = templates.Template(arrays=(wave, flam), redshifts=[0, 1]) 223 | 224 | fnu_int = templ.integrate_filter(f1, z=0) 225 | assert np.allclose(fnu_int * utils.FNU_CGS, 1 * u.microJansky) 226 | 227 | fnu_int = templ.integrate_filter(f1, z=0, scale=2.0) 228 | assert np.allclose(fnu_int * utils.FNU_CGS, 2 * u.microJansky) 229 | 230 | fnu_int = templ.integrate_filter(f1, z=0.3, redshift_type="nearest") 231 | assert np.allclose(fnu_int * utils.FNU_CGS, 1 * u.microJansky) 232 | 233 | fnu_int = templ.integrate_filter(f1, z=0.3, redshift_type="interp") 234 | assert np.allclose(fnu_int * utils.FNU_CGS, 1.3 * u.microJansky) 235 | 236 | # Return f-lambda 237 | for z in [0, 0.2]: 238 | flam_interp = templ.integrate_filter( 239 | f1, z=z, flam=True, redshift_type="nearest" 240 | ) 241 | 242 | wz = f1.pivot * (1 + z) * u.Angstrom 243 | flam_unit = (1 * u.microJansky).to( 244 | utils.FLAM_CGS, equivalencies=u.equivalencies.spectral_density(wz) 245 | ) 246 | 247 | assert np.allclose(flam_interp * utils.FLAM_CGS, flam_unit) 248 | 249 | 250 | def test_template_resampling(): 251 | """ 252 | Resampling preserving integrated flux 253 | """ 254 | try: 255 | from grizli.utils_c import interp 256 | 257 | interp_grizli = interp.interp_conserve_c 258 | except: 259 | interp_grizli = None 260 | 261 | interp_eazy = 
utils.interp_conserve 262 | 263 | # Template with delta function line 264 | xtest = np.linspace(6550, 6576, 1024) 265 | ytest = xtest * 0 266 | 267 | ytest[len(xtest) // 2] = 1 268 | 269 | dx = np.diff(xtest)[0] 270 | 271 | tline = templates.Template(arrays=(xtest, ytest / dx)) 272 | 273 | # Different resample grids 274 | for func in [interp_eazy, interp_grizli]: 275 | if func is None: 276 | continue 277 | 278 | for nstep in [16, 32, 64, 128]: 279 | wlo = np.linspace(6550, 6576, nstep) 280 | 281 | tlo = tline.resample(wlo, in_place=False) 282 | 283 | assert np.allclose(trapz(tlo.flux.flatten(), tlo.wave), 1.0, rtol=1.0e-3) 284 | 285 | # Arbitrarily-spaced wavelengths 286 | np.random.seed(1) 287 | for func in [interp_eazy, interp_grizli]: 288 | if func is None: 289 | continue 290 | 291 | for nstep in [16, 32, 64, 128]: 292 | wlo = np.sort(np.random.rand(nstep) * 26 + 6550) 293 | 294 | tlo = tline.resample(wlo, in_place=False) 295 | 296 | assert np.allclose(trapz(tlo.flux.flatten(), tlo.wave), 1.0, rtol=1.0e-3) 297 | 298 | 299 | def test_template_smoothing(): 300 | """ 301 | Test template smoothing: 302 | 303 | - `eazy.templates.Template.smooth_velocity` 304 | - `eazy.templates.Template.to_observed_frame` 305 | 306 | """ 307 | from astropy.stats import gaussian_sigma_to_fwhm 308 | 309 | #### Template with delta function line 310 | xtest = np.linspace(6550, 6576, 1024) 311 | ytest = xtest * 0 312 | ytest[len(xtest) // 2] = 1 313 | 314 | dx = np.diff(xtest)[0] 315 | 316 | tline = templates.Template(arrays=(xtest, ytest)) 317 | 318 | #### Velocity smoothing 319 | vel = 100 # sigma 320 | pixel_sigma = vel / 3.0e5 * 6563.0 / dx 321 | 322 | tsm = tline.smooth_velocity(vel, in_place=False) 323 | 324 | assert np.allclose( 325 | tsm.flux.max(), 1.0 / np.sqrt(2 * np.pi) / pixel_sigma, rtol=1.0e-3 326 | ) 327 | 328 | assert np.allclose(trapz(tsm.flux.flatten(), tsm.wave), dx, rtol=1.0e-3) 329 | 330 | #### MUSE LSF 331 | bacon_lsf_fwhm = lambda w: 5.866e-8 * w**2 - 9.187e-4 * w 
+ 6.04 332 | lsf_sig = bacon_lsf_fwhm(6563) / gaussian_sigma_to_fwhm 333 | 334 | tlsf = tline.to_observed_frame( 335 | extra_sigma=0, lsf_func="Bacon", smoothspec_kwargs={"fftsmooth": False} 336 | ) 337 | 338 | smax = 1 / np.sqrt(2 * np.pi) / (lsf_sig / dx) 339 | assert np.allclose(tlsf.flux.max(), smax, rtol=1.0e-3) 340 | assert np.allclose(trapz(tlsf.flux.flatten(), tlsf.wave), dx, rtol=1.0e-3) 341 | 342 | #### User LSF 343 | lsf_sig = 2.0 344 | my_lsf = lambda x: x * 0 + lsf_sig 345 | tlsf = tline.to_observed_frame( 346 | extra_sigma=0, lsf_func=my_lsf, smoothspec_kwargs={"fftsmooth": False} 347 | ) 348 | 349 | smax = 1 / np.sqrt(2 * np.pi) / (lsf_sig / dx) 350 | assert np.allclose(tlsf.flux.max(), smax, rtol=1.0e-3) 351 | assert np.allclose(trapz(tlsf.flux.flatten(), tlsf.wave), dx, rtol=1.0e-3) 352 | 353 | #### No LSF is the same as smooth_velocity 354 | tobs = tline.to_observed_frame( 355 | extra_sigma=vel, 356 | lsf_func=None, 357 | to_air=False, 358 | z=0, 359 | smoothspec_kwargs={"fftsmooth": False}, 360 | clip_wavelengths=None, 361 | ) 362 | 363 | np.allclose(tobs.flux, tsm.flux, atol=tsm.flux.max() * 1.0e-3) 364 | 365 | #### Resampled 366 | for nstep in [16, 32, 64, 128]: 367 | wlo = np.linspace(6550, 6576, nstep) 368 | tlo = tline.to_observed_frame( 369 | extra_sigma=0, 370 | lsf_func="Bacon", 371 | smoothspec_kwargs={"fftsmooth": False}, 372 | wavelengths=wlo, 373 | ) 374 | 375 | assert np.allclose(trapz(tlo.flux.flatten(), tlo.wave), dx, rtol=1.0e-2) 376 | 377 | 378 | def test_igm(): 379 | 380 | kwarg_sets = [ 381 | dict(igm_model="Asada24", scale_tau=1.0, add_cgm=True), 382 | dict(igm_model="Asada24", scale_tau=1.0, add_cgm=False), 383 | dict(igm_model="Inoue14", scale_tau=1.0, add_cgm=False), 384 | ] 385 | 386 | # Specific test of Inoue14 against result from a particular version 387 | # (db97f839cf8afe4a22c31c5d6195fd707ba4de32) 388 | zfix = 3.0 389 | rest_wave = np.arange(850, 1251, 50) 390 | igm_val = np.array( 391 | [ 392 | 0.33537573, 393 | 
0.54634578, 394 | 0.74207249, 395 | 0.74194787, 396 | 0.79182545, 397 | 0.75792504, 398 | 0.72135181, 399 | 0.68233589, 400 | 1.0, 401 | ] 402 | ) 403 | 404 | template = templates.Template(arrays=(rest_wave, rest_wave**0.0)) 405 | for kwargs in kwarg_sets: 406 | igmz = template.igm_absorption(z=zfix, **kwargs) 407 | assert np.allclose(igmz, igm_val, rtol=1.0e-2) 408 | 409 | # Test other IGM properties 410 | wave = np.logspace(np.log10(800), np.log10(4000.0), 512) 411 | template = templates.Template(arrays=(wave, wave**0.0)) 412 | 413 | fuv = wave < 1216.0 414 | fuv_cgm = wave < 2000.0 415 | 416 | for z in [2.0, 6.5]: 417 | with_cgm, without_cgm, inoue = [ 418 | template.igm_absorption(z=z, **kwargs) for kwargs in kwarg_sets 419 | ] 420 | 421 | # Asada24(add_cgm=False) = Inoue14 422 | assert np.allclose(without_cgm, inoue) 423 | 424 | # No CGM in Asada24 at z < 6.0 425 | if z < 6.0: 426 | assert np.allclose(with_cgm, without_cgm) 427 | else: 428 | assert np.all(with_cgm[fuv_cgm] < without_cgm[fuv_cgm]) 429 | 430 | for kwargs in kwarg_sets: 431 | i1 = template.igm_absorption(z=1.0, **kwargs) 432 | i2 = template.igm_absorption(z=2.0, **kwargs) 433 | i7 = template.igm_absorption(z=7.0, **kwargs) 434 | 435 | assert np.allclose(i1[~fuv], 1.0) 436 | assert np.all(i1[fuv] < 1.0) 437 | assert np.all(i1 >= i2) 438 | assert np.all(i2 >= i7) 439 | 440 | kwargs["scale_tau"] = 2.0 441 | it2 = template.igm_absorption(z=1.0, **kwargs) 442 | assert np.allclose(np.log(it2[fuv]) / np.log(i1[fuv]), 2.0) 443 | 444 | kwargs["scale_tau"] = 1.0 445 | -------------------------------------------------------------------------------- /eazy/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | try: 4 | from numpy import trapezoid as trapz 5 | except ImportError: 6 | from numpy import trapz 7 | 8 | from .. 
import utils 9 | 10 | 11 | def test_milky_way(): 12 | """ 13 | Test that milky way extinction is avaliable 14 | """ 15 | import astropy.units as u 16 | 17 | f99 = utils.GalacticExtinction(EBV=1.0 / 3.1, Rv=3.1) 18 | 19 | # Out of range, test that at least run 20 | _ = f99(10) 21 | _ = f99(5.0e4) 22 | _ = f99(10.0e4) 23 | 24 | # Data types 25 | np.testing.assert_allclose( 26 | f99(5500), 1.0, rtol=0.05, atol=0.05, equal_nan=False, err_msg="", verbose=True 27 | ) 28 | 29 | np.testing.assert_allclose( 30 | f99(5500.0), 31 | 1.0, 32 | rtol=0.05, 33 | atol=0.05, 34 | equal_nan=False, 35 | err_msg="", 36 | verbose=True, 37 | ) 38 | 39 | np.testing.assert_allclose( 40 | f99(5500.0 * u.Angstrom), 41 | 1.0, 42 | rtol=0.05, 43 | atol=0.05, 44 | equal_nan=False, 45 | err_msg="", 46 | verbose=True, 47 | ) 48 | 49 | np.testing.assert_allclose( 50 | f99(0.55 * u.micron), 51 | 1.0, 52 | rtol=0.05, 53 | atol=0.05, 54 | equal_nan=False, 55 | err_msg="", 56 | verbose=True, 57 | ) 58 | 59 | np.testing.assert_allclose( 60 | f99(100 * u.micron), 61 | 0.0, 62 | rtol=0.05, 63 | atol=0.05, 64 | equal_nan=False, 65 | err_msg="", 66 | verbose=True, 67 | ) 68 | 69 | # Arrays 70 | np.testing.assert_allclose( 71 | f99([5500.0, 5500.0]), 72 | 1.0, 73 | rtol=0.05, 74 | atol=0.05, 75 | equal_nan=False, 76 | err_msg="", 77 | verbose=True, 78 | ) 79 | 80 | arr = np.ones(10) * 5500.0 81 | np.testing.assert_allclose( 82 | f99(arr), 1.0, rtol=0.05, atol=0.05, equal_nan=False, err_msg="", verbose=True 83 | ) 84 | 85 | np.testing.assert_allclose( 86 | f99(arr * u.Angstrom), 87 | 1.0, 88 | rtol=0.05, 89 | atol=0.05, 90 | equal_nan=False, 91 | err_msg="", 92 | verbose=True, 93 | ) 94 | 95 | np.testing.assert_allclose( 96 | f99(arr * u.Angstrom), 97 | 1.0, 98 | rtol=0.05, 99 | atol=0.05, 100 | equal_nan=False, 101 | err_msg="", 102 | verbose=True, 103 | ) 104 | 105 | 106 | def test_interp_conserve(): 107 | """ 108 | Test interpolation function 109 | """ 110 | # High-frequence sine function, should 
integrate to zero 111 | ph = 0.1 112 | xp = np.arange(-np.pi, 3 * np.pi, 0.001) 113 | fp = np.sin(xp / ph) 114 | fp[(xp <= 0) | (xp > 2 * np.pi)] = 0 115 | 116 | x = np.arange(-np.pi / 2, 2.5 * np.pi, 1) 117 | 118 | y1 = utils.interp_conserve(x, xp, fp) 119 | integral = trapz(y1, x) 120 | 121 | np.testing.assert_allclose( 122 | integral, 123 | 0.0, 124 | rtol=1e-04, 125 | atol=1.0e-4, 126 | equal_nan=False, 127 | err_msg="", 128 | verbose=True, 129 | ) 130 | 131 | 132 | def test_log_zgrid(): 133 | """ 134 | Test log_zgrid function 135 | """ 136 | ref = np.array([0.1, 0.21568801, 0.34354303, 0.48484469, 0.64100717, 0.8135934]) 137 | 138 | vals = utils.log_zgrid(zr=[0.1, 1], dz=0.1) 139 | np.testing.assert_allclose( 140 | vals, ref, rtol=1e-04, atol=1.0e-4, equal_nan=False, err_msg="", verbose=True 141 | ) 142 | 143 | 144 | def test_invert(): 145 | """ 146 | Test matrix invert helper 147 | """ 148 | a = np.array([[1.0, 2.0], [3.0, 4.0]]) 149 | ainv = utils.safe_invert(a) 150 | assert np.allclose(np.dot(a, ainv), np.eye(2)) 151 | 152 | # Singular matrix returns np.nan inverse without raising exception 153 | a = np.ones((2, 2)) 154 | ainv = utils.safe_invert(a) 155 | assert np.all(~np.isfinite(utils.safe_invert(a))) 156 | 157 | 158 | def test_query_string(): 159 | """ """ 160 | ra, dec = 53.14474, -27.78552 161 | 162 | qstr = utils.query_html( 163 | ra, dec, with_coords=True, replace_comma=False, queries=["CDS"] 164 | ) 165 | 166 | expected = '(53.144740, -27.785520) CDS' 167 | assert qstr == expected 168 | 169 | for co in [True, False]: 170 | for re in [True, False]: 171 | qstr = utils.query_html( 172 | ra, 173 | dec, 174 | with_coords=co, 175 | replace_comma=re, 176 | queries=["CDS", "ESO", "MAST", "ALMA", "LEG", "HSC"], 177 | ) 178 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 64", 
"setuptools-scm>=8"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "eazy" 7 | dynamic = ["version"] 8 | description = "Pythonic photo-zs" 9 | readme = "README.rst" 10 | license = {file = "LICENSE.txt"} 11 | requires-python = ">=3.8" 12 | authors = [ 13 | { name = "G. Brammer", email = "gbrammer@gmail.com" }, 14 | ] 15 | classifiers = [ 16 | "Development Status :: 3 - Alpha", 17 | "Intended Audience :: Science/Research", 18 | "Topic :: Scientific/Engineering :: Astronomy", 19 | ] 20 | dependencies = [ 21 | "astro-sedpy>=0.3", 22 | "astropy", 23 | "dust_extinction", 24 | "h5py", 25 | "matplotlib", 26 | "numpy", 27 | "peakutils", 28 | "scipy", 29 | "tqdm", 30 | ] 31 | 32 | [project.optional-dependencies] 33 | # dust = [ 34 | # "dust_attenuation @ git+https://github.com/karllark/dust_attenuation.git", 35 | # ] 36 | docs = [ 37 | "sphinx", 38 | "sphinx-astropy", 39 | "toml", 40 | # "dust_attenuation @ git+https://github.com/karllark/dust_attenuation.git", 41 | ] 42 | test = [ 43 | "flake8", 44 | "pytest>=5.1", 45 | "coverage", 46 | ] 47 | vistool = [ 48 | "dash==2.5.1", 49 | "jupyter-dash", 50 | "Werkzeug==2.0.3" 51 | ] 52 | 53 | [project.urls] 54 | Documentation = "https://eazy-py.readthedocs.io/" 55 | Homepage = "https://github.com/gbrammer/eazy-py" 56 | Source = "https://github.com/gbrammer/eazy-py" 57 | Tracker = "https://github.com/gbrammer/eazy-py/issues" 58 | 59 | [tool.setuptools] 60 | packages = ["eazy"] 61 | package-data = {eazy = [ 62 | "data/*txt", 63 | "data/alpha_lyr_stis_008.fits", 64 | "data/emlines_info.dat", 65 | "data/spectra_kc13_12_tweak.params", 66 | "data/zphot.param.default" 67 | ]} 68 | 69 | [tool.setuptools_scm] 70 | version_file = "eazy/version.py" 71 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # python == 3.6.10 2 | # numpy >= 1.19 3 | # scipy >= 1.5.0 4 | # matplotlib >= 3.2 
5 | # astropy >= 4.0 6 | # peakutils >= 1.0.3 7 | # tqdm 8 | # h5py 9 | 10 | # dustmaps needs healpy in environment.yml since 11 | # often can't compile on Mac with pip 12 | # ... removing requirement so that can distribute just with requirements.txt 13 | # dustmaps 14 | 15 | # git+https://github.com/gbrammer/dust_extinction.git 16 | # git+https://github.com/gbrammer/dust_attenuation.git 17 | 18 | # Needed for template smoothing, doesn't bring whole fsps distro 19 | astro-sedpy>=0.3 20 | # astro-prospector 21 | 22 | 23 | -------------------------------------------------------------------------------- /scripts/README.rst: -------------------------------------------------------------------------------- 1 | Some files running various parts of the eazy-py code, e.g., for generating 2 | templates and testing. 3 | 4 | *(Oct. 2024) NB:* These scripts haven't been used or tested in years and should be 5 | considered deprecated. -------------------------------------------------------------------------------- /scripts/nmf.py: -------------------------------------------------------------------------------- 1 | def build_matrices(): 2 | """ 3 | Build matrices for Blanton & Roweis optimization 4 | """ 5 | #import unicorn 6 | import numpy as np 7 | #import br07 8 | import glob 9 | #from threedhst import catIO 10 | import matplotlib.pyplot as plt 11 | 12 | from astropy.table import Table 13 | import eazy 14 | 15 | from eazy.utils import running_median, nmad 16 | 17 | # UltraVista 18 | self = eazy.photoz.PhotoZ(param_file='zphot.param.new', translate_file='zphot.translate.new', zeropoint_file='zphot.zeropoint.FSPS_full') 19 | zbest = np.load('zbest_Nov3.npy')[0] 20 | self.best_fit(zbest=zbest) 21 | 22 | # FAST 23 | fout = Table.read('UVISTA_DR3_master_v1.1.fout', format='ascii.commented_header') 24 | 25 | ## RF fluxes 26 | if False: 27 | rf_tempfilt, f_rest = self.rest_frame_fluxes(f_numbers=[153,155,161], pad_width=0.1, percentiles=[2.5,16,50,84,97.5]) 28 | 
np.save('uvista_rest_frame.npy', [rf_tempfilt, f_rest, idx]) 29 | else: 30 | rf_tempfilt, f_rest, idx0 = np.load('uvista_rest_frame.npy') 31 | 32 | uv_data = -2.5*np.log10(f_rest[:, 0, 2] / f_rest[:, 1, 2]) 33 | vj_data = -2.5*np.log10(f_rest[:, 1, 2] / f_rest[:, 2, 2]) 34 | 35 | u_0 = rf_tempfilt.tempfilt[:,0] / rf_tempfilt.tempfilt[:,2] 36 | v_0 = rf_tempfilt.tempfilt[:,1] / rf_tempfilt.tempfilt[:,2] 37 | j_0 = rf_tempfilt.tempfilt[:,2] / rf_tempfilt.tempfilt[:,2] 38 | 39 | uv_temp = -2.5*np.log10(u_0 / v_0) 40 | vj_temp = -2.5*np.log10(v_0 / j_0) 41 | 42 | ## Template data 43 | lines = open('templates/fsps_full/tweak_spectra.param').readlines() 44 | templ_names = [templ.name.split('.dat')[0] for templ in self.templates] 45 | data = [] 46 | for line in lines: 47 | if line.startswith('# '): 48 | data.append(line.split()[1:]) 49 | columns = ['id', 'label', 'age', 'mass', 'sfr', 'Av', 'Ha_flux', 'Ha_cont'] 50 | templ_data = Table(data=np.array(data), names=columns, dtype=[int, str, float, float, float, float, float, float]) 51 | 52 | zsp_only=True 53 | 54 | zp_ix = self.f_numbers == self.param['PRIOR_FILTER'] 55 | mag = self.param['PRIOR_ABZP'] - np.squeeze(2.5*np.log10(self.fnu[:, zp_ix])) 56 | 57 | idx = (mag < 23) | ((self.cat['z_spec'] > 0) & np.isfinite(mag)) 58 | #idx = (mag < 22) | ((self.cat['z_spec'] > 0) & np.isfinite(mag)) 59 | if zsp_only: 60 | idx &= (self.cat['z_spec'] > 0) 61 | 62 | if 'USE' in self.cat.colnames: 63 | # UltraVISTA 64 | use = (self.cat['USE'] == 1) & (self.cat['star'] == 0) 65 | use = (self.cat['contamination'] == 0) & (self.cat['nan_contam'] == 0) & (self.cat['star'] == 0) 66 | else: 67 | use = (self.cat['use'] == 1) & (self.cat['star_flag'] != 1) 68 | 69 | idx &= use 70 | 71 | idx_zsp = np.arange(self.NOBJ)[idx] 72 | 73 | ############# Refine photo-zs 74 | iter = 200 75 | iter = 300 # use Nov4 12 templates 76 | error_residuals, update_templates, update_zeropoints = True, False, True 77 | for i in range(6): 78 | 
self.iterate_zp_templates(idx=idx_zsp, update_templates=(i > 2), iter=iter+i, error_residuals=True, update_zeropoints=True, save_templates=(i > 2), n_proc=6) 79 | self.write_zeropoint_file('zphot.zeropoint.FSPS_full_12') 80 | #np.save('zbest.npy', [self.zbest]) 81 | 82 | ############# 83 | ## Fit full sample without updating zeropoints, etc. 84 | zp_ix = self.f_numbers == self.param['PRIOR_FILTER'] 85 | mag = self.param['PRIOR_ABZP'] - np.squeeze(2.5*np.log10(self.fnu[:, zp_ix])) 86 | 87 | idx = (mag < 23) | ((self.cat['z_spec'] > 0) & np.isfinite(mag)) 88 | if 'USE' in self.cat.colnames: 89 | # UltraVISTA 90 | use = (self.cat['USE'] == 1) & (self.cat['star'] == 0) 91 | use = (self.cat['contamination'] == 0) & (self.cat['nan_contam'] == 0) & (self.cat['star'] == 0) 92 | else: 93 | use = (self.cat['use'] == 1) & (self.cat['star_flag'] != 1) 94 | 95 | idx &= use 96 | 97 | ## Minimum number of OK data 98 | nfilt = ((self.fnu > 0) & (self.efnu > 0) & (self.fnu/self.efnu > 3)).sum(axis=1) 99 | idx &= nfilt > 6 100 | 101 | # reasonable colors 102 | idx &= np.isfinite(uv_data) & np.isfinite(vj_data) & (uv_data < 3) & (uv_data > -0.4) & (vj_data > -0.4) & (vj_data < 2.7) #quiescent 103 | 104 | #idx &= (fout['lssfr'] < -11) & (fout['Av'] < 0.5) & (vj_data > 0.5) & np.isfinite(uv_data) & np.isfinite(vj_data) & (uv_data < 2.4) #quiescent 105 | 106 | idx_full = np.arange(self.NOBJ)[idx] 107 | idx = idx_full 108 | 109 | ## Refit last photo-zs 110 | if False: 111 | self.iterate_zp_templates(idx=idx, update_templates=False, iter=206, error_residuals=True, update_zeropoints=False, save_templates=False, n_proc=6) 112 | self.iterate_zp_templates(idx=idx, update_templates=False, iter=207, error_residuals=True, update_zeropoints=False, save_templates=False, n_proc=6) 113 | np.save('zbest_Nov3.npy', [self.zbest]) 114 | 115 | ############## 116 | ## Build NMF matrix 117 | izbest = np.asarray(np.round(np.interp(self.zbest, self.tempfilt.zgrid, np.arange(self.NZ))),dtype=int) 118 | 119 | 
NOBJ = len(idx) 120 | data = np.zeros((NOBJ, self.NFILT, self.NZ)) 121 | data_ivar = np.zeros((NOBJ, self.NFILT, self.NZ)) 122 | 123 | for i in range(len(idx)): 124 | print(i) 125 | ix = idx[i] 126 | iz = izbest[ix] 127 | 128 | flu = self.fnu[ix,:]*self.zp*self.ext_corr 129 | ivar = 1/((self.efnu[ix,:]*self.zp*self.ext_corr)**2 + (0.02*flu)**2) 130 | 131 | ok_i = (self.fnu[ix,:] > 0) & (self.efnu[ix,:] > 0) 132 | data[i, ok_i, iz] = flu[ok_i] 133 | data_ivar[i, ok_i, iz] = ivar[ok_i] 134 | 135 | ### Show UVJ 136 | if False: 137 | 138 | if True: 139 | plt.scatter(vj_data[idx], uv_data[idx], c=fout['lssfr'][idx], vmin=-12, vmax=-8, marker='s', alpha=0.03, cmap='jet_r', edgecolor='None') 140 | #plt.scatter(vj_data[idx], uv_data[idx], c=fout['lmass'][idx], vmin=9, vmax=11, marker='s', alpha=0.03, cmap='jet_r', edgecolor='None') 141 | 142 | else: 143 | plt.scatter(vj_data[idx], uv_data[idx], color='k', marker='.', alpha=0.03) 144 | 145 | templ_ssfr = np.log10(templ_data['sfr']/templ_data['mass']) 146 | plt.scatter(vj_temp, uv_temp, c=templ_ssfr, vmin=-12, vmax=-8, marker='s', alpha=0.8, edgecolor='None', cmap='jet_r') 147 | plt.xlim(-0.3, 3.5); plt.ylim(0,3.5) 148 | 149 | ### Generate NMF components 150 | # Normalize template array by J band 151 | M = np.zeros((self.NTEMP, self.NFILT, self.NZ)) 152 | for i in range(self.NTEMP): 153 | M[i,:,:] = self.tempfilt.tempfilt[:,i,:].T / rf_tempfilt.tempfilt[i,2] 154 | 155 | ok_temp = templ_data['Av'] > -1 156 | n_components = 12 157 | 158 | ## Fulls set, 12 templates, coeffs_blue 159 | n_components = 8 160 | ok_temp = ~((templ_ssfr < -10) & (templ_data['Av'] <= 1)) 161 | 162 | # Quiescent templates, 4 templates, coeffs_red 163 | if True: 164 | ok_temp = (templ_ssfr < -10) & (templ_data['Av'] <= 1) 165 | n_components = 4 166 | 167 | M = M[ok_temp, :, :] 168 | ds = data.shape 169 | ms = M.shape 170 | 171 | delta = ms[0] // n_components+1 172 | 173 | coeffs = np.ones((n_components, ms[0]))*1./delta/(n_components*5) 174 | for i in 
range(n_components): 175 | coeffs[i, i*delta:(i+1)*delta] = 1./delta 176 | 177 | so = np.argsort(vj_temp[ok_temp]) 178 | coeffs[:,so] = coeffs*1 179 | 180 | coeffsx = np.ones((n_components, self.NTEMP))*1./delta/(n_components*5) 181 | coeffsx[:, ok_temp] += coeffs 182 | 183 | ## Split on quiescent templates for initialization 184 | coeffs = np.vstack((coeffs_red, coeffs_blue)) 185 | 186 | templates = np.ones((ds[0], n_components)) 187 | 188 | ### Loop so you can break in and still have the coeffs updated. 189 | ### total of 4000 iters 190 | plt.scatter(vj_data[idx], uv_data[idx], color='k', marker='.', alpha=0.03) 191 | #plt.scatter(vj_temp, uv_temp, color='r', marker='.', alpha=0.8) 192 | plt.xlim(-0.3, 3.5); plt.ylim(0,3.5) 193 | 194 | plt.scatter(-2.5*np.log10(np.dot(coeffs, v_0[ok_temp])/np.dot(coeffs, j_0[ok_temp])), -2.5*np.log10(np.dot(coeffs, u_0[ok_temp])/np.dot(coeffs, v_0[ok_temp])), color='orange', alpha=0.5, s=40, marker='s') 195 | 196 | from templates import nmf_sparse 197 | 198 | for i in range(1, 100): 199 | print('Restart #{0}'.format(i)) 200 | coeffs, templates = nmf_sparse(data.reshape((ds[0],-1)), data_ivar.reshape((ds[0],-1)), M.reshape((ms[0],-1)), n_components=n_components, NITER=40, tol_limit=1.e-5, coeffs=coeffs*1, templates=templates*1.) 
201 | 202 | np.save('full_nmf_coeffs.npy', [[i], idx, coeffs, templates]) 203 | 204 | if True: 205 | plt.scatter(-2.5*np.log10(np.dot(coeffs, v_0[ok_temp])/np.dot(coeffs, j_0[ok_temp])), -2.5*np.log10(np.dot(coeffs, u_0[ok_temp])/np.dot(coeffs, v_0[ok_temp])), color='r', alpha=0.3, s=40, marker='.') 206 | plt.savefig('full_nmf_iter_{0:03d}.png'.format(i)) 207 | 208 | ################################## 209 | ### Best combined templates 210 | i, idx0, coeffs, templates = np.load('full_nmf_coeffs.npy') 211 | #i, idx0, coeffs, templates = np.load('nmf_coeffs.npy') 212 | templ_array = np.array([templ.flux for templ in self.templates]) 213 | 214 | vj0 = -2.5*np.log10(np.dot(coeffs, v_0)/np.dot(coeffs, j_0)) 215 | uv0 = -2.5*np.log10(np.dot(coeffs, u_0)/np.dot(coeffs, v_0)) 216 | 217 | plt.scatter(vj_data[idx], uv_data[idx], color='k', marker='.', alpha=0.03) 218 | plt.xlim(-0.4, 2.9); plt.ylim(-0.4,2.9) 219 | #plt.scatter(vj0, uv0, color='r', marker='s') 220 | 221 | # Put normalization back in 222 | coeffs_norm = coeffs/rf_tempfilt.tempfilt[:,2] 223 | coeffs_norm = (coeffs_norm.T / coeffs_norm.sum(axis=1)).T 224 | 225 | # V-band 226 | nmf_norm = np.dot(coeffs_norm, rf_tempfilt.tempfilt[:,1]) 227 | coeffs_norm = (coeffs_norm.T / nmf_norm).T 228 | 229 | nmf_templates = np.dot(coeffs_norm, templ_array).T 230 | 231 | ## Effective Av 232 | alpha = 10**(-0.4*templ_data['Av']) 233 | fv = np.dot(coeffs_norm, rf_tempfilt.tempfilt[:,1]) 234 | fv_corr = np.dot(coeffs_norm, rf_tempfilt.tempfilt[:,1]/alpha) 235 | Av = -2.5*np.log10(fv/fv_corr) 236 | 237 | ## Other parameters 238 | mass = np.dot(coeffs_norm, templ_data['mass']) 239 | sfr = np.dot(coeffs_norm, templ_data['sfr']) 240 | ssfr = sfr/mass 241 | 242 | Ha_flux = np.dot(coeffs_norm, templ_data['Ha_flux']) 243 | Ha_cont = np.dot(coeffs_norm, templ_data['Ha_cont']) 244 | 245 | Ha_EW = Ha_flux / Ha_cont 246 | 247 | NTEMP = coeffs.shape[0] 248 | 249 | nu_v = 3.e8/5000.e-10 250 | Lv = fv*nu_v 251 | 252 | fp = 
open('templates/uvista_nmf/spectra_12_Nov4.param','w') 253 | fp.write('## i age mass sfr Av Ha_flux Ha_cont fnu_v\n') 254 | for i in range(NTEMP): 255 | temp_file = 'nmf_fsps_Nov4_{0:02d}.dat'.format(i+1) 256 | label = '# {id:2d} -1 {mass:5.4e} {sfr:.3e} {Av:.3f} {Ha_flux:.3e} {Ha_cont:.3e} {fnu:.3e}'.format(id=i+1, mass=mass[i], sfr=sfr[i], Av=Av[i], Ha_flux=Ha_flux[i], Ha_cont=Ha_cont[i], fnu=fv[i]) 257 | print(label) 258 | fp.write(label+'\n') 259 | fp.write('{0:2d} templates/uvista_nmf/{1} 1.0 0 1.0\n'.format(i+1, temp_file)) 260 | 261 | fpt = open('./templates/uvista_nmf/{0}'.format(temp_file), 'w') 262 | fpt.write('# i age mass sfr Av Ha_flux Ha_cont fnu_v\n') 263 | fpt.write(label+'\n') 264 | fpt.close() 265 | 266 | fpt = open('./templates/uvista_nmf/{0}'.format(temp_file), 'ab') 267 | np.savetxt(fpt, np.array([self.templates[0].wave, nmf_templates[:,i]]).T, fmt='%.6e') 268 | fpt.close() 269 | 270 | fp.close() 271 | 272 | ## Show the templates in UVJ: sSFR, MLv, Av, Ha EW 273 | sh = 0.8 274 | fig = plt.figure(figsize=[11,8]) 275 | 276 | ax = fig.add_subplot(221) 277 | ax.scatter(vj_data[idx], uv_data[idx], color='k', marker='.', alpha=0.01) 278 | ax.set_xlim(-0.4, 2.9); ax.set_ylim(-0.4,2.9) 279 | sc = ax.scatter(vj0, uv0, marker='s', c=np.log10(ssfr), s=100, vmin=-12, vmax=-8, cmap='jet_r') 280 | cb = plt.colorbar(sc, ax=ax, shrink=sh) 281 | cb.set_label('log sSFR') 282 | for i in range(NTEMP): 283 | ax.text(vj0[i], uv0[i]+0.1, '{0:d}'.format(i+1), ha='center', va='bottom', size=8, backgroundcolor='w') 284 | 285 | ax.grid() 286 | 287 | ax = fig.add_subplot(222) 288 | ax.scatter(vj_data[idx], uv_data[idx], color='k', marker='.', alpha=0.01) 289 | ax.set_xlim(-0.4, 2.9); ax.set_ylim(-0.4,2.9) 290 | sc = ax.scatter(vj0, uv0, marker='s', c=np.log10(mass/Lv), s=100, vmin=-1, vmax=1.5, cmap='jet') 291 | cb = plt.colorbar(sc, ax=ax, shrink=sh) 292 | cb.set_label('log M/Lv') 293 | ax.grid() 294 | 295 | ax = fig.add_subplot(223) 296 | ax.scatter(vj_data[idx], 
uv_data[idx], color='k', marker='.', alpha=0.01) 297 | ax.set_xlim(-0.4, 2.9); ax.set_ylim(-0.4,2.9) 298 | sc = ax.scatter(vj0, uv0, marker='s', c=Av, s=100, vmin=0, vmax=3, cmap='jet') 299 | cb = plt.colorbar(sc, ax=ax, shrink=sh) 300 | cb.set_label('Av') 301 | ax.grid() 302 | 303 | ax = fig.add_subplot(224) 304 | ax.scatter(vj_data[idx], uv_data[idx], color='k', marker='.', alpha=0.01) 305 | ax.set_xlim(-0.4, 2.9); ax.set_ylim(-0.4,2.9) 306 | sc = ax.scatter(vj0, uv0, marker='s', c=np.log10(Ha_flux/Ha_cont), s=100, vmin=1, vmax=3, cmap='jet_r') 307 | cb = plt.colorbar(sc, ax=ax, shrink=sh) 308 | cb.set_label(r'log H$\alpha$ EW') 309 | ax.grid() 310 | 311 | fig.tight_layout(pad=0.4) 312 | fig.savefig('spectra_12_Nov4.png') 313 | 314 | fig = plt.figure(figsize=[8,8]) 315 | for i in range(NTEMP): 316 | ax = fig.add_subplot(3,4,1+i) 317 | ax.plot(self.templates[0].wave[::10], nmf_templates[::10,:]/(3.e18/5500.**2), color='k', alpha=0.1) 318 | ax.plot(self.templates[0].wave, nmf_templates[:,i]/(3.e18/5500.**2), color='k', alpha=0.8, linewidth=2) 319 | ax.semilogx() 320 | ax.set_xlim(1000,5.e4) 321 | ax.set_ylim(0,10) 322 | if i < 8: 323 | ax.set_xticklabels([]) 324 | 325 | if (i % 4) > 0: 326 | ax.set_yticklabels([]) 327 | 328 | ax.text(0.9, 0.9, '#{0}'.format(i+1), transform=ax.transAxes, ha='right', va='top', size=12) 329 | 330 | fig.tight_layout(pad=0.1) 331 | fig.savefig('spectra_12_Nov4_SED.png') 332 | 333 | def check_parameters(): 334 | """ 335 | Redshifts & SP parameters from the new NMF fits 336 | """ 337 | self = eazy.photoz.PhotoZ(param_file='zphot.param.new', translate_file='zphot.translate.new', zeropoint_file='zphot.zeropoint.FSPS_full_12') 338 | 339 | zp_ix = self.f_numbers == self.param['PRIOR_FILTER'] 340 | mag = self.param['PRIOR_ABZP'] - np.squeeze(2.5*np.log10(self.fnu[:, zp_ix])) 341 | 342 | idx = (mag < 23) | ((self.cat['z_spec'] > 0) & np.isfinite(mag)) 343 | if 'USE' in self.cat.colnames: 344 | # UltraVISTA 345 | use = (self.cat['USE'] == 1) & 
(self.cat['star'] == 0) 346 | use = (self.cat['contamination'] == 0) & (self.cat['nan_contam'] == 0) & (self.cat['star'] == 0) 347 | else: 348 | use = (self.cat['use'] == 1) & (self.cat['star_flag'] != 1) 349 | 350 | idx &= use 351 | 352 | ## Minimum number of OK data 353 | nfilt = ((self.fnu > 0) & (self.efnu > 0) & (self.fnu/self.efnu > 3)).sum(axis=1) 354 | idx &= nfilt > 6 355 | 356 | #idx &= (fout['lssfr'] < -11) & (fout['Av'] < 0.5) & (vj_data > 0.5) & np.isfinite(uv_data) & np.isfinite(vj_data) & (uv_data < 2.4) #quiescent 357 | 358 | idx_full = np.arange(self.NOBJ)[idx] 359 | idx = idx_full 360 | 361 | ## Refit last photo-zs 362 | if True: 363 | self.iterate_zp_templates(idx=idx, update_templates=False, iter=401, error_residuals=True, update_zeropoints=False, save_templates=False, n_proc=6) 364 | self.iterate_zp_templates(idx=idx, update_templates=False, iter=402, error_residuals=True, update_zeropoints=False, save_templates=False, n_proc=6) 365 | np.save('zbest_Nov4_12.npy', [self.zbest]) 366 | 367 | rf_tempfilt, f_rest = self.rest_frame_fluxes(f_numbers=[153,155,161], pad_width=0.1, percentiles=[2.5,16,50,84,97.5]) 368 | 369 | ############################################ 370 | # def old_build_matrices(NTEMP=5, field='cosmos', hmax=24, zmax=2.5): 371 | # """ 372 | # Build matrices for Blanton & Roweis optimization 373 | # """ 374 | # #import unicorn 375 | # import numpy as np 376 | # #import br07 377 | # import glob 378 | # #from threedhst import catIO 379 | # from astropy.table import Table 380 | # import eazy 381 | # 382 | # from eazy.utils import running_median, nmad 383 | # 384 | # # UltraVista 385 | # self = eazy.photoz.PhotoZ(param_file='zphot.param.new', translate_file='zphot.translate.new', zeropoint_file='zphot.zeropoint.new') 386 | # zbest = np.load('zbest.npy')[0] 387 | # self.best_fit(zbest=zbest) 388 | # 389 | # zsp_only=False 390 | # 391 | # zp_ix = self.f_numbers == self.param['PRIOR_FILTER'] 392 | # mag = self.param['PRIOR_ABZP'] - 
np.squeeze(2.5*np.log10(self.fnu[:, zp_ix])) 393 | # 394 | # idx = (mag < 23) | ((self.cat['z_spec'] > 0) & np.isfinite(mag)) 395 | # #idx = (mag < 22) | ((self.cat['z_spec'] > 0) & np.isfinite(mag)) 396 | # if zsp_only: 397 | # idx &= (self.cat['z_spec'] > 0) 398 | # 399 | # if 'USE' in self.cat.colnames: 400 | # # UltraVISTA 401 | # use = (self.cat['USE'] == 1) & (self.cat['star'] == 0) 402 | # use = (self.cat['contamination'] == 0) & (self.cat['nan_contam'] == 0) & (self.cat['star'] == 0) 403 | # else: 404 | # use = (self.cat['use'] == 1) & (self.cat['star_flag'] != 1) 405 | # 406 | # idx &= use 407 | # 408 | # idx = np.arange(self.NOBJ)[idx] 409 | # 410 | # iter = 200 411 | # error_residuals, update_templates, update_zeropoints = True, False, True 412 | # 413 | # for i in range(3): 414 | # self.iterate_zp_templates(idx=idx, update_templates=update_templates, iter=iter+i, error_residuals=error_residuals, update_zeropoints=update_zeropoints, save_templates=True, n_proc=6) 415 | # self.write_zeropoint_file('zphot.zeropoint.FSPS_full') 416 | # np.save('zbest.npy', [self.zbest]) 417 | # 418 | # ### Fit all objects 419 | # idx_full = np.arange(self.NOBJ) 420 | # error_residuals, update_templates, update_zeropoints = True, False, False 421 | # iter = 99 422 | # self.iterate_zp_templates(idx=idx_full, update_templates=update_templates, iter=iter, error_residuals=error_residuals, update_zeropoints=update_zeropoints, save_templates=False, n_proc=6) 423 | # 424 | # ### Bin on U-V vs sSFR 425 | # plt.scatter(np.log10(ssfr[idx]), uv[idx], c=vj[idx], vmin=-0.5, vmax=2, alpha=0.2, edgecolor='0.5') 426 | # 427 | # xh, yh = np.clip(np.log10(ssfr[idx]), -12.95, -8.05), np.clip(uv[idx], 0.01, 2.49) 428 | # h2 = np.histogram2d(xh, yh, range=[(-13,-8), (0,2.5)], bins=[8,8]) 429 | # 430 | # xh, yh = np.clip(vj[idx], 0.01, 2.99), np.clip(uv[idx], 0.01, 3.49) 431 | # h2 = np.histogram2d(xh, yh, range=[(0,3), (0,3.5)], bins=[16,16]) 432 | # 433 | # ## Binned rest-frame SEDs 434 | # 
#full_templates = self.param.read_templates(templates_file='templates/fsps_full/tweak_spectra.param') 435 | # full_templates = self.param.read_templates(templates_file='templates/fsps_full/spectra.param') 436 | # full_tempfilt = eazy.photoz.TemplateGrid(self.zgrid, full_templates, self.param['FILTERS_RES'], self.f_numbers, add_igm=True, galactic_ebv=self.param.params['MW_EBV'], Eb=self.param['SCALE_2175_BUMP'], n_proc=0) 437 | # full_rf_tempfilt = eazy.photoz.TemplateGrid(np.array([0,0.1]), full_templates, self.param['FILTERS_RES'], np.array([153,155,161]), add_igm=False, galactic_ebv=0, Eb=self.param['SCALE_2175_BUMP'], n_proc=-1) 438 | # full_rf_tempfilt.tempfilt = np.squeeze(full_rf_tempfilt.tempfilt[0,:,:]) 439 | # 440 | # ix = np.argmax(h2[0]) 441 | # j, i = np.unravel_index(ix, h2[0].shape) 442 | # 443 | # Q = ssfr[idx] > -10 444 | # testQ = Q 445 | # 446 | # sel = (xh >= h2[1][j]) & (xh <= h2[1][j+1]) & (yh >= h2[2][i]) & (yh <= h2[2][i+1]) & testQ 447 | # 448 | # fnu_corr = self.fnu[idx[sel],:]*self.zp*self.ext_corr 449 | # efnu_corr = self.efnu[idx[sel],:]*self.zp*self.ext_corr 450 | # 451 | # #irest = 1 452 | # fnu_corr = (fnu_corr.T/f_rest[idx[sel],irest,2]).T 453 | # efnu_corr = (efnu_corr.T/f_rest[idx[sel],irest,2]).T 454 | # lc_rest = (self.lc[:,np.newaxis]/(1+self.zbest[idx[sel]])).T 455 | # 456 | # templ = np.zeros((sel.sum(), self.NFILT, full_tempfilt.NTEMP)) 457 | # 458 | # import specutils.extinction 459 | # import astropy.units as u 460 | # f99 = specutils.extinction.ExtinctionF99(a_v = self.tempfilt.galactic_ebv * 3.1) 461 | # fred = 10**(-0.4*f99(full_rf_tempfilt.lc[irest]*(1+self.zbest[idx][sel])*u.AA)) 462 | # 463 | # for ii in range(sel.sum()): 464 | # zi = self.zbest[idx][sel][ii] 465 | # templ[ii,:,:] = (full_tempfilt(zi).T/(full_rf_tempfilt.tempfilt[:,irest]*fred[ii])) 466 | # 467 | # be = efnu_corr.flatten() 468 | # A = templ.reshape((-1,full_tempfilt.NTEMP)) 469 | # b = fnu_corr.flatten()/be 470 | # 471 | # ok = 
(self.fnu[idx[sel],:].flatten() > -99) & (self.efnu[idx[sel],:].flatten() > 0) & (lc_rest.flatten() > 1300) 472 | # 473 | # sh = self.fnu[idx[sel],:].shape 474 | # oksh = ok.reshape(sh) 475 | # 476 | # yy = (fnu_corr/self.ext_corr)/(lc_rest/rf_tempfilt.lc[irest])**2 477 | # xm, ym, ys, N = running_median(lc_rest[oksh], yy[oksh], use_median=True, use_nmad=True, NBIN=100) 478 | # ym_i = np.interp(lc_rest, xm, ym) 479 | # ys_i = np.interp(lc_rest, xm, ys) 480 | # oksh &= np.abs(ym_i - yy) < 3*ys_i 481 | # ok = oksh.flatten() 482 | # 483 | # coeffs, resid = scipy.optimize.nnls((A[ok,:].T/be[ok]).T,b[ok]) 484 | # # coeffs, resid, rank, s = np.linalg.lstsq((A[ok,:].T/be[ok]).T,b[ok]) 485 | # # 486 | # # amatrix = unicorn.utils_c.prepare_nmf_amatrix(be[ok]**2, A[ok,:].T) 487 | # # coeffs_nmf = unicorn.utils_c.run_nmf(fnu_corr.flatten()[ok], be[ok]**2, A[ok,:].T, amatrix, verbose=True, toler=1.e-5) 488 | # 489 | # best = np.dot(A, coeffs) 490 | # 491 | # yym = (best.reshape(sh)/self.ext_corr)/(lc_rest/rf_tempfilt.lc[irest])**2 492 | # xmm, ymm, ysm, Nm = running_median(lc_rest[oksh], yym[oksh], use_median=True, use_nmad=True, NBIN=100) 493 | # 494 | # if False: 495 | # plt.scatter(lc_rest[oksh], (fnu_corr/self.ext_corr)[oksh]/(lc_rest[oksh]/rf_tempfilt.lc[irest])**2, alpha=0.05*100/sel.sum(), color='k', marker='.') 496 | # plt.scatter(lc_rest[oksh], (best.reshape(sh)/self.ext_corr)[oksh]/(lc_rest[oksh]/rf_tempfilt.lc[irest])**2, alpha=0.05*100/sel.sum(), color='r', marker='.', zorder=2) 497 | # 498 | # plt.errorbar(xm, ym, ys, color='k') 499 | # plt.plot(xmm, ymm, color='r', marker='.', alpha=0.4) 500 | # plt.xlim(800,1.e5); plt.ylim(0.01,10); log() 501 | # 502 | # tf = np.array([full_templates[ii].flux / (full_rf_tempfilt.tempfilt[ii, irest]*3.e18/full_rf_tempfilt.lc[irest]**2) for ii in range(full_tempfilt.NTEMP)]) 503 | # tt = np.dot(coeffs, tf) 504 | # plt.plot(full_templates[0].wave, tt, color='r') 505 | # 506 | # plt.scatter(lc_rest[oksh], fnu_corr[oksh]/best[ok], 
alpha=0.05*100/sel.sum(), color='k', marker='.')
507 | #     plt.xlim(800,1.e5); plt.ylim(0.5,1.5); plt.semilogx()
508 | 
509 | 
--------------------------------------------------------------------------------
/scripts/photoz.py:
--------------------------------------------------------------------------------
1 | def rest_frame_seds_selections():
2 |     """
      Scratchpad of alternative rest-frame SED sample selections
      (UVJ-color, sSFR, M/Lv and U-22um cuts).

      NOTE(review): every `idx` (and `idx, label, c`) assignment below
      overwrites the previous one -- as written, only the final 'SF3'
      selection survives.  This is an interactive menu: run the one
      selection you want, not the whole function.

      Relies on globals from the calling session (`zout`, `UV`, `VJ`,
      `sSFR`, `f_rest`, `np`) that are not defined in this file --
      TODO confirm against the driving notebook/session.
3 |     """
4 |     ok = (zout['z_phot'] > 0.4) & (zout['z_phot'] < 2)
5 |     col = (VJ < 1.5) & (UV > 1.5)
6 |     # Quiescent
7 |     idx = col & ok & (np.log10(sSFR) < -11.5)
8 |     idx = col & ok & (np.log10(sSFR) > -10.5)
9 |     idx = col & ok & (np.log10(sSFR) > -9.5)
10 | 
11 |     idx = ok & (VJ > 1.8)
12 | 
13 |     ## Red
14 |     UWise = f_rest[:,0,2]/f_rest[:,2,2]
15 |     idx, label, c = ok & (np.log10(UWise) > -1) & (np.log10(sSFR) > -10), 'U22_blue', 'b'
16 | 
17 |     idx, label, c = ok & (np.log10(UWise) < -1.8) & (np.log10(UWise) > -2.2) & (np.log10(sSFR) > -10), 'U22_mid', 'g'
18 | 
19 |     idx, label, c = ok & (np.log10(UWise) < -2.4) & (np.log10(sSFR) > -10), 'U22_red', 'r'
20 | 
21 |     # Quiescent
22 |     idx, label, c = ok & (np.log10(zout['MLv']) > 0.4) & (np.log10(sSFR) < -11.9), 'Q', 'r'
23 | 
24 |     # Dusty
25 |     idx, label, c = ok & (np.log10(zout['MLv']) > 0.6) & (np.log10(sSFR) < -10.5), 'MLv_lo', 'brown'
26 | 
27 |     idx, label, c = ok & (np.log10(zout['MLv']) > 0.6) & (np.abs(np.log10(sSFR)+10.5) < 0.5), 'MLv_mid', 'k'
28 | 
29 |     idx, label, c = ok & (np.log10(zout['MLv']) > 0.6) & (np.log10(sSFR) > -9.5), 'MLv_hi', 'green'
30 | 
31 |     # post-SB
32 |     #idx, label, c = (UV < 1.6) & ok & (np.log10(sSFR) < -11) & (VJ < 1), 'post-SB', 'orange'
33 | 
34 |     # star-forming
35 |     idx, label, c = ok & (UV < 0.6) & (VJ < 0.5), 'SF0', 'purple'
36 | 
37 |     idx, label, c = ok & (np.abs(UV-0.8) < 0.2) & (np.abs(VJ-0.6) < 0.2), 'SF1', 'b'
38 | 
39 |     idx, label, c = ok & (np.abs(UV-1.2) < 0.2) & (np.abs(VJ-1.0) < 0.2), 'SF2', 'orange'
40 | 
41 |     idx, label, c = ok & (np.abs(UV-1.6) < 0.2) & (np.abs(VJ-1.6) < 0.2), 'SF3', 'pink'
--------------------------------------------------------------------------------
/scripts/run.py:
--------------------------------------------------------------------------------
1 | def go():
# NOTE(review): interactive driver, not a library function.  It rebinds
# `self` to several different PhotoZ instances in turn (only the last one
# takes effect if run top-to-bottom) and references names that are not
# defined in this file (`catIO`, `show_fnu`, `nuv_obs`, `nuv_mod`,
# `totcorr`) -- intended to be pasted/run block by block in a session.
2 | 
3 |     import numpy as np
4 |     import glob
5 |     import matplotlib.pyplot as plt
6 | 
7 |     from astropy.io import fits
8 |     import eazy
9 | 
10 |     zsp_only = False  # if True, restrict the fit sample to spec-z objects below
11 | 
12 |     # GOODS-S
13 |     self = eazy.photoz.PhotoZ(param_file='zphot.param.goodss.uvista', translate_file='zphot.translate.goodss.uvista', zeropoint_file='zphot.zeropoint.goodss.uvista.FSPS')
14 | 
15 |     self = eazy.photoz.PhotoZ(param_file='zphot.param.new', translate_file='zphot.translate.new', zeropoint_file='zphot.zeropoint.new')
16 | 
17 |     # UVUDF
18 |     self = eazy.photoz.PhotoZ(param_file='zphot.param', translate_file='zphot.translate', zeropoint_file='zphot.zeropoint')
19 |     use = (self.cat['STAR'] == 0)
20 | 
21 |     # R-selected
22 |     self = eazy.photoz.PhotoZ(param_file='zphot.param.new', translate_file='zphot.translate.new', zeropoint_file='zphot.zeropoint.FSPS')
23 | 
24 |     #
25 |     self.zp = self.zp*0+1  # reset all zeropoint corrections to unity
26 | 
27 |     if False:
28 |         # reload a previous run's products instead of refitting
29 |         out = fits.open('eazy_output.fits')
30 |         self.zp = out['ZP'].data
31 |         self.zbest = out['ZBEST'].data
32 |         self.chibest = out['CHIBEST'].data
33 |         self.fit_chi2 = out['FIT_CHI2'].data
34 |         self.fobs = out['FOBS'].data
35 | 
36 |         for i in range(self.NFILT):
37 |             self.cat['fit_{0}'.format(self.flux_columns[i])] = self.fobs[:,i]/self.ext_corr[i]
38 | 
# (re-synchronizing dump numbering with source below)
38 |     # mag = self.param['PRIOR_ABZP'] - 2.5*np.log10(self.cat['Ks'])
39 |     #
40 |     # mag = self.param['PRIOR_ABZP'] - 2.5*np.log10(self.cat['rp_tot'])
41 | 
42 |     zp_ix = self.f_numbers == self.param['PRIOR_FILTER']
43 |     mag = self.param['PRIOR_ABZP'] - np.squeeze(2.5*np.log10(self.fnu[:, zp_ix]))  # AB mag in the prior filter
44 | 
45 |     idx = (mag < 23) | ((self.cat['z_spec'] > 0) & np.isfinite(mag))
46 |     #idx = (mag < 22) | ((self.cat['z_spec'] > 0) & np.isfinite(mag))
47 | 
48 |     if zsp_only:
49 |         idx &= (self.cat['z_spec'] > 0)
50 | 
51 |     use = (self.cat['use'] == 1) & (self.cat['star_flag'] != 1)
52 | 
53 |     if False:
54 |         # UltraVISTA
55 |         use = (self.cat['USE'] == 1) & (self.cat['star'] == 0)
56 |         use = (self.cat['contamination'] == 0) & (self.cat['nan_contam'] == 0) & (self.cat['star'] == 0)
57 | 
58 |     idx &= use
59 | 
60 |     idx = np.arange(self.NOBJ)[idx]  # boolean mask -> integer indices
61 | 
62 |     # test
63 |     iter = 100
64 |     error_residuals, update_templates, update_zeropoints = False, False, False
65 | 
66 |     self.iterate_zp_templates(idx=idx, update_templates=update_templates, iter=iter, error_residuals=error_residuals, update_zeropoints=update_zeropoints, save_templates=False, n_proc=4)
67 | 
68 |     rf_tempfilt, f_rest = self.rest_frame_fluxes(f_numbers=[153,155,161], pad_width=0.1, percentiles=[2.5,16,50,84,97.5])
69 | 
70 |     # rest-frame colors from the median (index 2) percentile fluxes
71 |     uv = -2.5*np.log10(f_rest[:,0,2]/f_rest[:,1,2])
72 |     vj = -2.5*np.log10(f_rest[:,1,2]/f_rest[:,2,2])
# (numbering as in dump)
70 |     uv = -2.5*np.log10(f_rest[:,0,2]/f_rest[:,1,2])
71 |     vj = -2.5*np.log10(f_rest[:,1,2]/f_rest[:,2,2])
72 | 
73 |     import astropy.cosmology
74 |     from astropy.cosmology import Planck15
75 |     cosmo = Planck15
76 |     cosmo = astropy.cosmology.FlatLambdaCDM(H0=70, Om0=0.3)  # NOTE(review): overrides Planck15 on the line above
77 | 
78 |     import astropy.units as u
79 | 
80 |     dL = cosmo.luminosity_distance(self.zgrid).to(u.cm)
81 |     dL_i = np.interp(self.zbest, self.zgrid, dL)  # NOTE(review): np.interp on a Quantity drops units; result assumed cm -- verify
82 | 
83 |     #mass_ratio = (np.interp(self.zbest, self.zgrid, dL)/np.interp(fout['z'], self.zgrid, dL))**2
84 | 
85 |     fnu_factor = 10**(-0.4*(self.param['PRIOR_ABZP']+48.6))  # catalog flux -> erg/s/cm2/Hz
86 |     Lnu = (f_rest[:,:,2].T*fnu_factor*4*np.pi*dL_i**2/(1+self.zbest)).T
87 |     nuLnu = Lnu*(3.e8/(rf_tempfilt.lc*1.e-10))
88 |     Ltot = nuLnu / 3.839e33  # 3.839e33 erg/s = L_sun
89 |     Lv = Ltot[:,1]
90 |     Lj = Ltot[:,2]
91 | 
92 |     lines = open('templates/uvista_nmf/spectra_kc13_12_tweak.param').readlines()
93 |     MLv_template = np.asarray([line.split()[3] for line in lines if ('MLv' in line) & (line.startswith('# '))],dtype=float)
94 |     SFRv_template = np.asarray([line.split()[5] for line in lines if ('MLv' in line) & (line.startswith('# '))],dtype=float)
95 | 
96 |     irest = 1 # V
97 |     #irest = 2 # J
98 | 
99 |     ### Full individual template set
100 |     if 'fsps_full' in self.param['TEMPLATES_FILE']:
101 |         nu = 3.e18/rf_tempfilt.lc[irest]
102 |         Lvt = rf_tempfilt.tempfilt[:,irest]*nu # nu Lnu luminosity
103 | 
104 |         # These are masses
105 |         lines = open(self.param['TEMPLATES_FILE']).readlines()
106 |         MLv_template = np.asarray([line.split()[4] for line in lines if line.startswith('# ')],dtype=float)
107 |         SFRv_template = np.asarray([line.split()[5] for line in lines if line.startswith('# ')],dtype=float)
108 | 
109 |         MLv_template /= Lvt
110 |         SFRv_template /= Lvt
111 | 
112 |     csum = self.coeffs_best*rf_tempfilt.tempfilt[:,irest]  # per-template V-band flux contribution
113 |     csum = (csum.T/f_rest[:,irest,2]).T
114 |     #csum = (csum.T/self.coeffs_best.sum(axis=1)).T
115 |     MLv = np.sum(csum*MLv_template, axis=1)#/np.sum(csum, axis=1)
116 |     stellar_mass = Ltot[:,irest]*MLv
117 |     sfr = Ltot[:,irest]*np.sum(csum*SFRv_template, axis=1)#/np.sum(csum, axis=1)
118 | 
119 |     tf = rf_tempfilt.tempfilt
120 |     uv_temp = -2.5*np.log10(tf[:,0]/tf[:,1])
121 |     vj_temp = -2.5*np.log10(tf[:,1]/tf[:,2])
122 | 
123 |     plt.scatter(vj[idx], uv[idx], alpha=0.1,c=np.log10(MLv)[idx], vmin=-1, vmax=1.5, edgecolor='None')
124 |     plt.scatter(vj_temp, uv_temp, alpha=0.8,c=np.log10(MLv_template), vmin=-1, vmax=1.5, marker='s', edgecolor='w', s=30)
125 |     plt.xlim(-0.5,3); plt.ylim(-0.5,3)
126 | 
127 |     # sSFR
128 |     ssfr = sfr / stellar_mass
129 |     plt.scatter(vj[idx], uv[idx], alpha=0.1,c=np.log10(ssfr)[idx], vmin=-13, vmax=-8, edgecolor='None', cmap='jet_r')
130 |     #plt.scatter(vj_temp, uv_temp, alpha=0.8,c=np.log10(SFRv_template/MLv_template), vmin=-13, vmax=-10, marker='s', edgecolor='w', s=30)
131 |     plt.xlim(-0.5,3); plt.ylim(-0.5,3)
132 | 
133 |     j = 0
134 | 
135 |     mass_fraction = ((csum*MLv_template).T/(csum*MLv_template).sum(axis=1)).T
136 |     # L fraction
137 |     light_fraction = ((csum).T/(csum).sum(axis=1)).T
138 | 
139 |     weight = mass_fraction
140 | 
141 |     j+=1; id = self.cat['id'][idx[j]]; fig = self.show_fit(id, show_fnu=show_fnu, xlim=[0.1,10])  # NOTE(review): `show_fnu` undefined in this file
142 | 
143 |     fig = plt.figure()
144 |     ax = fig.add_subplot(111)
145 |     ax.scatter(vj[idx], uv[idx], alpha=0.1,c=np.log10(MLv)[idx], vmin=-1, vmax=1.5, edgecolor='None')
146 |     ax.scatter(vj_temp, uv_temp, c=np.log10(MLv_template), vmin=-1, vmax=1.5, marker='s', edgecolor='k', s=200*weight[idx[j],:]**0.3)
147 |     ax.scatter(vj[idx][j], uv[idx][j], alpha=0.8,c=np.log10(MLv)[idx][j], vmin=-1, vmax=1.5, edgecolor='k', s=70)
148 |     ax.set_xlim(-0.5,5); ax.set_ylim(-0.5,5)
149 | 
150 |     fout = catIO.Table('/Users/brammer/3DHST/Spectra/Release/v4.1.5/FullRelease/goodss_3dhst_v4.1.5_catalogs/goodss_3dhst.v4.1.5.zbest.fout')  # NOTE(review): `catIO` not imported; hard-coded local path
151 | 
152 |     # run
153 |     iter = -1  # superseded by the loop variable below
154 | 
155 |     for iter in range(7):
156 |         self.iterate_zp_templates(idx=idx, update_templates=(iter > 1), iter=iter, error_residuals=(iter > 0), save_templates=(iter > 8), n_proc=4)
157 |         self.zp[-3] = 1.0
158 |         self.write_zeropoint_file('zphot.zeropoint.FSPS')
159 | 
160 |     # Show fits
161 |     if False:
162 |         ids = self.cat['id'][idx]
163 |         j = -1
164 | 
165 |         j+=1; id = ids[j]; fig = self.show_fit(id, show_fnu=show_fnu, xlim=[0.1,10])
166 | 
167 |     ## Full Run
168 |     idx = np.arange(self.NOBJ)[use]
169 | 
170 |     self.fit_parallel(idx=idx, n_proc=4)
171 |     self.best_fit()
172 |     self.error_residuals()
173 | 
174 |     self.fit_parallel(idx=idx, n_proc=4)
175 |     self.best_fit()
176 |     rf_tempfilt, f_rest = self.rest_frame_fluxes(f_numbers=[153,155,161], pad_width=0.5, percentiles=[2.5,16,50,84,97.5])
177 | 
178 |     hdu = fits.HDUList()
179 |     hdu.append(fits.ImageHDU(data=self.zp, name='zp'))
180 |     hdu.append(fits.ImageHDU(data=self.zbest, name='zbest'))
181 |     hdu.append(fits.ImageHDU(data=self.chi_best, name='chibest'))  # NOTE(review): `chi_best` here vs `chibest` read back at line 31 -- confirm which attribute is correct
182 |     hdu.append(fits.ImageHDU(data=self.fit_chi2, name='fit_chi2'))
183 |     hdu.append(fits.ImageHDU(data=self.fobs, name='fobs'))
184 |     hdu.writeto('eazy_output.fits', clobber=True)  # NOTE(review): `clobber` is the pre-astropy-2.0 spelling of `overwrite` -- verify installed astropy accepts it
185 | 
186 |     np.save('eazy_templates.npy', [self.templates])
187 | 
188 |     ## NUV mag
189 |     idx = np.arange(self.NOBJ)[use & (mag < 23) & (self.zbest < 1)]
190 |     #plt.scatter(nuv_obs[idx]*self.zp[-1]*self.ext_corr[-1], nuv_mod[idx], alpha=0.1)
191 | 
#plt.xlim(0.01,100); plt.ylim(0.01, 100); plt.plot([0.01,100], [0.01,100], color='r'); plt.loglog() 192 | 193 | plt.hist(25-2.5*np.log10(nuv_obs[idx]*self.zp[-1]), bins=100, range=[18,28], alpha=0.5, log=True) 194 | plt.hist(25-2.5*np.log10(nuv_mod[idx]/self.ext_corr[-1]), bins=100, range=[18,28], alpha=0.5, log=True) 195 | 196 | nuv_mag = 25-2.5*np.log10(nuv_mod/self.ext_corr[-1]*totcorr) 197 | idx = use & (nuv_mag < 22) & (self.zbest > 0.8) 198 | 199 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | setup() --------------------------------------------------------------------------------