├── tests ├── __init__.py ├── test_read │ ├── test_spectrum.py │ ├── test_planck.py │ ├── test_color.py │ ├── test_object.py │ ├── test_filter.py │ ├── test_calibration.py │ ├── test_isochrone.py │ └── test_model.py └── test_phot │ └── test_synphot.py ├── species ├── core │ ├── __init__.py │ └── constants.py ├── phot │ └── __init__.py ├── plot │ └── __init__.py ├── util │ ├── __init__.py │ ├── test_util.py │ ├── core_util.py │ ├── spec_util.py │ └── convert_util.py ├── data │ ├── filter_data │ │ └── __init__.py │ ├── misc_data │ │ ├── __init__.py │ │ ├── accretion_data.py │ │ └── dust_data.py │ ├── model_data │ │ └── __init__.py │ ├── phot_data │ │ ├── __init__.py │ │ ├── phot_jwst_ydwarfs.py │ │ ├── phot_vlm_plx.py │ │ └── phot_leggett.py │ ├── spec_data │ │ ├── __init__.py │ │ ├── add_spec_data.py │ │ ├── spec_kesseli2017.py │ │ ├── spec_vega.py │ │ ├── spec_bonnefoy2014.py │ │ ├── spec_allers2013.py │ │ └── spec_irtf.py │ ├── companion_data │ │ ├── __init__.py │ │ ├── companion_spectra.json │ │ └── companion_spectra.py │ ├── isochrone_data │ │ ├── __init__.py │ │ ├── iso_nextgen.py │ │ ├── iso_btsettl.py │ │ ├── iso_marleau.py │ │ ├── iso_ames.py │ │ ├── iso_parsec.py │ │ ├── iso_baraffe2015.py │ │ ├── add_isochrone.py │ │ ├── iso_sonora_bobcat.py │ │ ├── iso_manual.py │ │ ├── iso_saumon2008.py │ │ ├── iso_atmo.py │ │ ├── iso_chabrier2023.py │ │ ├── iso_sonora_diamondback.py │ │ └── iso_linder2019.py │ └── __init__.py ├── fit │ ├── __init__.py │ └── fit_spectrum.py ├── __init__.py ├── read │ └── __init__.py └── _version.py ├── docs ├── _static │ └── species_logo.png ├── modules.rst ├── contributing.rst ├── species.rst ├── species.phot.rst ├── species.data.filter_data.rst ├── species.data.companion_data.rst ├── Makefile ├── index.rst ├── species.data.rst ├── species.data.misc_data.rst ├── species.data.model_data.rst ├── species.core.rst ├── make.bat ├── about.rst ├── species.data.phot_data.rst ├── species.fit.rst ├── species.plot.rst ├── tutorials.rst ├── 
species.data.spec_data.rst ├── species.read.rst ├── configuration.rst ├── conf.py ├── species.util.rst ├── database.rst ├── species.data.isochrone_data.rst └── installation.rst ├── .codecov.yml ├── .readthedocs.yml ├── .github ├── pull_request_template.md └── workflows │ └── main.yml ├── .gitignore ├── LICENSE ├── Makefile ├── pyproject.toml └── README.rst /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/phot/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/plot/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/util/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/filter_data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/misc_data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/model_data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/phot_data/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/spec_data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/companion_data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/isochrone_data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /species/data/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["database"] 2 | -------------------------------------------------------------------------------- /docs/_static/species_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tomasstolker/species/HEAD/docs/_static/species_logo.png -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | 3 | API documentation 4 | ================= 5 | 6 | .. 
toctree:: 7 | :maxdepth: 4 8 | 9 | species 10 | -------------------------------------------------------------------------------- /species/fit/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [ 2 | "compare_spectra", 3 | "emission_line", 4 | "fit_evolution", 5 | "fit_model", 6 | "fit_spectrum", 7 | "retrieval", 8 | ] 9 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | threshold: 100% 6 | if_not_found: success 7 | patch: no 8 | 9 | comment: off 10 | 11 | ignore: 12 | - tests/* 13 | -------------------------------------------------------------------------------- /species/__init__.py: -------------------------------------------------------------------------------- 1 | from species.core.species_init import SpeciesInit 2 | from ._version import __version__ 3 | 4 | __author__ = "Tomas Stolker" 5 | __license__ = "MIT" 6 | __maintainer__ = "Tomas Stolker" 7 | __email__ = "stolker@strw.leidenuniv.nl" 8 | -------------------------------------------------------------------------------- /species/read/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = [ 2 | "read_calibration", 3 | "read_color", 4 | "read_filter", 5 | "read_isochrone", 6 | "read_model", 7 | "read_object", 8 | "read_planck", 9 | "read_radtrans", 10 | "read_spectrum", 11 | ] 12 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | configuration: docs/conf.py 5 | 6 | build: 7 | os: ubuntu-22.04 8 | tools: 9 | python: "3.12" 10 | 11 | python: 12 | install: 13 | - method: pip 14 | path: . 
15 | extra_requirements: 16 | - full 17 | - docs 18 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. _contributing: 2 | 3 | Contributing 4 | ============ 5 | 6 | Contributions are welcome so please consider `forking `_ the repository and creating a `pull request `_. Bug reports and feature requests can be provided by creating an `issue `_ on the Github page. 7 | -------------------------------------------------------------------------------- /docs/species.rst: -------------------------------------------------------------------------------- 1 | species package 2 | =============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | species.core 10 | species.data 11 | species.fit 12 | species.phot 13 | species.plot 14 | species.read 15 | species.util 16 | 17 | Module contents 18 | --------------- 19 | 20 | .. automodule:: species 21 | :members: 22 | :undoc-members: 23 | :show-inheritance: 24 | -------------------------------------------------------------------------------- /docs/species.phot.rst: -------------------------------------------------------------------------------- 1 | species.phot package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.phot.syn\_phot module 8 | ----------------------------- 9 | 10 | .. automodule:: species.phot.syn_phot 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: species.phot 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Thank you for contributing to the species repo! 
Before submitting this PR, please make sure: 2 | 3 | - [ ] That the branch of your PR is synced with the main branch of tomasstolker/species. 4 | - [ ] To add and/or update the docstrings (i.e. parameters, returns, types, and descriptions). 5 | - [ ] To add and/or update the typehints and typechecks. 6 | - [ ] To run `black` on any files that have been adjusted (see black.readthedocs.io). 7 | - [ ] That only text files have been included with the commits. 8 | 9 | -------------------------------------------------------------------------------- /docs/species.data.filter_data.rst: -------------------------------------------------------------------------------- 1 | species.data.filter\_data package 2 | ================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.data.filter\_data.filter\_data module 8 | --------------------------------------------- 9 | 10 | .. automodule:: species.data.filter_data.filter_data 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. 
automodule:: species.data.filter_data 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .DS_Store 3 | docs/_build 4 | docs/tutorials/species_config.ini 5 | docs/tutorials/species_database.hdf5 6 | docs/tutorials/retrieval.tgz 7 | docs/tutorials/*.png 8 | docs/tutorials/*.jpg 9 | docs/tutorials/*.json 10 | docs/tutorials/*.fits 11 | docs/tutorials/*.dat 12 | docs/tutorials/data/* 13 | docs/tutorials/multinest/* 14 | docs/tutorials/ultranest/* 15 | docs/tutorials/dynesty/* 16 | docs/tutorials/.ipynb_checkpoints/* 17 | build/ 18 | dist/ 19 | species.egg-info/ 20 | .coverage* 21 | coverage.xml 22 | .pytest_cache/ 23 | htmlcov/ 24 | .tox/ 25 | -------------------------------------------------------------------------------- /docs/species.data.companion_data.rst: -------------------------------------------------------------------------------- 1 | species.data.companion\_data package 2 | ==================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.data.companion\_data.companion\_spectra module 8 | ------------------------------------------------------ 9 | 10 | .. automodule:: species.data.companion_data.companion_spectra 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | Module contents 16 | --------------- 17 | 18 | .. automodule:: species.data.companion_data 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 
8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. _index: 2 | 3 | Documentation for *species* 4 | =========================== 5 | 6 | *species* is a toolkit for atmospheric characterization of directly imaged exoplanets. 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | :caption: Getting Started 11 | :hidden: 12 | 13 | installation 14 | tutorials/running_species.ipynb 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | :caption: User Documentation 19 | :hidden: 20 | 21 | overview 22 | configuration 23 | database 24 | tutorials 25 | modules 26 | 27 | .. toctree:: 28 | :maxdepth: 2 29 | :caption: About species 30 | :hidden: 31 | 32 | contributing 33 | about 34 | -------------------------------------------------------------------------------- /docs/species.data.rst: -------------------------------------------------------------------------------- 1 | species.data package 2 | ==================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | species.data.companion_data 11 | species.data.filter_data 12 | species.data.isochrone_data 13 | species.data.misc_data 14 | species.data.model_data 15 | species.data.phot_data 16 | species.data.spec_data 17 | 18 | Submodules 19 | ---------- 20 | 21 | species.data.database module 22 | ---------------------------- 23 | 24 | .. 
automodule:: species.data.database 25 | :members: 26 | :undoc-members: 27 | :show-inheritance: 28 | 29 | Module contents 30 | --------------- 31 | 32 | .. automodule:: species.data 33 | :members: 34 | :undoc-members: 35 | :show-inheritance: 36 | -------------------------------------------------------------------------------- /docs/species.data.misc_data.rst: -------------------------------------------------------------------------------- 1 | species.data.misc\_data package 2 | =============================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.data.misc\_data.accretion\_data module 8 | ---------------------------------------------- 9 | 10 | .. automodule:: species.data.misc_data.accretion_data 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.data.misc\_data.dust\_data module 16 | ----------------------------------------- 17 | 18 | .. automodule:: species.data.misc_data.dust_data 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: species.data.misc_data 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/species.data.model_data.rst: -------------------------------------------------------------------------------- 1 | species.data.model\_data package 2 | ================================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.data.model\_data.custom\_model module 8 | --------------------------------------------- 9 | 10 | .. automodule:: species.data.model_data.custom_model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.data.model\_data.model\_spectra module 16 | ---------------------------------------------- 17 | 18 | .. automodule:: species.data.model_data.model_spectra 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. 
automodule:: species.data.model_data 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/species.core.rst: -------------------------------------------------------------------------------- 1 | species.core package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.core.box module 8 | ----------------------- 9 | 10 | .. automodule:: species.core.box 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.core.constants module 16 | ----------------------------- 17 | 18 | .. automodule:: species.core.constants 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.core.species\_init module 24 | --------------------------------- 25 | 26 | .. automodule:: species.core.species_init 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: species.core 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /species/_version.py: -------------------------------------------------------------------------------- 1 | # file generated by setuptools-scm 2 | # don't change, don't track in version control 3 | 4 | __all__ = [ 5 | "__version__", 6 | "__version_tuple__", 7 | "version", 8 | "version_tuple", 9 | "__commit_id__", 10 | "commit_id", 11 | ] 12 | 13 | TYPE_CHECKING = False 14 | if TYPE_CHECKING: 15 | from typing import Tuple 16 | from typing import Union 17 | 18 | VERSION_TUPLE = Tuple[Union[int, str], ...] 
19 | COMMIT_ID = Union[str, None] 20 | else: 21 | VERSION_TUPLE = object 22 | COMMIT_ID = object 23 | 24 | version: str 25 | __version__: str 26 | __version_tuple__: VERSION_TUPLE 27 | version_tuple: VERSION_TUPLE 28 | commit_id: COMMIT_ID 29 | __commit_id__: COMMIT_ID 30 | 31 | __version__ = version = '0.10.0' 32 | __version_tuple__ = version_tuple = (0, 10, 0) 33 | 34 | __commit_id__ = commit_id = None 35 | -------------------------------------------------------------------------------- /species/util/test_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for running the unit tests. 3 | """ 4 | 5 | import os 6 | 7 | 8 | def create_config(test_path): 9 | """ 10 | Function for creating a configuration file in the test folder. 11 | 12 | Parameters 13 | ---------- 14 | test_path : str 15 | Folder where the unit tests are located. 16 | 17 | Returns 18 | ------- 19 | NoneType 20 | None 21 | """ 22 | 23 | config_file = os.path.join(test_path, "species_config.ini") 24 | database_file = os.path.join(test_path, "species_database.hdf5") 25 | data_folder = os.path.join(test_path, "data/") 26 | 27 | with open(config_file, "w", encoding="utf-8") as config: 28 | config.write("[species]\n") 29 | config.write(f"database = {database_file}\n") 30 | config.write(f"data_folder = {data_folder}\n") 31 | config.write("vega_mag = 0.03") 32 | -------------------------------------------------------------------------------- /species/util/core_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module with utility functions for the ``species`` core. 3 | """ 4 | 5 | from typeguard import typechecked 6 | 7 | 8 | @typechecked 9 | def print_section( 10 | sect_title: str, 11 | bound_char: str = "-", 12 | extra_line: bool = True, 13 | ) -> None: 14 | """ 15 | Function for printing a section title. 16 | 17 | Parameters 18 | ---------- 19 | sect_title : str 20 | Section title. 
21 | bound_char : str 22 | Boundary character for around the section title. 23 | extra_line : bool 24 | Extra new line at the beginning. 25 | 26 | Returns 27 | ------- 28 | NoneType 29 | None 30 | """ 31 | 32 | if extra_line: 33 | print("\n" + len(sect_title) * bound_char) 34 | else: 35 | print(len(sect_title) * bound_char) 36 | 37 | print(sect_title) 38 | print(len(sect_title) * bound_char + "\n") 39 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /species/core/constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Physical constants in the International System of Units (SI). 
3 | """ 4 | 5 | from typing import Final 6 | from astropy import constants 7 | 8 | PLANCK: Final = constants.h.value # (m2 kg s-1) 9 | LIGHT: Final = constants.c.value # (m s-1) 10 | BOLTZMANN: Final = constants.k_B.value # (J K-1) 11 | GRAVITY: Final = constants.G.value # (m3 kg−1 s−2) 12 | PARSEC: Final = constants.pc.value # (m) 13 | AU: Final = constants.au.value # (m) 14 | R_JUP: Final = constants.R_jup.value # (m) 15 | M_JUP: Final = constants.M_jup.value # (kg) 16 | L_SUN: Final = constants.L_sun.value # (W) 17 | R_SUN: Final = constants.R_sun.value # (m) 18 | M_SUN: Final = constants.M_sun.value # (kg) 19 | R_EARTH: Final = constants.R_earth.value # (m) 20 | M_EARTH: Final = constants.M_earth.value # (kg) 21 | SIGMA_SB: Final = constants.sigma_sb.value # (W m−2 K−4) 22 | ATOMIC_MASS: Final = constants.u.value # (kg) 23 | RYDBERG: Final = constants.Ryd.value # (m-1) 24 | -------------------------------------------------------------------------------- /docs/about.rst: -------------------------------------------------------------------------------- 1 | .. _about: 2 | 3 | About 4 | ===== 5 | 6 | Questions & feedback 7 | -------------------- 8 | 9 | *species* is maintained by Tomas Stolker (stolker@strw.leidenuniv.nl). Feel free to send an email for questions, comments, or suggestions. 10 | 11 | Attribution 12 | ----------- 13 | 14 | Please cite `Stolker et al. (2020) `_ whenever results from *species* are used in a publication. Please also make sure to give credit the relevant articles for the use of the publicly available data that *species* is using. 15 | 16 | License 17 | ------- 18 | 19 | *species* is distributed under the MIT License. See `LICENSE `_ for the terms and conditions. 20 | 21 | Logo 22 | ---- 23 | 24 | Are you presenting results obtained with `species` in a talk or poster? Please consider using the `logo `_ that is available in PDF and PNG format. 
25 | -------------------------------------------------------------------------------- /docs/species.data.phot_data.rst: -------------------------------------------------------------------------------- 1 | species.data.phot\_data package 2 | =============================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.data.phot\_data.phot\_jwst\_ydwarfs module 8 | -------------------------------------------------- 9 | 10 | .. automodule:: species.data.phot_data.phot_jwst_ydwarfs 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.data.phot\_data.phot\_leggett module 16 | -------------------------------------------- 17 | 18 | .. automodule:: species.data.phot_data.phot_leggett 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.data.phot\_data.phot\_vlm\_plx module 24 | --------------------------------------------- 25 | 26 | .. automodule:: species.data.phot_data.phot_vlm_plx 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. 
automodule:: species.data.phot_data 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018-2024 Tomas Stolker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_nextgen.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import h5py 4 | import pooch 5 | 6 | from typeguard import typechecked 7 | 8 | from species.data.isochrone_data.iso_manual import add_manual 9 | 10 | 11 | @typechecked 12 | def add_nextgen(database: h5py._hl.files.File, input_path: str) -> None: 13 | """ 14 | Function for adding the NextGen isochrone data to the database. 15 | 16 | Parameters 17 | ---------- 18 | database : h5py._hl.files.File 19 | Database. 20 | input_path : str 21 | Folder where the data is located. 22 | 23 | Returns 24 | ------- 25 | NoneType 26 | None 27 | """ 28 | 29 | url = ( 30 | "https://home.strw.leidenuniv.nl/~stolker/species/" 31 | "model.NextGen.M-0.0.MKO.Vega" 32 | ) 33 | 34 | iso_tag = "nextgen" 35 | 36 | input_file = url.rsplit("/", maxsplit=1)[-1] 37 | data_file = Path(input_path) / input_file 38 | 39 | if not data_file.exists(): 40 | pooch.retrieve( 41 | url=url, 42 | known_hash="a72aef342a1782553094114dabc3196c0862a0c17ee5c368fbd1b47b5c611363", 43 | fname=input_file, 44 | path=input_path, 45 | progressbar=True, 46 | ) 47 | 48 | add_manual( 49 | database=database, 50 | tag=iso_tag, 51 | file_name=str(data_file), 52 | model_name="nextgen", 53 | ) 54 | -------------------------------------------------------------------------------- /docs/species.fit.rst: -------------------------------------------------------------------------------- 1 | species.fit package 2 | =================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.fit.compare\_spectra module 8 | ----------------------------------- 9 | 10 | .. automodule:: species.fit.compare_spectra 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.fit.emission\_line module 16 | --------------------------------- 17 | 18 | .. 
automodule:: species.fit.emission_line 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.fit.fit\_evolution module 24 | --------------------------------- 25 | 26 | .. automodule:: species.fit.fit_evolution 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | species.fit.fit\_model module 32 | ----------------------------- 33 | 34 | .. automodule:: species.fit.fit_model 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | species.fit.fit\_spectrum module 40 | -------------------------------- 41 | 42 | .. automodule:: species.fit.fit_spectrum 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | species.fit.retrieval module 48 | ---------------------------- 49 | 50 | .. automodule:: species.fit.retrieval 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | Module contents 56 | --------------- 57 | 58 | .. automodule:: species.fit 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_btsettl.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import h5py 4 | import pooch 5 | 6 | from typeguard import typechecked 7 | 8 | from species.data.isochrone_data.iso_manual import add_manual 9 | 10 | 11 | @typechecked 12 | def add_btsettl(database: h5py._hl.files.File, input_path: str) -> None: 13 | """ 14 | Function for adding the BT-Settl isochrone data to the database. 15 | 16 | Parameters 17 | ---------- 18 | database : h5py._hl.files.File 19 | Database. 20 | input_path : str 21 | Folder where the data is located. 
22 | 23 | Returns 24 | ------- 25 | NoneType 26 | None 27 | """ 28 | 29 | url = ( 30 | "https://home.strw.leidenuniv.nl/~stolker/species/" 31 | "model.BT-Settl.M-0.0.MKO.Vega" 32 | ) 33 | 34 | iso_tag = "bt-settl" 35 | 36 | input_file = url.rsplit("/", maxsplit=1)[-1] 37 | data_file = Path(input_path) / input_file 38 | 39 | if not data_file.exists(): 40 | print() 41 | 42 | pooch.retrieve( 43 | url=url, 44 | known_hash="18e6a2b1e0b2452973c9af3e34138d5346cf04945239a085142f1fade5f7946e", 45 | fname=input_file, 46 | path=input_path, 47 | progressbar=True, 48 | ) 49 | 50 | add_manual( 51 | database=database, 52 | tag=iso_tag, 53 | file_name=str(data_file), 54 | model_name="bt-settl", 55 | ) 56 | -------------------------------------------------------------------------------- /docs/species.plot.rst: -------------------------------------------------------------------------------- 1 | species.plot package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.plot.plot\_color module 8 | ------------------------------- 9 | 10 | .. automodule:: species.plot.plot_color 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.plot.plot\_comparison module 16 | ------------------------------------ 17 | 18 | .. automodule:: species.plot.plot_comparison 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.plot.plot\_evolution module 24 | ----------------------------------- 25 | 26 | .. automodule:: species.plot.plot_evolution 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | species.plot.plot\_mcmc module 32 | ------------------------------ 33 | 34 | .. automodule:: species.plot.plot_mcmc 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | species.plot.plot\_retrieval module 40 | ----------------------------------- 41 | 42 | .. 
automodule:: species.plot.plot_retrieval 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | species.plot.plot\_spectrum module 48 | ---------------------------------- 49 | 50 | .. automodule:: species.plot.plot_spectrum 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | Module contents 56 | --------------- 57 | 58 | .. automodule:: species.plot 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | -------------------------------------------------------------------------------- /docs/tutorials.rst: -------------------------------------------------------------------------------- 1 | .. _tutorials: 2 | 3 | Tutorials 4 | ========= 5 | 6 | This page contains a list of tutorials which highlight some of the functionalities of `species`. The tutorials can be downloaded as `Jupyter notebook `_. Please `create an issue `_ if you encounter any problems. 7 | 8 | .. tip:: 9 | Details on the various classes, functions, and parameters can be found in the `API documentation `_. 10 | 11 | **Fitting model spectra, parameter retrievals** 12 | 13 | .. toctree:: 14 | :maxdepth: 1 15 | 16 | tutorials/fitting_model_spectra.ipynb 17 | tutorials/grid_comparison.ipynb 18 | tutorials/evolution_fit.ipynb 19 | tutorials/atmospheric_retrieval.ipynb 20 | tutorials/emission_line.ipynb 21 | tutorials/flux_calibration.ipynb 22 | 23 | **Atmospheric and evolutionary models** 24 | 25 | .. toctree:: 26 | :maxdepth: 1 27 | 28 | tutorials/model_spectra.ipynb 29 | tutorials/data_model.ipynb 30 | tutorials/read_isochrone.ipynb 31 | 32 | **Synthetic photometry, magnitudes, fluxes** 33 | 34 | .. toctree:: 35 | :maxdepth: 1 36 | 37 | tutorials/flux_magnitude.ipynb 38 | tutorials/synthetic_photometry.ipynb 39 | 40 | **Color and magnitude diagrams** 41 | 42 | .. toctree:: 43 | :maxdepth: 1 44 | 45 | tutorials/color_magnitude_broadband.ipynb 46 | tutorials/color_magnitude_narrowband.ipynb 47 | 48 | **Data of directly imaged planets** 49 | 50 | .. 
@typechecked
def add_marleau(database: h5py._hl.files.File, tag: str, file_name: str) -> None:
    """
    Add the isochrone data from Marleau et al. (2019) to the
    database. The data are not publicly hosted and can be
    requested by contacting Gabriel Marleau.

    Parameters
    ----------
    database : h5py._hl.files.File
        Database.
    tag : str
        Tag name in the database.
    file_name : str
        Filename with the isochrones data.

    Returns
    -------
    NoneType
        None
    """

    # Input columns:
    #   M      age    S_0           L        S(t)          R      Teff
    #   (M_J)  (Gyr)  (k_B/baryon)  (L_sol)  (k_B/baryon)  (R_J)  (K)
    # The two entropy columns are not used.
    m_jup, age_gyr, _, lum_sol, _, r_jup, t_eff = np.loadtxt(file_name, unpack=True)

    age_myr = age_gyr * 1e3  # (Gyr) -> (Myr)
    log_lum = np.log10(lum_sol)  # log10(L/Lsun)

    # Surface gravity in CGS units: g = G * M / R^2 (cm s-2)
    mass_gram = 1e3 * m_jup * constants.M_JUP  # (g)
    radius_cm = 1e2 * r_jup * constants.R_JUP  # (cm)
    log_g = np.log10(1e3 * constants.GRAVITY * mass_gram / radius_cm**2)

    print(f"\nAdding isochrones: {tag}...", end="", flush=True)

    # One row per grid point: (age, mass, Teff, log L, log g)
    grid = np.column_stack((age_myr, m_jup, t_eff, log_lum, log_g))

    # Sort the rows by increasing age
    grid = grid[np.argsort(grid[:, 0]), :]

    dset = database.create_dataset(f"isochrones/{tag}/evolution", data=grid)

    dset.attrs["model"] = "marleau"

    print(" [DONE]")
23 | 24 | Returns 25 | ------- 26 | NoneType 27 | None 28 | """ 29 | 30 | url_list = [ 31 | "https://home.strw.leidenuniv.nl/~stolker/species/" 32 | "model.AMES-Cond-2000.M-0.0.MKO.Vega", 33 | "https://home.strw.leidenuniv.nl/~stolker/species/" 34 | "model.AMES-dusty.M-0.0.MKO.Vega", 35 | ] 36 | 37 | file_hash = [ 38 | "fc04e6f7c02982bb3187b55cdefc2464e3f1564fb8026a8958967cb889f0f581", 39 | "c7ba32ae10111c9ca692bf75154edac70b050c06cae211b421e1473725d6380c", 40 | ] 41 | 42 | iso_tags = ["ames-cond", "ames-dusty"] 43 | 44 | for url_idx, url_item in enumerate(url_list): 45 | input_file = url_item.split("/")[-1] 46 | data_file = Path(input_path) / input_file 47 | 48 | if not data_file.exists(): 49 | print() 50 | 51 | pooch.retrieve( 52 | url=url_item, 53 | known_hash=file_hash[url_idx], 54 | fname=input_file, 55 | path=input_path, 56 | progressbar=True, 57 | ) 58 | 59 | add_manual( 60 | database=database, 61 | tag=iso_tags[url_idx], 62 | file_name=str(data_file), 63 | model_name="ames", 64 | ) 65 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | strategy: 11 | matrix: 12 | python-version: ['3.10', '3.11', '3.12', '3.13'] 13 | 14 | steps: 15 | - name: Checkout Project 16 | uses: actions/checkout@v2 17 | 18 | - name: Install BLAS, LAPACK, GCC, Pandoc 19 | run: sudo apt-get install libblas-dev liblapack-dev gcc pandoc 20 | 21 | - name: Setup Python ${{ matrix.python-version }} 22 | uses: actions/setup-python@v2 23 | with: 24 | python-version: ${{ matrix.python-version }} 25 | 26 | # - name: Install MultiNest 27 | # run: | 28 | # git clone https://github.com/JohannesBuchner/MultiNest 29 | # cd MultiNest/build 30 | # cmake .. 
31 | # make 32 | 33 | - name: Install dependencies 34 | run: | 35 | pip install --upgrade pip wheel cython 36 | pip install .[full,docs,test] 37 | 38 | - name: Lint with flake8 39 | run: | 40 | # stop the build if there are Python syntax errors or undefined names 41 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 42 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 43 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 44 | 45 | - name: Build documentation 46 | run: | 47 | # export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/work/species/species/MultiNest/lib 48 | make docs 49 | 50 | - name: Run pytest 51 | run: | 52 | # export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/work/species/species/MultiNest/lib 53 | make test 54 | 55 | - name: Upload coverage to Codecov 56 | uses: codecov/codecov-action@v2 57 | -------------------------------------------------------------------------------- /docs/species.data.spec_data.rst: -------------------------------------------------------------------------------- 1 | species.data.spec\_data package 2 | =============================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.data.spec\_data.add\_spec\_data module 8 | ---------------------------------------------- 9 | 10 | .. automodule:: species.data.spec_data.add_spec_data 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.data.spec\_data.spec\_allers2013 module 16 | ----------------------------------------------- 17 | 18 | .. automodule:: species.data.spec_data.spec_allers2013 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.data.spec\_data.spec\_bonnefoy2014 module 24 | ------------------------------------------------- 25 | 26 | .. 
automodule:: species.data.spec_data.spec_bonnefoy2014 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | species.data.spec\_data.spec\_irtf module 32 | ----------------------------------------- 33 | 34 | .. automodule:: species.data.spec_data.spec_irtf 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | species.data.spec\_data.spec\_kesseli2017 module 40 | ------------------------------------------------ 41 | 42 | .. automodule:: species.data.spec_data.spec_kesseli2017 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | species.data.spec\_data.spec\_spex module 48 | ----------------------------------------- 49 | 50 | .. automodule:: species.data.spec_data.spec_spex 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | species.data.spec\_data.spec\_vega module 56 | ----------------------------------------- 57 | 58 | .. automodule:: species.data.spec_data.spec_vega 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | Module contents 64 | --------------- 65 | 66 | .. 
automodule:: species.data.spec_data 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help pypi pypi-test docs coverage test clean 2 | 3 | help: 4 | @echo "pypi - submit to PyPI server" 5 | @echo "pypi-check - check the distribution for PyPI" 6 | @echo "pypi-test - submit to TestPyPI server" 7 | @echo "docs - generate Sphinx documentation" 8 | @echo "coverage - check code coverage" 9 | @echo "test - run unit tests" 10 | @echo "clean - remove artifacts" 11 | 12 | pypi: 13 | python -m build 14 | twine upload dist/* 15 | 16 | pypi-check: 17 | python -m build 18 | twine check dist/* 19 | 20 | pypi-test: 21 | python -m build 22 | twine upload --repository testpypi dist/* 23 | 24 | docs: 25 | rm -f docs/species.core.rst 26 | rm -f docs/species.data.rst 27 | rm -f docs/species.data.*.rst 28 | rm -f docs/species.fit.rst 29 | rm -f docs/species.phot.rst 30 | rm -f docs/species.plot.rst 31 | rm -f docs/species.read.rst 32 | rm -f docs/species.util.rst 33 | sphinx-apidoc -o docs species 34 | cd docs/ 35 | $(MAKE) -C docs clean 36 | $(MAKE) -C docs html 37 | rm -f docs/species_config.ini 38 | rm -f docs/species_database.hdf5 39 | 40 | coverage: 41 | coverage run --source=species -m pytest 42 | coverage report -m 43 | 44 | test: 45 | pytest --cov=species/ --cov-report=xml 46 | 47 | clean: 48 | find . -name '*.pyc' -exec rm -f {} + 49 | find . 
-name '__pycache__' -exec rm -rf {} + 50 | rm -f .coverage* 51 | rm -f coverage.xml 52 | rm -rf .pytest_cache/ 53 | rm -rf docs/_build/ 54 | rm -rf docs/tutorials/data/ 55 | rm -rf docs/tutorials/multinest/ 56 | rm -rf docs/tutorials/ultranest/ 57 | rm -rf docs/tutorials/dynesty/ 58 | rm -rf docs/tutorials/.ipynb_checkpoints 59 | rm -f docs/tutorials/species_config.ini 60 | rm -f docs/tutorials/species_database.hdf5 61 | rm -f docs/tutorials/retrieval.tgz 62 | rm -f docs/tutorials/*.png 63 | rm -f docs/tutorials/*.jpg 64 | rm -f docs/tutorials/*.fits 65 | rm -f docs/tutorials/*.dat 66 | rm -f docs/tutorials/*.json 67 | rm -rf build/ 68 | rm -rf dist/ 69 | rm -rf species.egg-info/ 70 | rm -rf htmlcov/ 71 | rm -rf .tox/ 72 | -------------------------------------------------------------------------------- /docs/species.read.rst: -------------------------------------------------------------------------------- 1 | species.read package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | species.read.read\_calibration module 8 | ------------------------------------- 9 | 10 | .. automodule:: species.read.read_calibration 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.read.read\_color module 16 | ------------------------------- 17 | 18 | .. automodule:: species.read.read_color 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.read.read\_filter module 24 | -------------------------------- 25 | 26 | .. automodule:: species.read.read_filter 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | species.read.read\_isochrone module 32 | ----------------------------------- 33 | 34 | .. automodule:: species.read.read_isochrone 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | species.read.read\_model module 40 | ------------------------------- 41 | 42 | .. 
automodule:: species.read.read_model 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | species.read.read\_object module 48 | -------------------------------- 49 | 50 | .. automodule:: species.read.read_object 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | species.read.read\_planck module 56 | -------------------------------- 57 | 58 | .. automodule:: species.read.read_planck 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | species.read.read\_radtrans module 64 | ---------------------------------- 65 | 66 | .. automodule:: species.read.read_radtrans 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | species.read.read\_spectrum module 72 | ---------------------------------- 73 | 74 | .. automodule:: species.read.read_spectrum 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | Module contents 80 | --------------- 81 | 82 | .. automodule:: species.read 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "setuptools-scm", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "species" 7 | authors = [{name = "Tomas Stolker", email = "stolker@strw.leidenuniv.nl"}] 8 | description = "Toolkit for atmospheric characterization of directly imaged exoplanets" 9 | readme = "README.rst" 10 | requires-python = ">=3.10,<3.14" 11 | license = "MIT" 12 | classifiers = [ 13 | "Programming Language :: Python :: 3.10", 14 | "Programming Language :: Python :: 3.11", 15 | "Programming Language :: Python :: 3.12", 16 | "Programming Language :: Python :: 3.13", 17 | "Intended Audience :: Science/Research", 18 | "Topic :: Scientific/Engineering :: Astronomy", 19 | ] 20 | dependencies = [ 21 | "astropy", 22 | "dust-extinction", 
23 | "h5py", 24 | "matplotlib", 25 | "numba", 26 | "numpy", 27 | "pooch", 28 | "scipy", 29 | "spectres", 30 | "tqdm", 31 | "typeguard", 32 | ] 33 | dynamic = ["version"] 34 | 35 | [project.optional-dependencies] 36 | full = [ 37 | "astrodbkit2", 38 | "astroquery", 39 | "dynesty", 40 | "emcee", 41 | "molmass", 42 | "pandas", 43 | "PyMieScatt", 44 | "pymultinest", 45 | "requests", 46 | "schwimmbad", 47 | "shapely", 48 | "specutils", 49 | "SQLAlchemy", 50 | "ultranest", 51 | "xlrd", 52 | ] 53 | docs = [ 54 | "cython", 55 | "jupyter", 56 | "nbsphinx", 57 | "pandoc", 58 | "sphinx", 59 | "sphinx-automodapi", 60 | "sphinx_book_theme", 61 | ] 62 | test = [ 63 | "black", 64 | "flake8", 65 | "pycodestyle", 66 | "pylint", 67 | "pytest", 68 | "pytest-cov", 69 | ] 70 | 71 | [project.urls] 72 | Documentation = "https://species.readthedocs.io" 73 | Repository = "https://github.com/tomasstolker/species" 74 | Issues = "https://github.com/tomasstolker/species/issues" 75 | 76 | [tool.setuptools.packages.find] 77 | where = ["."] 78 | include = ["species*"] 79 | 80 | [tool.setuptools_scm] 81 | write_to = "species/_version.py" 82 | local_scheme = "no-local-version" 83 | 84 | [tool.setuptools.package-data] 85 | "species.data" = ["*.json"] 86 | -------------------------------------------------------------------------------- /docs/configuration.rst: -------------------------------------------------------------------------------- 1 | .. _configuration: 2 | 3 | Configuration 4 | ============= 5 | 6 | A configuration file with the name `species_config.ini` is required in the working folder. The configuration file contains the global settings, such as the path of the HDF5 database and the location where the data will be dowloaded. These can be provided as absolute paths or relative to the working folder. The file also contains the magnitude of Vega that is used for converting between magnitudes and fluxes. This is what the content of the configuration file may look like: 7 | 8 | .. 
code-block:: ini 9 | 10 | [species] 11 | database = species_database.hdf5 12 | data_folder = /path/to/store/data/ 13 | vega_mag = 0.03 14 | 15 | In this case, the database is stored in the working folder and an absolute path points to the folder where the data is downloaded. 16 | 17 | .. important:: 18 | The configuration file should always be located in the working folder. Are you not sure about your current working folder? Try running Python's `os.getcwd() `_ function: 19 | 20 | .. code-block:: python 21 | 22 | >>> import os 23 | >>> os.getcwd() 24 | 25 | The workflow with *species* is now initiated with the :class:`~species.core.species_init.SpeciesInit` class: 26 | 27 | .. code-block:: python 28 | 29 | >>> import species 30 | >>> species.SpeciesInit() 31 | 32 | A configuration file with default values is automatically created when `species` is initiated and the file is not present in the working folder. 33 | 34 | .. tip:: 35 | The same `data_folder` can be used in multiple configuration files. In this way, the data is only downloaded once and easily reused by a new instance of :class:`~species.core.species_init.SpeciesInit`. Also the HDF5 database can be reused by simply including the same `database` in the configuration file (:ref:`database` section). 36 | 37 | .. important:: 38 | A flux-calibrated spectrum of Vega is used for the conversion between a flux density and magnitude. The magnitude of Vega is set to 0.03 for all filters by default. If needed, the magnitude of Vega can be adjusted with the ``vega_mag`` parameter in the configuration file. 39 | -------------------------------------------------------------------------------- /species/data/spec_data/add_spec_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for adding spectral libraries to the database. 
3 | """ 4 | 5 | from typing import List, Optional 6 | 7 | import h5py 8 | 9 | from typeguard import typechecked 10 | 11 | from species.data.spec_data.spec_allers2013 import add_allers2013 12 | from species.data.spec_data.spec_bonnefoy2014 import add_bonnefoy2014 13 | from species.data.spec_data.spec_irtf import add_irtf 14 | from species.data.spec_data.spec_kesseli2017 import add_kesseli2017 15 | from species.data.spec_data.spec_spex import add_spex 16 | from species.data.spec_data.spec_vega import add_vega 17 | 18 | 19 | @typechecked 20 | def add_spec_library( 21 | input_path: str, 22 | database: h5py._hl.files.File, 23 | spec_library: str, 24 | sptypes: Optional[List[str]] = None, 25 | ) -> None: 26 | """ 27 | Function for adding spectral libraries to the database. 28 | 29 | Parameters 30 | ---------- 31 | input_path : str 32 | Folder where the data is located. 33 | database : h5py._hl.files.File 34 | Database. 35 | spec_library : str 36 | Name of the spectral library ('irtf', 'spex', 'kesseli+2017', 37 | 'bonnefoy+2014', 'allers+2013'). 38 | sptypes : list(str), None 39 | Spectral types ('F', 'G', 'K', 'M', 'L', 'T'). Currently 40 | only implemented for ``spec_library='irtf'``. 41 | 42 | Returns 43 | ------- 44 | None 45 | NoneType 46 | """ 47 | 48 | if spec_library[0:11] == "allers+2013": 49 | add_allers2013(input_path, database) 50 | 51 | elif spec_library[0:13] == "bonnefoy+2014": 52 | add_bonnefoy2014(input_path, database) 53 | 54 | elif spec_library[0:5] == "irtf": 55 | add_irtf(input_path, database, sptypes) 56 | 57 | elif spec_library[0:12] == "kesseli+2017": 58 | add_kesseli2017(input_path, database) 59 | 60 | elif spec_library[0:5] == "spex": 61 | add_spex(input_path, database) 62 | 63 | elif spec_library[0:5] == "vega": 64 | add_vega(input_path, database) 65 | 66 | else: 67 | raise ValueError( 68 | f"The spectral library '{spec_library}' is not supported. " 69 | "Please adjust the argument of 'spec_library'." 
70 | ) 71 | -------------------------------------------------------------------------------- /tests/test_read/test_spectrum.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | import pytest 5 | import numpy as np 6 | 7 | from species import SpeciesInit 8 | # from species.data.database import Database 9 | # from species.read.read_spectrum import ReadSpectrum 10 | # from species.plot.plot_spectrum import plot_spectrum 11 | from species.util import test_util 12 | 13 | 14 | class TestSpectrum: 15 | def setup_class(self): 16 | self.limit = 1e-8 17 | self.test_path = os.path.dirname(__file__) + "/" 18 | 19 | def teardown_class(self): 20 | os.remove("species_database.hdf5") 21 | os.remove("species_config.ini") 22 | # os.remove("spectrum.pdf") 23 | shutil.rmtree("data/") 24 | 25 | def test_species_init(self): 26 | test_util.create_config("./") 27 | SpeciesInit() 28 | 29 | # def test_read_spectrum(self): 30 | # database = Database() 31 | # 32 | # with pytest.warns(UserWarning): 33 | # database.add_spectrum( 34 | # "irtf", 35 | # sptypes=[ 36 | # "L", 37 | # ], 38 | # ) 39 | # 40 | # read_spectrum = ReadSpectrum("irtf", filter_name="MKO/NSFCam.H") 41 | # assert read_spectrum.wavel_range == pytest.approx( 42 | # (1.382, 1.8656), rel=1e-6, abs=0.0 43 | # ) 44 | # 45 | # def test_get_spectrum(self): 46 | # read_spectrum = ReadSpectrum("irtf", filter_name="MKO/NSFCam.H") 47 | # spec_box = read_spectrum.get_spectrum( 48 | # sptypes=[ 49 | # "L0", 50 | # ], 51 | # exclude_nan=True, 52 | # ) 53 | # 54 | # assert spec_box.wavelength[0].shape == (1063,) 55 | # assert spec_box.flux[0].shape == (1063,) 56 | # 57 | # assert np.sum(spec_box.wavelength[0]) == pytest.approx( 58 | # 1692.8604, rel=1e-7, abs=0.0 59 | # ) 60 | # assert np.sum(spec_box.flux[0]) == pytest.approx( 61 | # 4.5681937e-11, rel=1e-7, abs=0.0 62 | # ) 63 | # 64 | # plot_spectrum( 65 | # boxes=[ 66 | # spec_box, 67 | # ], 68 | # filters=[ 69 | # 
"MKO/NSFCam.H", 70 | # ], 71 | # output="spectrum.pdf", 72 | # xlim=(1.0, 2.5), 73 | # offset=(-0.08, -0.06), 74 | # ) 75 | # 76 | # assert os.path.exists("spectrum.pdf") 77 | -------------------------------------------------------------------------------- /tests/test_read/test_planck.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | import pytest 5 | import numpy as np 6 | 7 | from species import SpeciesInit 8 | from species.data.database import Database 9 | from species.read.read_planck import ReadPlanck 10 | from species.phot.syn_phot import SyntheticPhotometry 11 | from species.util import test_util 12 | 13 | 14 | class TestPlanck: 15 | def setup_class(self): 16 | self.limit = 1e-8 17 | self.test_path = os.path.dirname(__file__) + "/" 18 | 19 | def teardown_class(self): 20 | os.remove("species_database.hdf5") 21 | os.remove("species_config.ini") 22 | shutil.rmtree("data/") 23 | 24 | def test_species_init(self): 25 | test_util.create_config("./") 26 | SpeciesInit() 27 | 28 | def test_read_planck(self): 29 | read_planck = ReadPlanck(filter_name="MKO/NSFCam.J") 30 | assert read_planck.wavel_range == pytest.approx( 31 | (1.1308, 1.3812), rel=1e-6, abs=0.0 32 | ) 33 | 34 | read_planck = ReadPlanck(wavel_range=(1.0, 5.0)) 35 | assert read_planck.wavel_range == (1.0, 5.0) 36 | 37 | def test_get_spectrum(self): 38 | read_planck = ReadPlanck(filter_name="MKO/NSFCam.J") 39 | modelbox = read_planck.get_spectrum( 40 | {"teff": 2000.0, "radius": 1.0, "parallax": 100.0}, 100.0 41 | ) 42 | 43 | assert modelbox.model == "planck" 44 | assert modelbox.wavelength.shape == (204,) 45 | assert modelbox.flux.shape == (204,) 46 | 47 | assert np.sum(modelbox.wavelength) == pytest.approx( 48 | 255.37728257033913, rel=self.limit, abs=0.0 49 | ) 50 | assert np.sum(modelbox.flux) == pytest.approx( 51 | 4.228433207324114e-12, rel=self.limit, abs=0.0 52 | ) 53 | 54 | def test_get_flux(self): 55 | read_planck = 
ReadPlanck(filter_name="MKO/NSFCam.J") 56 | 57 | # low relative precision because of filter profile precision 58 | flux = read_planck.get_flux({"teff": 2000.0, "radius": 1.0, "distance": 10.0}) 59 | assert flux[0] == pytest.approx(2.079882900702339e-14, rel=1e-4, abs=0.0) 60 | 61 | # low relative precision because of filter profile precision 62 | synphot = SyntheticPhotometry(filter_name="MKO/NSFCam.J") 63 | flux = read_planck.get_flux( 64 | {"teff": 2000.0, "radius": 1.0, "distance": 10.0}, synphot=synphot 65 | ) 66 | assert flux[0] == pytest.approx(2.079882900702339e-14, rel=1e-4, abs=0.0) 67 | -------------------------------------------------------------------------------- /tests/test_read/test_color.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | import pytest 5 | import numpy as np 6 | 7 | from species import SpeciesInit 8 | 9 | # from species.data.database import Database 10 | from species.read.read_color import ReadColorColor, ReadColorMagnitude 11 | from species.util import test_util 12 | 13 | 14 | class TestColor: 15 | def setup_class(self): 16 | self.limit = 1e-8 17 | 18 | def teardown_class(self): 19 | os.remove("species_database.hdf5") 20 | os.remove("species_config.ini") 21 | shutil.rmtree("data/") 22 | 23 | def test_species_init(self): 24 | test_util.create_config("./") 25 | SpeciesInit() 26 | 27 | # def test_read_color_magnitude(self): 28 | # database = Database() 29 | # database.add_photometry("vlm-plx") 30 | # database.add_photometry("leggett") 31 | # 32 | # read_colormag = ReadColorMagnitude( 33 | # "vlm-plx", ("MKO/NSFCam.J", "MKO/NSFCam.H"), "MKO/NSFCam.J" 34 | # ) 35 | # 36 | # assert read_colormag.filters_color == ("MKO/NSFCam.J", "MKO/NSFCam.H") 37 | # assert read_colormag.filter_mag == "MKO/NSFCam.J" 38 | # 39 | # def test_get_color_magnitude(self): 40 | # read_colormag = ReadColorMagnitude( 41 | # "leggett", ("MKO/NSFCam.J", "MKO/NSFCam.H"), "MKO/NSFCam.J" 42 | # ) 
43 | # 44 | # colormag_box = read_colormag.get_color_magnitude(object_type=None) 45 | # colormag_box.open_box() 46 | # 47 | # assert np.nansum(colormag_box.color) == pytest.approx(14.05) 48 | # assert np.nansum(colormag_box.magnitude) == pytest.approx(1519.373) 49 | # 50 | # def test_read_color_color(self): 51 | # read_colorcolor = ReadColorColor( 52 | # "vlm-plx", 53 | # (("MKO/NSFCam.J", "MKO/NSFCam.H"), ("MKO/NSFCam.H", "MKO/NSFCam.K")), 54 | # ) 55 | # 56 | # assert read_colorcolor.filters_colors == ( 57 | # ("MKO/NSFCam.J", "MKO/NSFCam.H"), 58 | # ("MKO/NSFCam.H", "MKO/NSFCam.K"), 59 | # ) 60 | # 61 | # def test_get_color_color(self): 62 | # read_colorcolor = ReadColorColor( 63 | # "vlm-plx", 64 | # (("MKO/NSFCam.J", "MKO/NSFCam.H"), ("MKO/NSFCam.H", "MKO/NSFCam.K")), 65 | # ) 66 | # 67 | # colorcolor_box = read_colorcolor.get_color_color(object_type=None) 68 | # 69 | # assert np.nansum(colorcolor_box.color1) == pytest.approx(166.99384) 70 | # assert np.nansum(colorcolor_box.color2) == pytest.approx(138.5127) 71 | -------------------------------------------------------------------------------- /species/data/companion_data/companion_spectra.json: -------------------------------------------------------------------------------- 1 | { 2 | "beta Pic b": { 3 | "GPI_YJHK": [ 4 | "betapicb_gpi_yjhk.dat", 5 | null, 6 | 40.0, 7 | "Chilcote et al. 2017, AJ, 153, 182" 8 | ], 9 | "GRAVITY": [ 10 | "BetaPictorisb_2018-09-22.fits", 11 | "BetaPictorisb_2018-09-22.fits", 12 | 500.0, 13 | "Gravity Collaboration et al. 2020, A&A, 633, 110" 14 | ] 15 | }, 16 | "51 Eri b": { 17 | "SPHERE_YJH": [ 18 | "51erib_sphere_yjh.dat", 19 | null, 20 | 25.0, 21 | "Samland et al. 2017, A&A, 603, 57" 22 | ] 23 | }, 24 | "HD 206893 B": { 25 | "SPHERE_YJH": [ 26 | "hd206893b_sphere_yjh.dat", 27 | null, 28 | 25.0, 29 | "Delorme et al. 
@typechecked
def add_parsec(database: h5py._hl.files.File, input_path: str) -> None:
    """
    Function for adding the PARSEC v2.0 isochrone data to the
    database. The data file is downloaded when not already
    present in the ``input_path`` folder.

    Parameters
    ----------
    database : h5py._hl.files.File
        Database.
    input_path : str
        Folder where the data is located.

    Returns
    -------
    NoneType
        None
    """

    iso_tag = "parsec"

    url = "https://home.strw.leidenuniv.nl/~stolker/species/parsec_evolution.dat"

    file_name = url.rsplit("/", maxsplit=1)[-1]
    file_path = Path(input_path) / file_name

    if not file_path.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash="c41f60460ac0bf89390b16645238f9cc692316ce158543634763e6c928115b6e",
            fname=file_name,
            path=input_path,
            progressbar=True,
        )

    evol_data = np.loadtxt(
        file_path,
        comments="#",
        delimiter=None,
        usecols=[2, 5, 6, 7, 8, 32, 33],
        unpack=False,
    )

    # Remove the rows for which the luminosity is missing
    evol_data = evol_data[evol_data[:, 2] != -9.999, :]

    log_age, mass, log_lum, log_teff, log_g, radius_pol, radius_eq = evol_data.T

    age = 1e-6 * 10.0**log_age  # (yr) -> (Myr)
    mass *= constants.M_SUN / constants.M_JUP  # (Msun) -> (Mjup)
    teff = 10.0**log_teff  # (K)
    radius = (radius_pol + radius_eq) / 2.0  # mean of polar and equatorial (Rsun)
    radius *= constants.R_SUN / constants.R_JUP  # (Rsun) -> (Rjup)

    print("Adding isochrones: PARSEC v2.0...", end="", flush=True)

    database.create_dataset(f"isochrones/{iso_tag}/mass", data=mass)  # (Mjup)
    dset = database.create_dataset(f"isochrones/{iso_tag}/age", data=age)  # (Myr)
    database.create_dataset(f"isochrones/{iso_tag}/teff", data=teff)  # (K)
    database.create_dataset(
        f"isochrones/{iso_tag}/log_lum", data=log_lum
    )  # log(L/Lsun)
    database.create_dataset(f"isochrones/{iso_tag}/radius", data=radius)  # (Rjup)
    database.create_dataset(f"isochrones/{iso_tag}/log_g", data=log_g)  # log(g)

    dset.attrs["model"] = iso_tag

    print(" [DONE]")
    print(f"Database tag: {iso_tag}")
| import os 2 | import shutil 3 | 4 | import pytest 5 | import numpy as np 6 | 7 | from species import SpeciesInit 8 | from species.data.database import Database 9 | from species.read.read_object import ReadObject 10 | from species.util import test_util 11 | 12 | 13 | class TestObject: 14 | def setup_class(self): 15 | self.limit = 1e-8 16 | self.test_path = os.path.dirname(__file__) + "/" 17 | 18 | def teardown_class(self): 19 | os.remove("species_database.hdf5") 20 | os.remove("species_config.ini") 21 | shutil.rmtree("data/") 22 | 23 | def test_species_init(self): 24 | test_util.create_config("./") 25 | SpeciesInit() 26 | 27 | def test_read_object(self): 28 | database = Database() 29 | 30 | with pytest.warns(UserWarning): 31 | database.add_companion(name="beta Pic b") 32 | 33 | read_object = ReadObject("beta Pic b") 34 | assert read_object.object_name == "beta Pic b" 35 | 36 | with pytest.raises(ValueError) as error: 37 | ReadObject("wrong name") 38 | 39 | assert ( 40 | str(error.value) 41 | == "The object 'wrong name' is not present in the database." 
42 | ) 43 | 44 | def test_get_photometry(self): 45 | read_object = ReadObject("beta Pic b") 46 | photometry = read_object.get_photometry("Paranal/NACO.Lp") 47 | 48 | assert isinstance(photometry, np.ndarray) 49 | 50 | assert photometry[0] == pytest.approx(11.3, rel=self.limit, abs=0.0) 51 | assert photometry[1] == pytest.approx(0.06, rel=self.limit, abs=0.0) 52 | assert photometry[2] == pytest.approx( 53 | 1.5566122192562612e-15, rel=self.limit, abs=0.0 54 | ) 55 | assert photometry[3] == pytest.approx( 56 | 8.606536033479756e-17, rel=self.limit, abs=0.0 57 | ) 58 | 59 | def test_get_parallax(self): 60 | read_object = ReadObject("beta Pic b") 61 | parallax = read_object.get_parallax() 62 | 63 | assert parallax == (50.9307, 0.1482) 64 | 65 | def test_get_distance(self): 66 | read_object = ReadObject("beta Pic b") 67 | distance = read_object.get_distance() 68 | 69 | assert distance == (19.63452298908124, 0.05713373162362245) 70 | 71 | def test_get_absmag(self): 72 | read_object = ReadObject("beta Pic b") 73 | abs_mag = read_object.get_absmag("Paranal/NACO.Lp") 74 | 75 | assert abs_mag[0] == pytest.approx(9.834898226163453, rel=self.limit, abs=0.0) 76 | assert abs_mag[1] == pytest.approx(0.06033179718686261, rel=self.limit, abs=0.0) 77 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_baraffe2015.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import h5py 4 | import numpy as np 5 | import pooch 6 | 7 | from typeguard import typechecked 8 | 9 | from species.core import constants 10 | 11 | 12 | @typechecked 13 | def add_baraffe2015(database: h5py._hl.files.File, input_path: str) -> None: 14 | """ 15 | Function for adding the Baraffe et al. (2015) 16 | isochrone data to the database. 17 | 18 | Parameters 19 | ---------- 20 | database : h5py._hl.files.File 21 | Database. 22 | input_path : str 23 | Folder where the data is located. 
from pathlib import Path

import h5py
import numpy as np
import pooch

from typeguard import typechecked

from species.core import constants


@typechecked
def add_baraffe2015(database: h5py._hl.files.File, input_path: str) -> None:
    """
    Function for adding the Baraffe et al. (2015)
    isochrone data to the database.

    Parameters
    ----------
    database : h5py._hl.files.File
        Database.
    input_path : str
        Folder where the data is located.

    Returns
    -------
    NoneType
        None
    """

    url = "http://perso.ens-lyon.fr/isabelle.baraffe/BHAC15dir/BHAC15_tracks+structure"

    iso_tag = "Baraffe et al. (2015)"
    db_tag = "baraffe2015"

    input_file = url.rsplit("/", maxsplit=1)[-1]
    data_file = Path(input_path) / input_file

    if not data_file.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash="b95474c5d4284373a2fed3f06d969a44bcd925ac0e5b226cc0235acb7e068d2a",
            fname=input_file,
            path=input_path,
            progressbar=True,
        )

    # File columns: M/Ms, log t(yr), Teff, log(L/Ls), log(g), R/Rs,
    # Log(Li/Li0), log(Tc), log(ROc), Mrad, Rrad, k2conv, k2rad
    # Only the first six columns are used; data rows are marked by
    # the '!' comment character and a 45-line header is skipped
    mass, log_age, teff, log_lum, log_g, radius, _, _, _, _, _, _, _ = np.loadtxt(
        data_file, unpack=True, skiprows=45, comments="!"
    )

    age = 1e-6 * 10.0**log_age  # log10(yr) -> (Myr)
    mass *= constants.M_SUN / constants.M_JUP  # (Msun) -> (Mjup)
    radius *= constants.R_SUN / constants.R_JUP  # (Rsun) -> (Rjup)

    print(f"\nAdding isochrones: {iso_tag}...", end="", flush=True)

    # Keep a reference to the age dataset so the model
    # attribute can be attached to it afterwards
    dset = database.create_dataset(f"isochrones/{db_tag}/age", data=age)  # (Myr)
    database.create_dataset(f"isochrones/{db_tag}/mass", data=mass)  # (Mjup)
    database.create_dataset(f"isochrones/{db_tag}/teff", data=teff)  # (K)
    database.create_dataset(
        f"isochrones/{db_tag}/log_lum", data=log_lum
    )  # log(L/Lsun)
    database.create_dataset(f"isochrones/{db_tag}/log_g", data=log_g)  # log(g)
    database.create_dataset(f"isochrones/{db_tag}/radius", data=radius)  # (Rjup)

    dset.attrs["model"] = db_tag

    print(" [DONE]")
    print(f"Database tag: {db_tag}")
"""Configuration file for the Sphinx documentation builder.

This file only contains a selection of the most common options. For a
full list see:
https://www.sphinx-doc.org/en/master/usage/configuration.html
"""

# -- Path setup --------------------------------------------------------------

# Make the package importable for autodoc by adding the
# repository root (one level up from docs/) to sys.path.

import os
import sys

sys.path.insert(0, os.path.abspath("../"))

# -- Project information -----------------------------------------------------

project = "species"
copyright = "2025, Tomas Stolker"
author = "Tomas Stolker"

# -- General configuration ---------------------------------------------------

# Sphinx extensions: autodoc/napoleon for API docs from docstrings,
# viewcode for source links, nbsphinx for notebook tutorials.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
    "nbsphinx",
]

# Disable notebook timeout
nbsphinx_timeout = -1

# Allow errors from notebooks
nbsphinx_allow_errors = True

# Concatenate class and __init__ docstrings
autoclass_content = "both"

# No custom templates are used
templates_path = []

# Patterns ignored when looking for source files; this also
# affects html_static_path and html_extra_path.
exclude_patterns = [
    "_build",
    "Thumbs.db",
    ".DS_Store",
    "tutorials/.ipynb_checkpoints/*",
]

# -- Options for HTML output -------------------------------------------------

html_theme = "sphinx_book_theme"

html_theme_options = {
    "path_to_docs": "docs",
    "repository_url": "https://github.com/tomasstolker/species",
    "repository_branch": "main",
    "launch_buttons": {
        "notebook_interface": "jupyterlab",
    },
    "use_edit_page_button": True,
    "use_issues_button": True,
    "use_repository_button": True,
    "use_download_button": True,
}

html_static_path = ["_static"]

html_search_language = "en"

html_logo = "_static/species_logo.png"

html_context = {
    "display_github": True,
    "github_user": "tomasstolker",
    "github_repo": "species",
    "github_version": "main/docs/",
}
automodule:: species.util.data_util 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | species.util.dust\_util module 40 | ------------------------------ 41 | 42 | .. automodule:: species.util.dust_util 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | species.util.fit\_util module 48 | ----------------------------- 49 | 50 | .. automodule:: species.util.fit_util 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | species.util.model\_util module 56 | ------------------------------- 57 | 58 | .. automodule:: species.util.model_util 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | species.util.plot\_util module 64 | ------------------------------ 65 | 66 | .. automodule:: species.util.plot_util 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | species.util.query\_util module 72 | ------------------------------- 73 | 74 | .. automodule:: species.util.query_util 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | species.util.radtrans\_util module 80 | ---------------------------------- 81 | 82 | .. automodule:: species.util.radtrans_util 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | species.util.retrieval\_util module 88 | ----------------------------------- 89 | 90 | .. automodule:: species.util.retrieval_util 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | species.util.spec\_util module 96 | ------------------------------ 97 | 98 | .. automodule:: species.util.spec_util 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | species.util.test\_util module 104 | ------------------------------ 105 | 106 | .. automodule:: species.util.test_util 107 | :members: 108 | :undoc-members: 109 | :show-inheritance: 110 | 111 | Module contents 112 | --------------- 113 | 114 | .. 
automodule:: species.util 115 | :members: 116 | :undoc-members: 117 | :show-inheritance: 118 | -------------------------------------------------------------------------------- /docs/database.rst: -------------------------------------------------------------------------------- 1 | .. _database: 2 | 3 | Database 4 | ======== 5 | 6 | The central component of a workflow is the :class:`~species.data.database.Database`. This object is used to store various types of data in the `species_database.hdf5` file, which is located in the working folder or another location set in the `species_config.ini` file. Data needs to be added to a database only once, so databases can be conveniently reused by other workflows. 7 | 8 | Want to know which data and attributes have been stored in the database? The :func:`~species.data.database.Database.list_content` method of :class:`~species.data.database.Database` is used for listing the content of the HDF5 file: 9 | 10 | .. code-block:: python 11 | 12 | database.list_content() 13 | 14 | Data which are read from the database, as well as the output of various functions, are stored in :class:`~species.core.box.Box` objects. These can be used as input for the plotting functionalities of `species` (see examples in the :ref:`tutorials` section). Alternatively, users can extract the content of a :class:`~species.core.box.Box` and process it to their own needs. 15 | 16 | The following example will add available photometric data of HR 8799 b to the database, and read the data of the companion into an :class:`~species.core.box.ObjectBox`. The :func:`~species.core.box.Box.open_box` method is used for listing the data in a :class:`~species.core.box.Box`. 17 | 18 | .. 
code-block:: python 19 | 20 | import species 21 | 22 | species.SpeciesInit() 23 | 24 | database = species.Database() 25 | database.add_companion(name='HR 8799 b') 26 | 27 | objectbox = database.get_object(object_name='HR 8799 b') 28 | objectbox.open_box() 29 | 30 | Data are easily extracted as the attributes of a :class:`~species.core.box.Box` object. For example, in this example a dictionary with the apparent magnitudes is extracted from the :class:`~species.core.box.ObjectBox`: 31 | 32 | .. code-block:: python 33 | 34 | app_mag = objectbox.magnitude 35 | 36 | To delete a group or dataset from the HDF5 file, there is the :func:`~species.data.database.Database.delete_data` method which takes the path in the HDF5 structure as argument. For example, to remove all previously added photometric data of HR 8799 b: 37 | 38 | .. code-block:: python 39 | 40 | database.delete_data("objects/HR 8799 b/photometry") 41 | 42 | .. important:: 43 | When data is added to the HDF5 database by an existing name tag, then the existing data is first deleted before the requested data is added to the database. For example, when the AMES-Dusty spectra are present in the ``models/ames-dusty`` group and ``add_model('ames-dusty')`` is executed, then all spectra are first removed from that group before the requested spectra are added. Similarly, if the ``objects/beta Pic b/photometry/Paranal/NACO.Mp`` group contains NACO $M'$ data of beta Pic b then these data are first removed if that same filter is used by :func:`~species.data.database.Database.add_object`. 
import os
import shutil

import pytest
import numpy as np

from species import SpeciesInit
from species.read.read_filter import ReadFilter
from species.util import test_util


class TestFilter:
    """Integration tests for :class:`~species.read.read_filter.ReadFilter`."""

    def setup_class(self):
        # Relative tolerance used where high precision is expected
        self.limit = 1e-8
        self.test_path = os.path.dirname(__file__) + "/"

    def teardown_class(self):
        # Remove the files and folder created by SpeciesInit
        os.remove("species_database.hdf5")
        os.remove("species_config.ini")
        shutil.rmtree("data/")

    def test_species_init(self):
        """Initializing the workflow creates the config and database."""
        test_util.create_config("./")
        SpeciesInit()

    def test_read_filter(self):
        """The filter name is stored on the reader."""
        filt_read = ReadFilter("MKO/NSFCam.H")

        assert filt_read.filter_name == "MKO/NSFCam.H"

    def test_get_filter(self):
        """Transmission profile of an energy-counting filter."""
        profile = ReadFilter("MKO/NSFCam.H").get_filter()

        assert profile.shape == (970, 2)
        assert np.sum(profile) == pytest.approx(2089.2432, rel=1e-6, abs=0.0)

    def test_detector_type(self):
        """MKO H band is tabulated for an energy-counting detector."""
        assert ReadFilter("MKO/NSFCam.H").detector_type() == "energy"

    def test_get_filter_photon_counter(self):
        """Transmission profile of a photon-counting filter."""
        profile = ReadFilter("Keck/NIRC2.J").get_filter()

        assert profile.shape == (1054, 2)
        assert np.sum(profile) == pytest.approx(2038.0486, rel=1e-6, abs=0.0)

    def test_detector_type_photon(self):
        """NIRC2 J band is tabulated for a photon-counting detector."""
        assert ReadFilter("Keck/NIRC2.J").detector_type() == "photon"

    def test_interpolate_filter(self):
        """Interpolation preserves the tabulated sampling points."""
        interp = ReadFilter("MKO/NSFCam.H").interpolate_filter()

        assert interp.x.shape == (970,)
        assert interp.y.shape == (970,)

        assert np.sum(interp.x) == pytest.approx(1575.1079, rel=1e-8, abs=0.0)
        assert np.sum(interp.y) == pytest.approx(514.135186, rel=1e-6, abs=0.0)

    def test_wavelength_range(self):
        """Minimum and maximum wavelength of the profile (um)."""
        min_wavel, max_wavel = ReadFilter("MKO/NSFCam.H").wavelength_range()

        assert min_wavel == pytest.approx(1.382, rel=1e-6, abs=0.0)
        assert max_wavel == pytest.approx(1.8656, rel=1e-8, abs=0.0)

    def test_mean_wavelength(self):
        """Transmission-weighted mean wavelength (um)."""
        mean_wavel = ReadFilter("MKO/NSFCam.H").mean_wavelength()

        assert mean_wavel == pytest.approx(1.6298258, rel=1e-6, abs=0.0)

    def test_filter_fwhm(self):
        """Full width at half maximum of the profile (um)."""
        fwhm = ReadFilter("MKO/NSFCam.H").filter_fwhm()

        assert fwhm == pytest.approx(0.2956945960962718, rel=self.limit, abs=0.0)
from pathlib import Path

import h5py
import numpy as np
import pooch

from astropy.io import fits
from typeguard import typechecked

from species.util.data_util import extract_tarfile, remove_directory


@typechecked
def add_kesseli2017(input_path: str, database: h5py._hl.files.File) -> None:
    """
    Function for adding the SDSS stellar spectra from
    Kesseli et al. (2017) to the database.

    Parameters
    ----------
    input_path : str
        Path of the data folder.
    database : h5py._hl.files.File
        The HDF5 database.

    Returns
    -------
    NoneType
        None
    """

    url = "https://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J/ApJS/230/16"
    input_file = "J_ApJS_230_16.tar.gz"

    tar_path = Path(input_path) / input_file
    extract_dir = Path(input_path) / "kesseli+2017/"

    # Download the CDS tarball once and cache it in the data folder
    if not tar_path.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash=None,
            fname=input_file,
            path=input_path,
            progressbar=True,
        )

    # Start from a clean extraction folder
    if extract_dir.exists():
        remove_directory(extract_dir)

    print(
        "\nUnpacking SDSS spectra from Kesseli et al. 2017 (145 MB)...",
        end="",
        flush=True,
    )
    extract_tarfile(str(tar_path), str(extract_dir))
    print(" [DONE]")

    fits_dir = Path(extract_dir) / "fits"

    status = ""
    print()

    for fits_path in sorted(fits_dir.glob("*")):
        data = fits.getdata(fits_path, ext=1)

        wavelength = 1e-4 * 10.0 ** data["LogLam"]  # (um)
        flux = data["Flux"]  # Normalized units
        error = data["PropErr"]  # Normalized units

        # Dataset name and spectral type are encoded in the filename
        name = fits_path.stem.replace("_", " ")
        sptype = fits_path.stem.split("_")[0].split(".")[0]

        spdata = np.column_stack([wavelength, flux, error])

        # Overwrite the previous status line on the terminal
        print(f"\r{len(status) * ' '}", end="")

        status = f"Adding spectra... {name}"
        print(f"\r{status}", end="")

        dset = database.create_dataset(f"spectra/kesseli+2017/{name}", data=spdata)

        dset.attrs["name"] = str(name).encode()
        dset.attrs["sptype"] = str(sptype).encode()

    print(f"\r{len(status) * ' '}", end="")

    status = "Adding spectra... [DONE]"
    print(f"\r{status}")
import os
import shutil

import pytest
import numpy as np

from species import SpeciesInit
from species.data.database import Database
from species.read.read_calibration import ReadCalibration
from species.util import test_util


class TestCalibration:
    """Integration tests for :class:`~species.read.read_calibration.ReadCalibration`."""

    def setup_class(self):
        # Relative tolerance used by all pytest.approx comparisons
        self.limit = 1e-8
        self.test_path = os.path.dirname(__file__) + "/"
        # Unit scaling applied to the calibration spectrum
        self.model_param = {"scaling": 1.0}

    def teardown_class(self):
        # Remove the files and folder created by SpeciesInit
        os.remove("species_database.hdf5")
        os.remove("species_config.ini")
        shutil.rmtree("data/")

    def test_species_init(self):
        """Initializing the workflow creates the config and database."""
        test_util.create_config("./")
        SpeciesInit()

    def test_read_calibration(self):
        """A filter restricts the wavelength range of the reader."""
        db = Database()
        db.add_spectra("vega")

        calib_read = ReadCalibration("vega", filter_name="Paranal/NACO.H")

        assert calib_read.wavel_range == pytest.approx((1.44, 1.88), rel=1e-7, abs=0.0)

    def test_resample_spectrum(self):
        """Resampling onto a wavelength grid without smoothing."""
        calib_read = ReadCalibration("vega")

        spec_box = calib_read.resample_spectrum(
            wavel_points=np.linspace(1.0, 5.0, 101),
            model_param=None,
            spec_res=None,
            apply_mask=True,
            interp_highres=False,
        )

        assert np.sum(spec_box.wavelength) == 303.0
        assert np.sum(spec_box.flux) == pytest.approx(
            6.51324713540645e-08, rel=self.limit, abs=0.0
        )

    def test_resample_smooth(self):
        """Resampling combined with smoothing to a lower resolution."""
        calib_read = ReadCalibration("vega")

        spec_box = calib_read.resample_spectrum(
            wavel_points=np.linspace(1.0, 5.0, 101),
            model_param=None,
            spec_res=50.0,
            apply_mask=True,
            interp_highres=True,
        )

        assert np.sum(spec_box.wavelength) == 303.0
        assert np.sum(spec_box.flux) == pytest.approx(
            6.516063242575011e-08, rel=self.limit, abs=0.0
        )

    def test_get_spectrum(self):
        """Extracting the spectrum within a filter profile."""
        calib_read = ReadCalibration("vega", filter_name="Paranal/NACO.Lp")

        spec_box = calib_read.get_spectrum(
            self.model_param, apply_mask=True, wavel_sampling=200.0
        )

        assert np.sum(spec_box.wavelength) == pytest.approx(
            175.68298611775307, rel=self.limit, abs=0.0
        )
        assert np.sum(spec_box.flux) == pytest.approx(
            2.3131999524734138e-09, rel=self.limit, abs=0.0
        )

    def test_get_flux(self):
        """Synthetic flux for the selected filter."""
        calib_read = ReadCalibration("vega", filter_name="Paranal/NACO.H")
        flux = calib_read.get_flux(model_param=self.model_param)

        assert flux[0] == pytest.approx(1.1149293297882683e-09, rel=self.limit, abs=0.0)

    def test_get_magnitude(self):
        """Apparent magnitude of Vega; absolute magnitude needs a distance."""
        calib_read = ReadCalibration("vega", filter_name="Paranal/NACO.H")
        app_mag, abs_mag = calib_read.get_magnitude(model_param=self.model_param)

        assert app_mag[0] == 0.03
        assert abs_mag[0] is None
from pathlib import Path

import h5py
import numpy as np
import pooch
import requests

from astropy.io import fits
from typeguard import typechecked


@typechecked
def add_vega(input_path: str, database: h5py._hl.files.File) -> None:
    """
    Function for adding a flux-calibrated spectrum of Vega to the
    database. The latest spectrum (alpha_lyr_stis_011.fits) is
    downloaded from the STScI archive (see the CALSPEC page for
    details), with a mirror used as fallback when the archive is
    not reachable.

    Parameters
    ----------
    input_path : str
        Path of the data folder.
    database : h5py._hl.files.File
        Database.

    Returns
    -------
    NoneType
        None
    """

    input_file = "alpha_lyr_stis_011.fits"
    data_file = Path(input_path) / input_file

    if not data_file.exists():
        print()

        # Shared arguments for both download attempts, so the
        # checksum and filename are defined in a single place
        retrieve_kwargs = {
            "known_hash": "60aebf5c193223f69061cd176d6309730c3210051fffad0dd6ad44475199ceaa",
            "fname": input_file,
            "path": input_path,
            "progressbar": True,
        }

        try:
            url = (
                "https://archive.stsci.edu/hlsps/reference-atlases"
                "/cdbs/current_calspec/alpha_lyr_stis_011.fits"
            )

            pooch.retrieve(url=url, **retrieve_kwargs)

        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
            # STScI archive unavailable, so use the mirror instead
            url = (
                "https://home.strw.leidenuniv.nl/~stolker/"
                "species/alpha_lyr_stis_011.fits"
            )

            pooch.retrieve(url=url, **retrieve_kwargs)

    # Overwrite a previously stored Vega spectrum
    if "spectra/calibration/vega" in database:
        del database["spectra/calibration/vega"]

    vega_data = fits.getdata(data_file, ext=1)
    wavelength = vega_data["WAVELENGTH"]  # (Angstrom)
    flux = vega_data["FLUX"]  # (erg s-1 cm-2 A-1)
    error_stat = vega_data["STATERROR"]  # (erg s-1 cm-2 A-1)
    error_sys = vega_data["SYSERROR"]  # (erg s-1 cm-2 A-1)

    wavelength *= 1e-4  # (Angstrom) -> (um)
    flux *= 1e-3 * 1e4  # (erg s-1 cm-2 A-1) -> (W m-2 um-1)
    error_stat *= 1e-3 * 1e4  # (erg s-1 cm-2 A-1) -> (W m-2 um-1)
    error_sys *= 1e-3 * 1e4  # (erg s-1 cm-2 A-1) -> (W m-2 um-1)

    # Combine statistical and systematic uncertainties in quadrature
    error = np.sqrt(error_stat**2 + error_sys**2)  # (W m-2 um-1)

    print("\nAdding spectrum: Vega", end="", flush=True)

    database.create_dataset(
        "spectra/calibration/vega", data=np.vstack((wavelength, flux, error))
    )

    print("\nReference: Bohlin et al. 2014, PASP, 126")
    print("URL: https://ui.adsabs.harvard.edu/abs/2014PASP..126..711B/abstract")
2014, PASP, 126") 93 | print("URL: https://ui.adsabs.harvard.edu/abs/2014PASP..126..711B/abstract") 94 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | *species* 2 | ========= 3 | 4 | **spe**\ctral **c**\ haracterization and **i**\ nference for **e**\ xoplanet **s**\ cience 5 | 6 | .. container:: 7 | 8 | |PyPI Status| |Python Versions| |CI Status| |Docs Status| |Code Coverage| |Code Quality| |License| 9 | 10 | *species* is a toolkit for atmospheric characterization of directly imaged exoplanets. It provides a coherent framework for spectral and photometric analysis which builds on publicly-available data and models from various resources. 11 | 12 | There are tools available for retrieval of atmospheric and evolutionary parameters, synthetic photometry, interpolation of a variety atmospheric and evolutionary model grids, color-magnitude and color-color diagrams, empirical spectral analysis, spectral and photometric calibration, and analysis of emission lines. The package has been released on `PyPI `_ and is actively developed and maintained on `Github `_. 13 | 14 | Documentation 15 | ------------- 16 | 17 | Documentation can be found at `http://species.readthedocs.io `_. 18 | 19 | Tutorials 20 | --------- 21 | 22 | There are several `Jupyter notebooks `_ with tutorials for various types of analyses and functionalities. 23 | 24 | Attribution 25 | ----------- 26 | 27 | Please cite `Stolker et al. (2020) `_ whenever results from *species* are used in a publication. Please also make sure to give credit to the relevant papers regarding the use of the publicly available data that *species* benefits from. 28 | 29 | Contributing 30 | ------------ 31 | 32 | Contributions are welcome so please consider `forking `_ the repository and creating a `pull request `_. 
Bug reports and feature requests can be provided by creating an `issue `_ on the Github page. 33 | 34 | License 35 | ------- 36 | 37 | Copyright 2025 Tomas Stolker and `contributors `_. 38 | 39 | *species* is distributed under the MIT License. See `LICENSE `_ for the terms and conditions. 40 | 41 | .. |PyPI Status| image:: https://img.shields.io/pypi/v/species 42 | :target: https://pypi.python.org/pypi/species 43 | 44 | .. |Python Versions| image:: https://img.shields.io/pypi/pyversions/species 45 | :target: https://pypi.python.org/pypi/species 46 | 47 | .. |CI Status| image:: https://github.com/tomasstolker/species/workflows/CI/badge.svg?branch=main 48 | :target: https://github.com/tomasstolker/species/actions 49 | 50 | .. |Docs Status| image:: https://img.shields.io/readthedocs/species 51 | :target: http://species.readthedocs.io 52 | 53 | .. |Code Coverage| image:: https://codecov.io/gh/tomasstolker/species/branch/main/graph/badge.svg?token=LSSCPMJ5JH 54 | :target: https://codecov.io/gh/tomasstolker/species 55 | 56 | .. |Code Quality| image:: https://img.shields.io/codefactor/grade/github/tomasstolker/species 57 | :target: https://www.codefactor.io/repository/github/tomasstolker/species 58 | 59 | .. |License| image:: https://img.shields.io/github/license/tomasstolker/species 60 | :target: https://github.com/tomasstolker/species/blob/main/LICENSE 61 | -------------------------------------------------------------------------------- /species/data/companion_data/companion_spectra.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for getting spectra of directly imaged planets and brown dwarfs. 
import json
import pooch

from pathlib import Path
from typing import Dict, Optional, Tuple

from typeguard import typechecked

from species.util.core_util import print_section


@typechecked
def companion_spectra(
    input_path: Path, comp_name: str, verbose: bool = True
) -> Optional[Dict[str, Tuple[str, Optional[str], float]]]:
    """
    Function for extracting a dictionary with the spectra of
    directly imaged planets and brown dwarfs. These data can
    be added to the database with the
    :func:`~species.data.database.Database.add_companion`
    method of :class:`~species.data.database.Database`.

    Parameters
    ----------
    input_path : Path
        Path of the data folder.
    comp_name : str
        Companion name for which the spectra will be returned.
    verbose : bool
        Print details on the companion data that are added to
        the database.

    Returns
    -------
    dict, None
        Dictionary with the spectra of ``comp_name``. A ``None``
        will be returned if there are not any spectra available.
        The dictionary includes the spectrum, (optional)
        covariances, spectral resolution, and filename.
    """

    # The available spectra are listed in a JSON file
    # that is shipped alongside this module
    json_path = Path(__file__).parent.resolve() / "companion_spectra.json"

    with open(json_path, "r", encoding="utf-8") as json_file:
        comp_spec = json.load(json_file)

    if comp_name not in comp_spec:
        # No spectra available for this companion
        return None

    if verbose:
        print_section("Get companion spectra")

    data_folder = input_path / "companion_data"

    if not data_folder.exists():
        data_folder.mkdir()

    spec_dict = {}

    for spec_key, spec_value in comp_spec[comp_name].items():
        if verbose:
            print(f"Getting {spec_key} spectrum of {comp_name}...", end="", flush=True)

        spec_url = (
            f"https://home.strw.leidenuniv.nl/~stolker/species/spectra/{spec_value[0]}"
        )
        spec_path = data_folder / spec_value[0]

        # Second entry holds the (optional) covariance filename
        cov_path = None if spec_value[1] is None else data_folder / spec_value[1]

        if not spec_path.exists():
            pooch.retrieve(
                url=spec_url,
                known_hash=None,
                fname=spec_value[0],
                path=data_folder,
                progressbar=True,
            )

        if cov_path is None:
            spec_dict[spec_key] = (str(spec_path), cov_path, spec_value[2])
        else:
            spec_dict[spec_key] = (str(spec_path), str(cov_path), spec_value[2])

        if verbose:
            print(" [DONE]")

            print(
                f"Please cite {spec_value[3]} when making "
                "use of this spectrum in a publication"
            )

    return spec_dict
automodule:: species.data.isochrone_data.add_isochrone 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | species.data.isochrone\_data.iso\_ames module 16 | --------------------------------------------- 17 | 18 | .. automodule:: species.data.isochrone_data.iso_ames 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | species.data.isochrone\_data.iso\_atmo module 24 | --------------------------------------------- 25 | 26 | .. automodule:: species.data.isochrone_data.iso_atmo 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | species.data.isochrone\_data.iso\_baraffe2015 module 32 | ---------------------------------------------------- 33 | 34 | .. automodule:: species.data.isochrone_data.iso_baraffe2015 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | species.data.isochrone\_data.iso\_btsettl module 40 | ------------------------------------------------ 41 | 42 | .. automodule:: species.data.isochrone_data.iso_btsettl 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | species.data.isochrone\_data.iso\_chabrier2023 module 48 | ----------------------------------------------------- 49 | 50 | .. automodule:: species.data.isochrone_data.iso_chabrier2023 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | species.data.isochrone\_data.iso\_linder2019 module 56 | --------------------------------------------------- 57 | 58 | .. automodule:: species.data.isochrone_data.iso_linder2019 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | species.data.isochrone\_data.iso\_manual module 64 | ----------------------------------------------- 65 | 66 | .. automodule:: species.data.isochrone_data.iso_manual 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | species.data.isochrone\_data.iso\_marleau module 72 | ------------------------------------------------ 73 | 74 | .. 
automodule:: species.data.isochrone_data.iso_marleau 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | species.data.isochrone\_data.iso\_nextgen module 80 | ------------------------------------------------ 81 | 82 | .. automodule:: species.data.isochrone_data.iso_nextgen 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | species.data.isochrone\_data.iso\_parsec module 88 | ----------------------------------------------- 89 | 90 | .. automodule:: species.data.isochrone_data.iso_parsec 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | species.data.isochrone\_data.iso\_saumon2008 module 96 | --------------------------------------------------- 97 | 98 | .. automodule:: species.data.isochrone_data.iso_saumon2008 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | species.data.isochrone\_data.iso\_sonora\_bobcat module 104 | ------------------------------------------------------- 105 | 106 | .. automodule:: species.data.isochrone_data.iso_sonora_bobcat 107 | :members: 108 | :undoc-members: 109 | :show-inheritance: 110 | 111 | species.data.isochrone\_data.iso\_sonora\_diamondback module 112 | ------------------------------------------------------------ 113 | 114 | .. automodule:: species.data.isochrone_data.iso_sonora_diamondback 115 | :members: 116 | :undoc-members: 117 | :show-inheritance: 118 | 119 | Module contents 120 | --------------- 121 | 122 | .. automodule:: species.data.isochrone_data 123 | :members: 124 | :undoc-members: 125 | :show-inheritance: 126 | -------------------------------------------------------------------------------- /species/data/phot_data/phot_jwst_ydwarfs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for the photometric data and parallaxes from the JWST late T and 3 | early Y dwarf observations recorded in Beiler et al. 2024 (ApJ). 
4 | """ 5 | 6 | from pathlib import Path 7 | 8 | import h5py 9 | import numpy as np 10 | import pooch 11 | 12 | from astropy.io import fits 13 | from typeguard import typechecked 14 | 15 | from species.util.data_util import update_sptype 16 | 17 | 18 | @typechecked 19 | def add_jwst_ydwarfs(input_path: str, database: h5py._hl.files.File) -> None: 20 | """ 21 | Function for adding the synthesized photometry from 'Precise 22 | Bolometric Luminosities and Effective Temperatures of 23 Late-T 23 | and Y Dwarfs Obtained with JWST' by Beiler et al. (2024, ApJ). 24 | 25 | Parameters 26 | ---------- 27 | input_path : str 28 | Data folder. 29 | database : h5py._hl.files.File 30 | The HDF5 database that has been opened. 31 | 32 | Returns 33 | ------- 34 | NoneType 35 | None 36 | """ 37 | 38 | input_file = "Beiler2024_synth_phot.fits" 39 | data_file = Path(input_path) / "Beiler2024_synth_phot.fits" 40 | url = "https://home.strw.leidenuniv.nl/~stolker/species/Beiler2024_synth_phot.fits" 41 | 42 | if not data_file.exists(): 43 | print() 44 | 45 | pooch.retrieve( 46 | url=url, 47 | known_hash=None, 48 | fname=input_file, 49 | path=input_path, 50 | progressbar=True, 51 | ) 52 | 53 | database.create_group("photometry/beiler2024") 54 | 55 | with fits.open(data_file, mode="update") as hdu_list: 56 | phot_data = hdu_list[1].data 57 | 58 | parallax = np.array(phot_data["PLX"], dtype=float) # (mas) 59 | parallax_error = np.array(phot_data["EPLX"], dtype=float) # (mas) 60 | 61 | name = np.array(phot_data["Name"]).astype("str") 62 | 63 | sptype_nir = np.array(phot_data["SpT"]).astype("str") 64 | sptype_nir = np.core.defchararray.strip(sptype_nir) 65 | 66 | sptype = update_sptype(sptype_nir) 67 | 68 | dtype = h5py.special_dtype(vlen=str) 69 | 70 | flag = np.repeat("null", np.size(name)) 71 | 72 | dset = database.create_dataset( 73 | "photometry/beiler2024/flag", (np.size(flag),), dtype=dtype 74 | ) 75 | dset[...] 
= flag 76 | 77 | dset = database.create_dataset( 78 | "photometry/beiler2024/name", (np.size(name),), dtype=dtype 79 | ) 80 | dset[...] = name 81 | 82 | dset = database.create_dataset( 83 | "photometry/beiler2024/sptype", (np.size(sptype),), dtype=dtype 84 | ) 85 | dset[...] = sptype 86 | 87 | database.create_dataset("photometry/beiler2024/parallax", data=parallax) 88 | database.create_dataset("photometry/beiler2024/parallax_error", data=parallax_error) 89 | 90 | nirc_filts = [ 91 | "F090W", 92 | "F115W", 93 | "F140M", 94 | "F150W", 95 | "F150W2", 96 | "F162M", 97 | "F164N", 98 | "F182M", 99 | "F187N", 100 | "F200W", 101 | "F210M", 102 | "F212N", 103 | "F250M", 104 | "F277W", 105 | "F300M", 106 | "F322W2", 107 | "F323N", 108 | "F335M", 109 | "F356W", 110 | "F360M", 111 | "F405N", 112 | "F410M", 113 | "F430M", 114 | "F444W", 115 | "F460M", 116 | "F466N", 117 | "F470N", 118 | "F480M", 119 | ] 120 | 121 | for filt in nirc_filts: 122 | database.create_dataset( 123 | f"photometry/beiler2024/JWST/NIRCAM.{filt}", 124 | data=np.array(phot_data[f"{filt}"], dtype=float), 125 | ) 126 | 127 | miri_filts = ["F560W", "F770W", "F1065C", "F1130W", "F1140C"] 128 | 129 | for filt in miri_filts: 130 | database.create_dataset( 131 | f"photometry/beiler2024/JWST/MIRI.{filt}", 132 | data=np.array(phot_data[f"{filt}"], dtype=float), 133 | ) 134 | 135 | database.close() 136 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. _installation: 2 | 3 | Installation 4 | ============ 5 | 6 | ``species`` is compatible with `Python `_ versions 3.10/3.11/3.12/3.13 and is available in the `PyPI repository `_ and on `Github `_. 7 | 8 | It is recommended to install ``species`` within a `virtual environment `_ such that the required dependency versions will not cause any conflicts with other installed packages. 
9 | 10 | Installation from PyPI 11 | ---------------------- 12 | 13 | The ``species`` toolkit can be installed with the `pip package manager `_, but first `Cython `_ should be separately installed: 14 | 15 | .. code-block:: console 16 | 17 | $ pip install cython 18 | $ pip install species 19 | 20 | Or, to include all dependencies, therefore enabling all functionalities of ``species``: 21 | 22 | .. code-block:: console 23 | 24 | $ pip install 'species[full]' 25 | 26 | Or, to update ``species`` to the most recent version: 27 | 28 | .. code-block:: console 29 | 30 | $ pip install --upgrade species 31 | 32 | It is also required to separately install ``petitRADTRANS``, although this step can be skipped: 33 | 34 | .. code-block:: console 35 | 36 | $ pip install petitRADTRANS 37 | 38 | The installation of ``petitRADTRANS`` can be somewhat challenging on some machines. When skipping the installation of `petitRADTRANS `_ it is still possible to use most of the functionalities of ``species``. 39 | 40 | .. important:: 41 | The ``PyMultiNest`` package requires the manual installation of ``MultiNest``. Please follow the `instructions `_ for the building the library and make sure that the ``LD_LIBRARY_PATH`` (on Linux) or ``DYLD_LIBRARY_PATH`` (on macOS) environment variable is set. It is also possible to use ``species`` without installing ``MultiNest`` (but a warning will appear), apart from the functionalities that rely on ``PyMultiNest``. 42 | 43 | Installation from Github 44 | ------------------------ 45 | 46 | Using pip 47 | ^^^^^^^^^ 48 | 49 | The repository on `Github `_ contains the latest implementations and can also be installed with `pip `_, including the minimum of required dependencies: 50 | 51 | .. code-block:: console 52 | 53 | $ pip install git+https://github.com/tomasstolker/species.git 54 | 55 | Or, to include all dependencies, therefore enabling all functionalities of ``species``: 56 | 57 | .. 
code-block:: console 58 | 59 | $ pip install 'git+https://github.com/tomasstolker/species.git#egg=species[full]' 60 | 61 | 62 | Cloning the repository 63 | ^^^^^^^^^^^^^^^^^^^^^^ 64 | 65 | In case you want to look into and make changes to the code, it is best to clone the repository: 66 | 67 | .. code-block:: console 68 | 69 | $ git clone https://github.com/tomasstolker/species.git 70 | 71 | Next, the package is installed by running ``pip`` in the local repository folder: 72 | 73 | .. code-block:: console 74 | 75 | $ pip install -e . 76 | 77 | Or, to install with all dependencies: 78 | 79 | .. code-block:: console 80 | 81 | $ pip install ".[full]" 82 | 83 | New commits can be pulled from Github once a local copy of the repository exists: 84 | 85 | .. code-block:: console 86 | 87 | $ git pull origin main 88 | 89 | Do you want to make changes to the code? Please fork the `species` repository on the Github page and clone your own fork instead of the main repository. Contributions and pull requests are welcome (see :ref:`contributing` section). 90 | 91 | Testing `species` 92 | ----------------- 93 | 94 | The installation can now be tested, for example by starting Python in interactive mode and initializing a workflow in the current working folder: 95 | 96 | .. code-block:: python 97 | 98 | >>> from species import SpeciesInit 99 | >>> SpeciesInit() 100 | -------------------------------------------------------------------------------- /species/data/isochrone_data/add_isochrone.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for isochrone data from evolutionary models. 
3 | """ 4 | 5 | from typing import Optional 6 | 7 | import h5py 8 | 9 | from typeguard import typechecked 10 | 11 | from species.data.isochrone_data.iso_ames import add_ames 12 | from species.data.isochrone_data.iso_atmo import add_atmo 13 | from species.data.isochrone_data.iso_baraffe2015 import add_baraffe2015 14 | from species.data.isochrone_data.iso_btsettl import add_btsettl 15 | from species.data.isochrone_data.iso_chabrier2023 import add_chabrier2023 16 | from species.data.isochrone_data.iso_linder2019 import add_linder2019 17 | from species.data.isochrone_data.iso_manual import add_manual 18 | from species.data.isochrone_data.iso_marleau import add_marleau 19 | from species.data.isochrone_data.iso_nextgen import add_nextgen 20 | from species.data.isochrone_data.iso_parsec import add_parsec 21 | from species.data.isochrone_data.iso_saumon2008 import add_saumon2008 22 | from species.data.isochrone_data.iso_sonora_bobcat import add_sonora_bobcat 23 | from species.data.isochrone_data.iso_sonora_diamondback import add_sonora_diamondback 24 | 25 | 26 | @typechecked 27 | def add_isochrone_grid( 28 | data_folder: str, 29 | hdf5_file: h5py._hl.files.File, 30 | model_name: Optional[str] = None, 31 | filename: Optional[str] = None, 32 | tag: Optional[str] = None, 33 | ) -> None: 34 | """ 35 | Function for adding an isochrone grid to the database. 36 | 37 | Parameters 38 | ---------- 39 | data_folder : str 40 | Folder where the data is located. 41 | hdf5_file : h5py._hl.files.File 42 | Database. 43 | model_name : str, None 44 | Evolutionary model ('ames', 'atmo', 'atmo-chabrier2023', 45 | 'baraffe2015', 'bt-settl', 'linder2019', 'nextgen', 46 | 'parsec', 'saumon2008', 'sonora-bobcat', 47 | 'sonora-diamondback'). Isochrones will be automatically 48 | downloaded. Alternatively, the ``filename`` parameter 49 | can be used in combination with ``tag``. 50 | filename : str, None 51 | Filename with the isochrone data. 
The argument of 52 | ``model`` will be ignored by setting the argument 53 | of ``filename``. When using ``filename``, also 54 | the argument of ``tag`` should be set. Only files 55 | with isochrone data from 56 | https://phoenix.ens-lyon.fr/Grids/ and 57 | https://perso.ens-lyon.fr/isabelle.baraffe/ are 58 | supported. The parameter is ignored by setting 59 | the argument to ``None``. 60 | tag : str, None 61 | Database tag name where the isochrone that will be 62 | stored. Setting the argument is only required in 63 | combination with the ``filename`` parameter. 64 | Otherwise, the argument can be set to ``None``. 65 | 66 | Returns 67 | ------- 68 | None 69 | NoneType 70 | """ 71 | 72 | if model_name == "ames": 73 | add_ames(hdf5_file, data_folder) 74 | 75 | elif model_name == "atmo": 76 | add_atmo(hdf5_file, data_folder) 77 | 78 | elif model_name == "atmo-chabrier2023": 79 | add_chabrier2023(hdf5_file, data_folder) 80 | 81 | elif model_name == "baraffe2015": 82 | add_baraffe2015(hdf5_file, data_folder) 83 | 84 | elif model_name == "bt-settl": 85 | add_btsettl(hdf5_file, data_folder) 86 | 87 | elif model_name == "linder2019": 88 | add_linder2019(hdf5_file, data_folder) 89 | 90 | elif model_name == "marleau": 91 | add_marleau(hdf5_file, tag, filename) 92 | 93 | elif model_name == "nextgen": 94 | add_nextgen(hdf5_file, data_folder) 95 | 96 | elif model_name == "parsec": 97 | add_parsec(hdf5_file, data_folder) 98 | 99 | elif model_name == "saumon2008": 100 | add_saumon2008(hdf5_file, data_folder) 101 | 102 | elif model_name == "sonora-bobcat": 103 | add_sonora_bobcat(hdf5_file, data_folder) 104 | 105 | elif model_name == "sonora-diamondback": 106 | add_sonora_diamondback(hdf5_file, data_folder) 107 | 108 | else: 109 | add_manual(hdf5_file, tag, filename, model_name=tag) 110 | -------------------------------------------------------------------------------- /species/data/phot_data/phot_vlm_plx.py: 
@typechecked
def add_vlm_plx(input_path: str, database: h5py._hl.files.File) -> None:
    """
    Function for adding the Database of Ultracool Parallaxes to the
    database. The FITS file with data was originally downloaded from
    http://www.as.utexas.edu/~tdupuy/plx/Database_of_Ultracool_Parallaxes_files/vlm-plx-all.fits
    but that website has been taken offline, probably because of the
    new table at http://bit.ly/UltracoolSheet.

    Parameters
    ----------
    input_path : str
        Data folder.
    database : h5py._hl.files.File
        The HDF5 database that has been opened.

    Returns
    -------
    NoneType
        None
    """

    input_file = "vlm-plx-all.fits"
    data_file = Path(input_path) / input_file
    url = "https://home.strw.leidenuniv.nl/~stolker/species/vlm-plx-all.fits"

    if not data_file.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash="d31bb3162d7de890c09ebf9f0497d51159889b5f5e7c4da1ddf01f24d0c2b36f",
            fname=input_file,
            path=input_path,
            progressbar=True,
        )

    database.create_group("photometry/vlm-plx")

    with fits.open(data_file) as hdu_list:
        phot_data = hdu_list[1].data

        parallax = phot_data["PLX"]  # (mas)
        parallax_error = phot_data["EPLX"]  # (mas)

        # np.char.strip instead of np.core.defchararray.strip
        # because np.core has been removed in NumPy 2.0
        name = phot_data["NAME"]
        name = np.char.strip(name)

        # Optical spectral types
        sptype = phot_data["OSPTSTR"]
        sptype = np.char.strip(sptype)

        # Near-infrared spectral types
        sptype_nir = phot_data["ISPTSTR"]
        sptype_nir = np.char.strip(sptype_nir)

        # Fall back on the near-infrared spectral type
        # when the optical spectral type is missing
        for i, item in enumerate(sptype):
            if item == "null":
                sptype[i] = sptype_nir[i]

        flag = phot_data["FLAG"]
        flag = np.char.strip(flag)

        sptype = update_sptype(sptype)

        dtype = h5py.special_dtype(vlen=str)

        dset = database.create_dataset(
            "photometry/vlm-plx/name", (np.size(name),), dtype=dtype
        )
        dset[...] = name

        dset = database.create_dataset(
            "photometry/vlm-plx/sptype", (np.size(sptype),), dtype=dtype
        )
        dset[...] = sptype

        dset = database.create_dataset(
            "photometry/vlm-plx/flag", (np.size(flag),), dtype=dtype
        )
        dset[...] = flag

        database.create_dataset("photometry/vlm-plx/ra", data=phot_data["RA"])  # (deg)
        database.create_dataset("photometry/vlm-plx/dec", data=phot_data["DEC"])  # (deg)
        database.create_dataset("photometry/vlm-plx/parallax", data=parallax)
        database.create_dataset("photometry/vlm-plx/parallax_error", data=parallax_error)
        database.create_dataset("photometry/vlm-plx/Keck/NIRC.Y", data=phot_data["YMAG"])
        database.create_dataset("photometry/vlm-plx/MKO/NSFCam.J", data=phot_data["JMAG"])
        database.create_dataset("photometry/vlm-plx/MKO/NSFCam.H", data=phot_data["HMAG"])
        database.create_dataset("photometry/vlm-plx/MKO/NSFCam.K", data=phot_data["KMAG"])
        database.create_dataset("photometry/vlm-plx/MKO/NSFCam.Lp", data=phot_data["LMAG"])
        database.create_dataset("photometry/vlm-plx/MKO/NSFCam.Mp", data=phot_data["MMAG"])
        database.create_dataset("photometry/vlm-plx/2MASS/2MASS.J", data=phot_data["J2MAG"])
        database.create_dataset("photometry/vlm-plx/2MASS/2MASS.H", data=phot_data["H2MAG"])
        database.create_dataset(
            "photometry/vlm-plx/2MASS/2MASS.Ks", data=phot_data["K2MAG"]
        )

    database.close()
    def test_get_isochrone(self):
        """
        Regression test of ``ReadIsochrone.get_isochrone`` for the
        AMES-Cond isochrones at an age of 100 Myr, checking the
        returned arrays against pinned reference sums.
        """

        read_isochrone = ReadIsochrone("ames-cond")

        # Interpolate at 10 masses between 10 and 100, requesting
        # the J-band magnitude and the J-H color
        isochrone_box = read_isochrone.get_isochrone(
            age=100.0,
            masses=np.linspace(10.0, 100.0, 10),
            filter_mag="J",
            filters_color=("J", "H"),
        )

        # sum(linspace(10, 100, 10)) == 550, so the input masses
        # are expected back unchanged in the box
        assert np.sum(isochrone_box.mass) == pytest.approx(
            550.0, rel=self.limit, abs=0.0
        )

        assert np.sum(isochrone_box.teff) == pytest.approx(
            23007.53864754502, rel=self.limit, abs=0.0
        )

        assert np.sum(isochrone_box.logg) == pytest.approx(
            47.475577469602705, rel=self.limit, abs=0.0
        )

        assert np.sum(isochrone_box.radius) == pytest.approx(
            14.29977420304961, rel=self.limit, abs=0.0
        )

        # One color and one magnitude per requested mass
        assert isochrone_box.color.shape == (10,)
        assert isochrone_box.magnitude.shape == (10,)

        assert np.sum(isochrone_box.color) == pytest.approx(
            2.625186321644007, rel=self.limit, abs=0.0
        )

        assert np.sum(isochrone_box.magnitude) == pytest.approx(
            108.78841310475491, rel=self.limit, abs=0.0
        )
@typechecked
def add_sonora_bobcat(database: h5py._hl.files.File, input_path: str) -> None:
    """
    Function for adding the isochrone data of `Sonora Bobcat
    <https://zenodo.org/record/5063476>`_ to the database.

    Parameters
    ----------
    database : h5py._hl.files.File
        Database.
    input_path : str
        Folder where the data is located.

    Returns
    -------
    NoneType
        None
    """

    url = "https://zenodo.org/record/5063476/files/evolution_and_photometery.tar.gz"

    input_file = "evolution_and_photometery.tar.gz"
    data_folder = Path(input_path) / "sonora-bobcat-evolution"
    data_file = data_folder / input_file

    if not data_folder.exists():
        data_folder.mkdir()

    if not data_file.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash="2198426d1ca0e410fda7b63c3b7f45f3890a8d9f2fcf0a3a1e36e14185283ca5",
            fname=input_file,
            path=data_folder,
            progressbar=True,
        )

    print("\nUnpacking Sonora Bobcat evolution (929 kB)...", end="", flush=True)
    extract_tarfile(str(data_file), str(data_folder))
    print(" [DONE]")

    # Isochrone tables for three metallicities
    iso_files = [
        "evo_tables+0.0/nc+0.0_co1.0_age",
        "evo_tables+0.5/nc+0.5_co1.0_age",
        "evo_tables-0.5/nc-0.5_co1.0_age",
    ]

    labels = ["[M/H] = +0.0", "[M/H] = +0.5", "[M/H] = -0.5"]

    for iso_idx, iso_item in enumerate(iso_files):
        iso_file = f"evolution_tables/{iso_item}"
        iso_path = Path(data_folder) / iso_file

        iso_data = []

        with open(str(iso_path), encoding="utf-8") as open_file:
            for line_idx, line_item in enumerate(open_file):
                # Skip the header line and any line that does not
                # contain space-separated values
                if line_idx == 0 or " " not in line_item.strip():
                    continue

                # age(Gyr) M/Msun log(L/Lsun) Teff(K) log(g) R/Rsun
                param = list(filter(None, line_item.strip().split(" ")))
                param = list(map(float, param))

                param[0] = 1e3 * param[0]  # (Gyr) -> (Myr)
                param[1] = (
                    param[1] * constants.M_SUN / constants.M_JUP
                )  # (Msun) -> (Mjup)
                param[5] = (
                    param[5] * constants.R_SUN / constants.R_JUP
                )  # (Rsun) -> (Rjup)

                iso_data.append(
                    [param[0], param[1], param[2], param[3], param[4], param[5]]
                )

        print(
            f"\nAdding isochrones: Sonora {labels[iso_idx]}...", end="", flush=True
        )

        iso_data = np.array(iso_data)

        # For example "+0.0", used as suffix of the database tag
        metallicity = labels[iso_idx].split(" ")[2]

        dset = database.create_dataset(
            f"isochrones/sonora-bobcat{metallicity}/age", data=iso_data[:, 0]
        )  # (Myr)

        database.create_dataset(
            f"isochrones/sonora-bobcat{metallicity}/mass", data=iso_data[:, 1]
        )  # (Mjup)

        database.create_dataset(
            f"isochrones/sonora-bobcat{metallicity}/log_lum", data=iso_data[:, 2]
        )  # log(L/Lsun)

        database.create_dataset(
            f"isochrones/sonora-bobcat{metallicity}/teff", data=iso_data[:, 3]
        )  # (K)

        database.create_dataset(
            f"isochrones/sonora-bobcat{metallicity}/log_g", data=iso_data[:, 4]
        )  # log(g)

        database.create_dataset(
            f"isochrones/sonora-bobcat{metallicity}/radius", data=iso_data[:, 5]
        )  # (Rjup)

        dset.attrs["model"] = "sonora-bobcat"

        print(" [DONE]")

        # Bug fix: previously printed "sonora{metallicity}" although the
        # datasets are stored under the tag "sonora-bobcat{metallicity}"
        print(f"Database tag: sonora-bobcat{metallicity}")
30 | 31 | Returns 32 | ------- 33 | NoneType 34 | None 35 | """ 36 | 37 | # Read in all the data, ignoring empty lines or lines with "---" 38 | 39 | data = [] 40 | 41 | check_baraffe = False 42 | baraffe_continue = False 43 | 44 | with open(file_name, encoding="utf-8") as open_file: 45 | for line in open_file: 46 | if "BHAC15" in line: 47 | check_baraffe = True 48 | continue 49 | 50 | if not baraffe_continue: 51 | if "(Gyr)" in line: 52 | baraffe_continue = True 53 | else: 54 | continue 55 | 56 | if line[0] == "!": 57 | line = line[1:] 58 | 59 | elif line[:2] == " !": 60 | line = line[2:] 61 | 62 | if "---" in line or line == "\n": 63 | continue 64 | 65 | data.append(list(filter(None, line.rstrip().split(" ")))) 66 | 67 | iso_data = [] 68 | 69 | for line in data: 70 | if "(Gyr)" in line: 71 | age = line[-1] 72 | 73 | elif "lg(g)" in line: 74 | # Isochrones from Phoenix website 75 | header = ["M/Ms", "Teff(K)"] + line[1:] 76 | 77 | elif "M/Ms" in line: 78 | # Isochrones from Baraffe et al. (2015) 79 | header = line.copy() 80 | 81 | else: 82 | line.insert(0, age) 83 | iso_data.append(line) 84 | 85 | header = np.asarray(header, dtype=str) 86 | iso_data = np.asarray(iso_data, dtype=float) 87 | 88 | iso_data[:, 0] *= 1e3 # (Myr) 89 | iso_data[:, 1] *= constants.M_SUN / constants.M_JUP # (Mjup) 90 | 91 | if check_baraffe: 92 | iso_data[:, 5] *= constants.R_SUN / constants.R_JUP # (Rjup) 93 | else: 94 | iso_data[:, 5] *= 1e9 # (cm) 95 | iso_data[:, 5] *= 1e-2 / constants.R_JUP # (cm) -> (Rjup) 96 | 97 | index_sort = np.argsort(iso_data[:, 0]) 98 | iso_data = iso_data[index_sort, :] 99 | 100 | print(f"\nAdding isochrones: {tag}...", end="", flush=True) 101 | 102 | if check_baraffe: 103 | filters = header[6:] 104 | else: 105 | filters = header[7:] 106 | 107 | dtype = h5py.string_dtype(encoding="utf-8", length=None) 108 | 109 | dset = database.create_dataset( 110 | f"isochrones/{tag}/filters", (np.size(filters),), dtype=dtype 111 | ) 112 | 113 | dset[...] 
= filters 114 | 115 | dset = database.create_dataset( 116 | f"isochrones/{tag}/age", data=iso_data[:, 0] 117 | ) # (Myr) 118 | database.create_dataset(f"isochrones/{tag}/mass", data=iso_data[:, 1]) # (Mjup) 119 | database.create_dataset(f"isochrones/{tag}/teff", data=iso_data[:, 2]) # (K) 120 | database.create_dataset( 121 | f"isochrones/{tag}/log_lum", data=iso_data[:, 3] 122 | ) # log(L/Lsun) 123 | database.create_dataset(f"isochrones/{tag}/log_g", data=iso_data[:, 4]) # log(g) 124 | database.create_dataset(f"isochrones/{tag}/radius", data=iso_data[:, 5]) # (Rjup) 125 | 126 | if check_baraffe: 127 | database.create_dataset(f"isochrones/{tag}/lithium", data=iso_data[:, 6]) 128 | database.create_dataset(f"isochrones/{tag}/magnitudes", data=iso_data[:, 7:]) 129 | 130 | else: 131 | database.create_dataset(f"isochrones/{tag}/deuterium", data=iso_data[:, 6]) 132 | database.create_dataset(f"isochrones/{tag}/lithium", data=iso_data[:, 7]) 133 | database.create_dataset(f"isochrones/{tag}/magnitudes", data=iso_data[:, 8:]) 134 | 135 | dset.attrs["model"] = model_name 136 | 137 | print(" [DONE]") 138 | print(f"Database tag: {tag}") 139 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_saumon2008.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import h5py 4 | import numpy as np 5 | import pooch 6 | 7 | from typeguard import typechecked 8 | 9 | from species.core import constants 10 | from species.util.data_util import extract_tarfile 11 | 12 | 13 | @typechecked 14 | def add_saumon2008(database: h5py._hl.files.File, input_path: str) -> None: 15 | """ 16 | Function for adding the Saumon & Marley (2008) 17 | isochrone data to the database. 18 | 19 | Parameters 20 | ---------- 21 | database : h5py._hl.files.File 22 | Database. 23 | input_path : str 24 | Folder where the data is located. 
25 | 26 | Returns 27 | ------- 28 | NoneType 29 | None 30 | """ 31 | 32 | url = "https://home.strw.leidenuniv.nl/~stolker/species/BD_evolution.tgz" 33 | 34 | iso_tag = "Saumon & Marley (2008)" 35 | iso_size = "800 kB" 36 | 37 | data_folder = Path(input_path) / "saumon_marley_2008" 38 | 39 | if not data_folder.exists(): 40 | data_folder.mkdir() 41 | 42 | input_file = url.rsplit("/", maxsplit=1)[-1] 43 | data_file = Path(input_path) / input_file 44 | 45 | if not data_file.exists(): 46 | print() 47 | 48 | pooch.retrieve( 49 | url=url, 50 | known_hash="fb64793b74a4503f13b9b1daa7d04e9594e9ba6f87353a0dbb50b73257961c88", 51 | fname=input_file, 52 | path=input_path, 53 | progressbar=True, 54 | ) 55 | 56 | print(f"\nUnpacking {iso_tag} isochrones ({iso_size})...", end="", flush=True) 57 | extract_tarfile(str(data_file), str(data_folder)) 58 | print(" [DONE]") 59 | 60 | iso_files = [ 61 | "nc_solar_age", 62 | "nc-0.3_age", 63 | "nc+0.3_age", 64 | "f2_solar_age", 65 | "hybrid_solar_age", 66 | ] 67 | 68 | labels = [ 69 | "Cloudless [M/H] = 0.0", 70 | "Cloudless [M/H] = -0.3", 71 | "Cloudless [M/H] = +0.3", 72 | "Cloudy f_sed = 2", 73 | "Hybrid (cloudless / f_sed = 2)", 74 | ] 75 | 76 | db_tags = [ 77 | "saumon2008-nc_solar", 78 | "saumon2008-nc_-0.3", 79 | "saumon2008-nc_+0.3", 80 | "saumon2008-f2_solar", 81 | "saumon2008-hybrid_solar", 82 | ] 83 | 84 | for iso_idx, iso_item in enumerate(iso_files): 85 | iso_path = Path(data_folder) / iso_item 86 | 87 | iso_data = [] 88 | 89 | with open(str(iso_path), encoding="utf-8") as open_file: 90 | for i, line in enumerate(open_file): 91 | if i == 0 or " " not in line.strip(): 92 | continue 93 | 94 | # age(Gyr) M/Msun log(L/Lsun) Teff(K) log(g) R/Rsun 95 | param = list(filter(None, line.strip().split(" "))) 96 | param = list(map(float, param)) 97 | 98 | param[0] = 1e3 * param[0] # (Gyr) -> (Myr) 99 | param[1] = ( 100 | param[1] * constants.M_SUN / constants.M_JUP 101 | ) # (Msun) -> (Mjup) 102 | param[5] = ( 103 | param[5] * 
constants.R_SUN / constants.R_JUP 104 | ) # (Rsun) -> (Rjup) 105 | 106 | iso_data.append( 107 | [param[0], param[1], param[2], param[3], param[4], param[5]] 108 | ) 109 | 110 | print( 111 | f"\nAdding isochrones: {iso_tag} {labels[iso_idx]}...", end="", flush=True 112 | ) 113 | 114 | iso_data = np.array(iso_data) 115 | 116 | dset = database.create_dataset( 117 | f"isochrones/{db_tags[iso_idx]}/age", data=iso_data[:, 0] 118 | ) # (Myr) 119 | database.create_dataset( 120 | f"isochrones/{db_tags[iso_idx]}/mass", data=iso_data[:, 1] 121 | ) # (Mjup) 122 | database.create_dataset( 123 | f"isochrones/{db_tags[iso_idx]}/log_lum", data=iso_data[:, 2] 124 | ) # log(L/Lsun) 125 | database.create_dataset( 126 | f"isochrones/{db_tags[iso_idx]}/teff", data=iso_data[:, 3] 127 | ) # (K) 128 | database.create_dataset( 129 | f"isochrones/{db_tags[iso_idx]}/log_g", data=iso_data[:, 4] 130 | ) # log(g) 131 | database.create_dataset( 132 | f"isochrones/{db_tags[iso_idx]}/radius", data=iso_data[:, 5] 133 | ) # (Rjup) 134 | 135 | dset.attrs["model"] = "saumon2008" 136 | 137 | print(" [DONE]") 138 | print(f"Database tag: {db_tags[iso_idx]}") 139 | -------------------------------------------------------------------------------- /tests/test_read/test_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | import pytest 5 | import numpy as np 6 | 7 | from species import SpeciesInit 8 | from species.data.database import Database 9 | from species.read.read_model import ReadModel 10 | from species.util import test_util 11 | 12 | 13 | class TestModel: 14 | def setup_class(self): 15 | self.limit = 1e-8 16 | self.test_path = os.path.dirname(__file__) + "/" 17 | self.model_param = { 18 | "teff": 2200.0, 19 | "logg": 4.5, 20 | "radius": 1.0, 21 | "parallax": 100.0, 22 | } 23 | 24 | def teardown_class(self): 25 | os.remove("species_database.hdf5") 26 | os.remove("species_config.ini") 27 | shutil.rmtree("data/") 28 | 29 | def 
test_species_init(self): 30 | test_util.create_config("./") 31 | SpeciesInit() 32 | 33 | def test_read_model(self): 34 | database = Database() 35 | 36 | database.add_model( 37 | "ames-cond", 38 | wavel_range=(1.0, 5.0), 39 | wavel_sampling=200.0, 40 | teff_range=(2000.0, 2500.0), 41 | ) 42 | 43 | read_model = ReadModel("ames-cond") 44 | assert read_model.model == "ames-cond" 45 | 46 | def test_get_model(self): 47 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 48 | 49 | model_box = read_model.get_model( 50 | self.model_param, 51 | spec_res=100.0, 52 | magnitude=False, 53 | ) 54 | 55 | assert np.sum(model_box.wavelength) == pytest.approx( 56 | 152.40318409856752, rel=self.limit, abs=0.0 57 | ) 58 | assert np.sum(model_box.flux) == pytest.approx( 59 | 2.5045445767075435e-12, rel=self.limit, abs=0.0 60 | ) 61 | 62 | model_box = read_model.get_model( 63 | self.model_param, 64 | spec_res=100.0, 65 | magnitude=True, 66 | ) 67 | 68 | assert np.sum(model_box.wavelength) == pytest.approx( 69 | 152.40318409856752, rel=self.limit, abs=0.0 70 | ) 71 | assert np.sum(model_box.flux) == pytest.approx( 72 | 1076.665025699217, rel=self.limit, abs=0.0 73 | ) 74 | 75 | def test_get_data(self): 76 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 77 | model_box = read_model.get_data(self.model_param) 78 | 79 | assert np.sum(model_box.wavelength) == pytest.approx( 80 | 152.40318409856752, rel=self.limit, abs=0.0 81 | ) 82 | assert np.sum(model_box.flux) == pytest.approx( 83 | 2.5044525130092417e-12, rel=self.limit, abs=0.0 84 | ) 85 | 86 | def test_get_flux(self): 87 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 88 | flux = read_model.get_flux(self.model_param) 89 | 90 | assert flux[0] == pytest.approx(3.489600819641628e-14, rel=self.limit, abs=0.0) 91 | 92 | def test_get_magnitude(self): 93 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 94 | magnitude = read_model.get_magnitude(self.model_param) 95 | 96 | 
assert magnitude[0] == pytest.approx( 97 | 11.291178975241221, rel=self.limit, abs=0.0 98 | ) 99 | assert magnitude[1] == pytest.approx( 100 | 11.291178975241221, rel=self.limit, abs=0.0 101 | ) 102 | 103 | def test_get_bounds(self): 104 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 105 | bounds = read_model.get_bounds() 106 | 107 | assert bounds["teff"] == (2000.0, 2500.0) 108 | assert bounds["logg"] == (2.5, 5.5) 109 | 110 | def test_get_wavelengths(self): 111 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 112 | wavelengths = read_model.get_wavelengths() 113 | 114 | assert np.sum(wavelengths) == pytest.approx( 115 | 813.2224003071026, rel=1e-7, abs=0.0 116 | ) 117 | 118 | def test_get_points(self): 119 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 120 | points = read_model.get_points() 121 | 122 | assert np.sum(points["teff"]) == 13500.0 123 | assert np.sum(points["logg"]) == 28.0 124 | 125 | def test_get_parameters(self): 126 | read_model = ReadModel("ames-cond", filter_name="Paranal/NACO.H") 127 | parameters = read_model.get_parameters() 128 | 129 | assert parameters == ["teff", "logg"] 130 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_atmo.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import h5py 4 | import numpy as np 5 | import pooch 6 | 7 | from typeguard import typechecked 8 | 9 | from species.core import constants 10 | from species.util.data_util import extract_tarfile 11 | 12 | 13 | @typechecked 14 | def add_atmo(database: h5py._hl.files.File, input_path: str) -> None: 15 | """ 16 | Function for adding the AMES-Cond and AMES-Dusty 17 | isochrone data to the database. 18 | 19 | Parameters 20 | ---------- 21 | database : h5py._hl.files.File 22 | Database. 23 | input_path : str 24 | Folder where the data is located. 
25 | 26 | Returns 27 | ------- 28 | NoneType 29 | None 30 | """ 31 | 32 | url = ( 33 | "https://home.strw.leidenuniv.nl/~stolker/" 34 | "species/atmo_evolutionary_tracks.tgz" 35 | ) 36 | 37 | iso_tag = "ATMO" 38 | iso_size = "9.6 MB" 39 | 40 | data_folder = Path(input_path) / "atmo_evolutionary_tracks" 41 | 42 | if not data_folder.exists(): 43 | data_folder.mkdir() 44 | 45 | input_file = url.rsplit("/", maxsplit=1)[-1] 46 | data_file = Path(input_path) / input_file 47 | 48 | if not data_file.exists(): 49 | print() 50 | 51 | pooch.retrieve( 52 | url=url, 53 | known_hash="f905698a397b980acdc7a3cd7fa514a39e315c674b21dcd5cbba70833777cd3b", 54 | fname=input_file, 55 | path=input_path, 56 | progressbar=True, 57 | ) 58 | 59 | print(f"\nUnpacking {iso_tag} isochrones ({iso_size})...", end="", flush=True) 60 | extract_tarfile(str(data_file), str(data_folder)) 61 | print(" [DONE]") 62 | 63 | iso_files = [ 64 | "ATMO_CEQ", 65 | "ATMO_NEQ_weak", 66 | "ATMO_NEQ_strong", 67 | ] 68 | 69 | labels = [ 70 | "ATMO equilibrium chemistry", 71 | "ATMO weak non-equilibrium chemistry", 72 | "ATMO strong non-equilibrium chemistry", 73 | ] 74 | 75 | db_tags = [ 76 | "atmo-ceq", 77 | "atmo-neq-weak", 78 | "atmo-neq-strong", 79 | ] 80 | 81 | for iso_idx, iso_item in enumerate(iso_files): 82 | iso_path = Path(data_folder) / iso_item / "MKO_WISE_IRAC" 83 | 84 | # Ignore hidden files 85 | file_list = sorted(iso_path.glob("[!.]*.txt")) 86 | 87 | for file_idx, file_item in enumerate(file_list): 88 | # Mass (Msun) - Age (Gyr) - Teff (K) - log(L/Lsun) - Radius (Rsun) - log(g) 89 | if file_idx == 0: 90 | iso_data = np.loadtxt(str(file_item)) 91 | 92 | else: 93 | iso_load = np.loadtxt(str(file_item)) 94 | iso_data = np.vstack((iso_data, iso_load)) 95 | 96 | with open(str(file_item), encoding="utf-8") as open_file: 97 | parameters = open_file.readline() 98 | filter_names = parameters.split()[7:] 99 | 100 | iso_data[:, 0] *= constants.M_SUN / constants.M_JUP # (Msun) -> (Mjup) 101 | iso_data[:, 1] *= 
1e3 # (Gyr) -> (Myr) 102 | iso_data[:, 4] *= constants.R_SUN / constants.R_JUP # (Rsun) -> (Rjup) 103 | 104 | print(f"Adding isochrones: {labels[iso_idx]}...", end="", flush=True) 105 | 106 | dtype = h5py.string_dtype(encoding="utf-8", length=None) 107 | 108 | dset = database.create_dataset( 109 | f"isochrones/{db_tags[iso_idx]}/filters", 110 | (np.size(filter_names),), 111 | dtype=dtype, 112 | ) 113 | 114 | dset[...] = filter_names 115 | 116 | database.create_dataset( 117 | f"isochrones/{db_tags[iso_idx]}/mass", data=iso_data[:, 0] 118 | ) # (Mjup) 119 | dset = database.create_dataset( 120 | f"isochrones/{db_tags[iso_idx]}/age", data=iso_data[:, 1] 121 | ) # (Myr) 122 | database.create_dataset( 123 | f"isochrones/{db_tags[iso_idx]}/teff", data=iso_data[:, 2] 124 | ) # (K) 125 | database.create_dataset( 126 | f"isochrones/{db_tags[iso_idx]}/log_lum", data=iso_data[:, 3] 127 | ) # log(L/Lsun) 128 | database.create_dataset( 129 | f"isochrones/{db_tags[iso_idx]}/radius", data=iso_data[:, 4] 130 | ) # (Rjup) 131 | database.create_dataset( 132 | f"isochrones/{db_tags[iso_idx]}/log_g", data=iso_data[:, 5] 133 | ) # log(g) 134 | 135 | database.create_dataset( 136 | f"isochrones/{db_tags[iso_idx]}/magnitudes", data=iso_data[:, 6:] 137 | ) 138 | 139 | dset.attrs["model"] = "atmo" 140 | 141 | print(" [DONE]") 142 | print(f"Database tag: {db_tags[iso_idx]}") 143 | -------------------------------------------------------------------------------- /species/data/misc_data/accretion_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for the accretion luminosity relation. 
3 | """ 4 | 5 | import os 6 | 7 | import h5py 8 | import numpy as np 9 | import pooch 10 | 11 | from typeguard import typechecked 12 | 13 | from species.core import constants 14 | 15 | 16 | @typechecked 17 | def add_accretion_relation(input_path: str, database: h5py._hl.files.File) -> None: 18 | """ 19 | Function for adding the accretion relation from `Aoyama et al. 20 | (2021) `_ and extrapolation from 22 | `Marleau & Aoyama (2022) `_ to the database. It provides 24 | coefficients to convert line to accretion luminosities. 25 | 26 | Parameters 27 | ---------- 28 | input_path : str 29 | Folder where the data is located. 30 | database : h5py._hl.files.File 31 | Database. 32 | 33 | Returns 34 | ------- 35 | None 36 | NoneType 37 | """ 38 | 39 | if not os.path.exists(input_path): 40 | os.makedirs(input_path) 41 | 42 | url = ( 43 | "https://home.strw.leidenuniv.nl/~stolker/" 44 | "species/ab-Koeffienzenten_mehrStellen.dat" 45 | ) 46 | 47 | data_file = os.path.join(input_path, "ab-Koeffienzenten_mehrStellen.dat") 48 | 49 | if not os.path.isfile(data_file): 50 | pooch.retrieve( 51 | url=url, 52 | known_hash="941b416e678128648c9ce485016af908f16bfe16ab72e0f8cb57a6bad963429a", 53 | fname="ab-Koeffienzenten_mehrStellen.dat", 54 | path=input_path, 55 | progressbar=True, 56 | ) 57 | 58 | print("Adding coefficients for accretion relation (2.1 kB)...", end="", flush=True) 59 | 60 | data = np.genfromtxt( 61 | data_file, 62 | dtype=None, 63 | skip_header=1, 64 | encoding=None, 65 | names=True, 66 | usecols=[0, 1, 2, 3, 4], 67 | ) 68 | 69 | line_names = data["name"] 70 | coefficients = np.column_stack([data["a"], data["b"]]) 71 | 72 | n_init = data["ni"] 73 | n_final = data["nf"] 74 | 75 | delta_n_min = {"H": 9, "Pa": 8, "Br": 6} 76 | delta_n_max = {"H": 14, "Pa": 13, "Br": 12} 77 | 78 | for i, item in enumerate(["H", "Pa", "Br"]): 79 | for j in range(delta_n_min[item], delta_n_max[item] + 1): 80 | # n_f: Ly=1, H=Ba=2, Pa=3, Br=4 81 | n_f_tmp = i + 2 82 | 83 | idx_insert = 
np.argwhere(line_names == f"{item}{j+n_f_tmp-1}")[0][0] + 1 84 | line_names = np.insert(line_names, idx_insert, f"{item}{j+n_f_tmp}") 85 | n_final = np.insert(n_final, idx_insert, n_f_tmp) 86 | 87 | # delta_n = n_i - n_f 88 | n_i_tmp = j + n_f_tmp 89 | n_init = np.insert(n_init, idx_insert, n_i_tmp) 90 | 91 | # Relation for extrapolation of coefficients 92 | # See Marleau & Aoyama (2022) 93 | 94 | a_coeff = 0.811 - (1.0 / (9.90 * n_f_tmp - 9.5 * n_i_tmp)) 95 | 96 | b_coeff = ( 97 | 1.0 98 | + 1.05 * np.log(n_i_tmp) 99 | + (1.0 / (n_i_tmp - n_f_tmp)) 100 | - (1.0 / n_f_tmp) 101 | ) * (1.07 + 0.0694 * n_f_tmp) - 1.41 102 | 103 | coefficients = np.insert( 104 | coefficients, idx_insert, [[a_coeff, b_coeff]], axis=0 105 | ) 106 | 107 | # Rest vacuum wavelength (um) from Rydberg formula, 108 | # which is valid usually to three or four decimal places. 109 | # Exact values, if needed, can be obtained from e.g. 110 | # Wiese & Fuhr (2009): 111 | # http://adsabs.harvard.edu/abs/2009JPCRD..38..565W 112 | # https://www.nist.gov/system/files/documents/srd/jpcrd382009565p.pdf 113 | wavelengths = 1e6 / (constants.RYDBERG * (1.0 / n_final**2 - 1.0 / n_init**2)) 114 | 115 | # data = np.column_stack( 116 | # [ 117 | # line_names, 118 | # wavelengths, 119 | # n_init, 120 | # n_final, 121 | # coefficients[:, 0], 122 | # coefficients[:, 1], 123 | # ] 124 | # ) 125 | # np.savetxt("acc_lines.dat", data, delimiter=" ", fmt="%s") 126 | 127 | database.create_dataset("accretion/wavelengths", data=wavelengths) 128 | database.create_dataset("accretion/coefficients", data=coefficients) 129 | 130 | dtype = h5py.special_dtype(vlen=str) 131 | dset = database.create_dataset( 132 | "accretion/hydrogen_lines", (np.size(line_names),), dtype=dtype 133 | ) 134 | dset[...] = line_names 135 | 136 | print(" [DONE]") 137 | 138 | print( 139 | "Please cite Aoyama et al. 
(2021) and Marleau & Aoyama " 140 | "(2022) when using the accretion relation in a publication" 141 | ) 142 | -------------------------------------------------------------------------------- /species/data/spec_data/spec_bonnefoy2014.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for adding young, M- and L-type dwarf spectra from 3 | `Bonnefoy et al. (2014) `_ to the database. 5 | """ 6 | 7 | import gzip 8 | 9 | from pathlib import Path 10 | 11 | import h5py 12 | import numpy as np 13 | import pooch 14 | 15 | from astropy.io import fits 16 | from typeguard import typechecked 17 | 18 | from species.util.data_util import extract_tarfile, remove_directory 19 | 20 | 21 | @typechecked 22 | def add_bonnefoy2014(input_path: str, database: h5py._hl.files.File) -> None: 23 | """ 24 | Function for adding the SINFONI spectra of young, M- and L-type 25 | dwarfs from `Bonnefoy et al. (2014) `_ to the database. 27 | 28 | Parameters 29 | ---------- 30 | input_path : str 31 | Path of the data folder. 32 | database : h5py._hl.files.File 33 | The HDF5 database. 34 | 35 | Returns 36 | ------- 37 | NoneType 38 | None 39 | """ 40 | 41 | print_text = "spectra of young M/L type objects from Bonnefoy et al. 
2014" 42 | 43 | url = "http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J/A+A/562/A127/" 44 | input_file = "J_A+A_562_A127.tar.gz" 45 | data_file = Path(input_path) / input_file 46 | data_folder = Path(input_path) / "bonnefoy+2014/" 47 | 48 | if not data_file.exists(): 49 | print() 50 | 51 | pooch.retrieve( 52 | url=url, 53 | known_hash=None, 54 | fname=input_file, 55 | path=input_path, 56 | progressbar=True, 57 | ) 58 | 59 | if data_folder.exists(): 60 | remove_directory(data_folder) 61 | 62 | print(f"\nUnpacking {print_text} (2.3 MB)...", end="", flush=True) 63 | extract_tarfile(str(data_file), str(data_folder)) 64 | print(" [DONE]") 65 | 66 | spec_dict = {} 67 | 68 | data_file = Path(data_folder) / "stars.dat.gz" 69 | 70 | with gzip.open(data_file, "r") as gzip_file: 71 | for line in gzip_file: 72 | name = line[:13].decode().strip() 73 | files = line[80:].decode().strip().split() 74 | sptype = line[49:56].decode().strip() 75 | 76 | if name == "NAME 2M1207A": 77 | name = "2M1207A" 78 | 79 | if len(sptype) == 0: 80 | sptype = None 81 | elif "." 
in sptype: 82 | sptype = sptype[:4] 83 | else: 84 | sptype = sptype[:2] 85 | 86 | if name == "Cha1109": 87 | sptype = "M9" 88 | elif name == "DH Tau B": 89 | sptype = "M9" 90 | elif name == "TWA 22A": 91 | sptype = "M6" 92 | elif name == "TWA 22B": 93 | sptype = "M6" 94 | elif name == "CT Cha b": 95 | sptype = "M9" 96 | 97 | spec_dict[name] = {"name": name, "sptype": sptype, "files": files} 98 | 99 | fits_folder = Path(data_folder) / "sp" 100 | 101 | print_message = "" 102 | print() 103 | 104 | spec_files = sorted(fits_folder.glob("*")) 105 | 106 | for file_item in spec_files: 107 | fname_split = file_item.stem.split("_") 108 | 109 | data = fits.getdata(file_item) 110 | 111 | for spec_key, spec_value in spec_dict.items(): 112 | if file_item.name in spec_value["files"]: 113 | if spec_key == "TWA 22AB": 114 | # Binary spectrum 115 | continue 116 | 117 | if "JHK" in fname_split: 118 | spec_value["JHK"] = data 119 | 120 | elif "J" in fname_split: 121 | spec_value["J"] = data 122 | 123 | elif "H+K" in fname_split or "HK" in fname_split: 124 | spec_value["HK"] = data 125 | 126 | for spec_key, spec_value in spec_dict.items(): 127 | empty_message = len(print_message) * " " 128 | print(f"\r{empty_message}", end="") 129 | 130 | print_message = f"Adding spectra... {spec_key}" 131 | print(f"\r{print_message}", end="") 132 | 133 | if "JHK" in spec_value: 134 | sp_data = spec_value["JHK"] 135 | 136 | elif "J" in spec_value and "HK" in spec_value: 137 | sp_data = np.vstack((spec_value["J"], spec_value["HK"])) 138 | 139 | else: 140 | # Binary spectrum 141 | continue 142 | 143 | dset = database.create_dataset( 144 | f"spectra/bonnefoy+2014/{spec_key}", data=sp_data 145 | ) 146 | 147 | dset.attrs["name"] = str(spec_key).encode() 148 | dset.attrs["sptype"] = str(spec_value["sptype"]).encode() 149 | 150 | empty_message = len(print_message) * " " 151 | print(f"\r{empty_message}", end="") 152 | 153 | print_message = "Adding spectra... 
[DONE]" 154 | print(f"\r{print_message}") 155 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_chabrier2023.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import h5py 4 | import numpy as np 5 | import pooch 6 | 7 | from typeguard import typechecked 8 | 9 | from species.core import constants 10 | from species.util.data_util import extract_tarfile 11 | 12 | 13 | @typechecked 14 | def add_chabrier2023(database: h5py._hl.files.File, input_path: str) -> None: 15 | """ 16 | Function for adding the AMES-Cond and AMES-Dusty 17 | isochrone data to the database. 18 | 19 | Parameters 20 | ---------- 21 | database : h5py._hl.files.File 22 | Database. 23 | input_path : str 24 | Folder where the data is located. 25 | 26 | Returns 27 | ------- 28 | NoneType 29 | None 30 | """ 31 | 32 | url = "https://home.strw.leidenuniv.nl/~stolker/species/chabrier2023_tracks.tgz" 33 | 34 | iso_tag = "ATMO (Chabrier et al. 2023" 35 | iso_size = "12 MB" 36 | 37 | data_folder = Path(input_path) / "chabrier2023_tracks" 38 | 39 | if not data_folder.exists(): 40 | data_folder.mkdir() 41 | 42 | input_file = url.rsplit("/", maxsplit=1)[-1] 43 | data_file = Path(input_path) / input_file 44 | 45 | if not data_file.exists(): 46 | print() 47 | 48 | pooch.retrieve( 49 | url=url, 50 | known_hash="2f268839107d7084f2512152e4da4be9fb1220793619899806c1f3fc4b3e4034", 51 | fname=input_file, 52 | path=input_path, 53 | progressbar=True, 54 | ) 55 | 56 | print(f"\nUnpacking {iso_tag} isochrones ({iso_size})...", end="", flush=True) 57 | extract_tarfile(str(data_file), str(data_folder)) 58 | print(" [DONE]") 59 | 60 | iso_files = [ 61 | "ATMO_CEQ", 62 | "ATMO_NEQ_weak", 63 | "ATMO_NEQ_strong", 64 | ] 65 | 66 | labels = [ 67 | "ATMO equilibrium chemistry (Chabrier et al. 2023)", 68 | "ATMO weak non-equilibrium chemistry (Chabrier et al. 
2023)", 69 | "ATMO strong non-equilibrium chemistry (Chabrier et al. 2023)", 70 | ] 71 | 72 | db_tags = [ 73 | "atmo-ceq-chabrier2023", 74 | "atmo-neq-weak-chabrier2023", 75 | "atmo-neq-strong-chabrier2023", 76 | ] 77 | 78 | for iso_idx, iso_item in enumerate(iso_files): 79 | tar_file = str(data_folder / iso_item) + "_neweos.tar.gz" 80 | print(f"\nUnpacking {iso_item} isochrones...", end="", flush=True) 81 | extract_tarfile(tar_file, str(data_folder)) 82 | print(" [DONE]") 83 | 84 | iso_path = Path(data_folder) / iso_item / "MKO_WISE_IRAC_vega" 85 | 86 | # Ignore hidden files 87 | file_list = sorted(iso_path.glob("[!.]*.txt")) 88 | 89 | for file_idx, file_item in enumerate(file_list): 90 | # Mass (Msun) - Age (Gyr) - Teff (K) - log(L/Lsun) - Radius (Rsun) - log(g) 91 | if file_idx == 0: 92 | iso_data = np.loadtxt(str(file_item)) 93 | 94 | else: 95 | iso_load = np.loadtxt(str(file_item)) 96 | iso_data = np.vstack((iso_data, iso_load)) 97 | 98 | with open(str(file_item), encoding="utf-8") as open_file: 99 | parameters = open_file.readline() 100 | filter_names = parameters.split()[7:] 101 | 102 | iso_data[:, 0] *= constants.M_SUN / constants.M_JUP # (Msun) -> (Mjup) 103 | iso_data[:, 1] *= 1e3 # (Gyr) -> (Myr) 104 | iso_data[:, 4] *= constants.R_SUN / constants.R_JUP # (Rsun) -> (Rjup) 105 | 106 | print(f"Adding isochrones: {labels[iso_idx]}...", end="", flush=True) 107 | 108 | dtype = h5py.string_dtype(encoding="utf-8", length=None) 109 | 110 | dset = database.create_dataset( 111 | f"isochrones/{db_tags[iso_idx]}/filters", 112 | (np.size(filter_names),), 113 | dtype=dtype, 114 | ) 115 | 116 | dset[...] 
= filter_names 117 | 118 | database.create_dataset( 119 | f"isochrones/{db_tags[iso_idx]}/mass", data=iso_data[:, 0] 120 | ) # (Mjup) 121 | dset = database.create_dataset( 122 | f"isochrones/{db_tags[iso_idx]}/age", data=iso_data[:, 1] 123 | ) # (Myr) 124 | database.create_dataset( 125 | f"isochrones/{db_tags[iso_idx]}/teff", data=iso_data[:, 2] 126 | ) # (K) 127 | database.create_dataset( 128 | f"isochrones/{db_tags[iso_idx]}/log_lum", data=iso_data[:, 3] 129 | ) # log(L/Lsun) 130 | database.create_dataset( 131 | f"isochrones/{db_tags[iso_idx]}/radius", data=iso_data[:, 4] 132 | ) # (Rjup) 133 | database.create_dataset( 134 | f"isochrones/{db_tags[iso_idx]}/log_g", data=iso_data[:, 5] 135 | ) # log(g) 136 | 137 | database.create_dataset( 138 | f"isochrones/{db_tags[iso_idx]}/magnitudes", data=iso_data[:, 6:] 139 | ) 140 | 141 | dset.attrs["model"] = "atmo" 142 | 143 | print(" [DONE]") 144 | print(f"Database tag: {db_tags[iso_idx]}") 145 | -------------------------------------------------------------------------------- /tests/test_phot/test_synphot.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import urllib.request 4 | 5 | import pytest 6 | import numpy as np 7 | 8 | from species import SpeciesInit 9 | from species.phot.syn_phot import SyntheticPhotometry 10 | from species.util import test_util 11 | 12 | 13 | class TestPhotometry: 14 | def setup_class(self): 15 | self.limit = 1e-8 16 | 17 | url = "http://irtfweb.ifa.hawaii.edu/~spex/IRTF_Spectral_Library/Data/plnt_Jupiter.txt" 18 | urllib.request.urlretrieve(url, "plnt_Jupiter.txt") 19 | 20 | def teardown_class(self): 21 | os.remove("species_database.hdf5") 22 | os.remove("species_config.ini") 23 | os.remove("plnt_Jupiter.txt") 24 | shutil.rmtree("data/") 25 | 26 | def test_species_init(self): 27 | test_util.create_config("./") 28 | SpeciesInit() 29 | 30 | def test_synthetic_photometry(self): 31 | SyntheticPhotometry("MKO/NSFCam.J") 32 | 33 
    def test_magnitude_to_flux(self):
        # Convert an apparent magnitude with uncertainty to a flux
        synphot = SyntheticPhotometry("MKO/NSFCam.J")
        flux, error = synphot.magnitude_to_flux(20.0, error=0.5)

        assert flux == pytest.approx(3.066477264611711e-17, rel=self.limit, abs=0.0)
        assert error == pytest.approx(1.462611294865793e-17, rel=self.limit, abs=0.0)

    def test_flux_to_magnitude(self):
        # Convert a flux to apparent and absolute magnitudes,
        # using a 50 pc distance without uncertainty
        synphot = SyntheticPhotometry("MKO/NSFCam.J")
        app_mag, abs_mag = synphot.flux_to_magnitude(
            1e-10, error=None, distance=(50.0, None)
        )

        assert app_mag[0] == pytest.approx(3.7165993727542115, rel=self.limit, abs=0.0)
        assert abs_mag[0] == pytest.approx(0.2217493510741182, rel=self.limit, abs=0.0)

    def test_spectrum_to_flux(self):
        # Synthetic photometry from the downloaded Jupiter
        # spectrum, with the flux errors propagated
        jup_wavel, jup_flux, jup_err = np.loadtxt("plnt_Jupiter.txt", unpack=True)

        synphot = SyntheticPhotometry("MKO/NSFCam.J")

        phot_flux, phot_error = synphot.spectrum_to_flux(
            jup_wavel, jup_flux, error=jup_err, threshold=None
        )

        assert phot_flux == pytest.approx(
            1.802998152236653e-09, rel=self.limit, abs=0.0
        )

        # The error is estimated with Monte Carlo sampling
        assert phot_error == pytest.approx(8.8e-14, rel=0.0, abs=2e-14)

    def test_spectrum_to_flux_no_error(self):
        # Without input errors, the returned uncertainty is None
        jup_wavel, jup_flux, _ = np.loadtxt("plnt_Jupiter.txt", unpack=True)

        synphot = SyntheticPhotometry("MKO/NSFCam.J")

        phot_flux, phot_error = synphot.spectrum_to_flux(
            jup_wavel, jup_flux, error=None, threshold=None
        )

        assert phot_flux == pytest.approx(
            1.802998152236653e-09, rel=self.limit, abs=0.0
        )
        assert phot_error is None

    def test_spectrum_to_flux_threshold(self):
        # A transmission threshold does not change the result for
        # a spectrum that fully covers the filter profile
        jup_wavel, jup_flux, _ = np.loadtxt("plnt_Jupiter.txt", unpack=True)

        synphot = SyntheticPhotometry("MKO/NSFCam.J")

        phot_flux, phot_error = synphot.spectrum_to_flux(
            jup_wavel, jup_flux, error=None, threshold=0.05
        )

        assert phot_flux == pytest.approx(
            1.802998152236653e-09, rel=self.limit, abs=0.0
        )
        assert phot_error is None

    def test_spectrum_to_flux_photon_detector(self):
        # Same as test_spectrum_to_flux but with a different
        # filter (Keck/NIRC2.J), so a different expected flux
        jup_wavel, jup_flux, jup_err = np.loadtxt("plnt_Jupiter.txt", unpack=True)

        synphot = SyntheticPhotometry("Keck/NIRC2.J")

        phot_flux, phot_error = synphot.spectrum_to_flux(
            jup_wavel, jup_flux, error=jup_err, threshold=None
        )

        assert phot_flux == pytest.approx(
            1.8139883721554032e-09, rel=self.limit, abs=0.0
        )

        # The error is estimated with Monte Carlo sampling
        assert phot_error == pytest.approx(8.4e-14, rel=0.0, abs=2e-14)

    def test_spectrum_to_magnitude(self):
        # Apparent and absolute magnitudes from a spectrum, with
        # both the flux and distance uncertainties propagated
        jup_wavel, jup_flux, jup_err = np.loadtxt("plnt_Jupiter.txt", unpack=True)

        synphot = SyntheticPhotometry("MKO/NSFCam.J")

        app_mag, abs_mag = synphot.spectrum_to_magnitude(
            jup_wavel, jup_flux, error=jup_err, distance=(1.0, 0.01), threshold=None
        )

        assert app_mag[0] == pytest.approx(0.576611165817664, rel=self.limit, abs=0.0)
        assert abs_mag[0] == pytest.approx(5.576611165817664, rel=self.limit, abs=0.0)

        # The error is estimated with Monte Carlo sampling
        assert app_mag[1] == pytest.approx(5.368048545366946e-05, rel=0.0, abs=2e-5)
        assert abs_mag[1] == pytest.approx(0.021714790446227043, rel=0.0, abs=1e-2)

    def test_zero_point(self):
        # Supplying a non-default zero point is expected to
        # raise a UserWarning and rescale the converted fluxes
        with pytest.warns(UserWarning) as warning:
            synphot = SyntheticPhotometry("MKO/NSFCam.J", zero_point=1e-2)

        flux, error = synphot.magnitude_to_flux(20.0, error=0.5)

        assert flux == pytest.approx(1.0280162981264745e-10, rel=self.limit, abs=0.0)
        assert error == pytest.approx(4.903307995457426e-11, rel=self.limit, abs=0.0)
--------------------------------------------------------------------------------
/species/util/spec_util.py:
-------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for manipulating spectra. 3 | """ 4 | 5 | import warnings 6 | 7 | from math import ceil 8 | from typing import Tuple, Union 9 | 10 | import numpy as np 11 | 12 | from scipy.ndimage import gaussian_filter 13 | from typeguard import typechecked 14 | 15 | 16 | @typechecked 17 | def create_wavelengths( 18 | wavel_range: Tuple[Union[float, np.float32], Union[float, np.float32]], 19 | wavel_sampling: float, 20 | ) -> np.ndarray: 21 | """ 22 | Function for creating logarithmically-spaced wavelengths, 23 | so with a constant :math:`\\lambda/\\Delta\\lambda`. 24 | 25 | Parameters 26 | ---------- 27 | wavel_range : tuple(float, float) 28 | Wavelength range (:math:`\\mu\\mathrm{m}`). Tuple with the 29 | minimum and maximum wavelength. 30 | wavel_sampling : float 31 | Wavelength sampling :math:`\\lambda/\\Delta\\lambda`. 32 | 33 | Returns 34 | ------- 35 | np.ndarray 36 | Array with the wavelengths (:math:`\\mu\\mathrm{m}`). Since 37 | the wavelength boundaries are fixed, the output sampling 38 | is slightly different from the value provided as 39 | argument of ``wavel_sampling``. 
40 | """ 41 | 42 | n_test = 100 43 | 44 | wavel_test = np.logspace(np.log10(wavel_range[0]), np.log10(wavel_range[1]), n_test) 45 | sampling_test = 0.5 * (wavel_test[1:] + wavel_test[:-1]) / np.diff(wavel_test) 46 | 47 | # math.ceil returns int, but np.ceil returns float 48 | wavel_array = np.logspace( 49 | np.log10(wavel_range[0]), 50 | np.log10(wavel_range[1]), 51 | ceil(n_test * wavel_sampling / np.mean(sampling_test)) + 1, 52 | ) 53 | 54 | # Check wavelength sampling, lambda/D_lambda, of the created array 55 | # res_out = np.mean(0.5*(wavel_array[1:]+wavel_array[:-1])/np.diff(wavel_array)) 56 | 57 | return wavel_array 58 | 59 | 60 | @typechecked 61 | def smooth_spectrum( 62 | wavelength: np.ndarray, 63 | flux: np.ndarray, 64 | spec_res: float, 65 | kernel_size: int = 11, 66 | force_smooth: bool = False, 67 | ) -> np.ndarray: 68 | """ 69 | Function for smoothing a spectrum with a Gaussian kernel to a 70 | fixed spectral resolution. The kernel size is set to 5 times the 71 | FWHM of the Gaussian. The FWHM of the Gaussian is equal to the 72 | ratio of the wavelength and the spectral resolution. If the 73 | kernel does not fit within the available wavelength grid (i.e. 74 | at the edge of the array) then the flux values are set to NaN. 75 | 76 | Parameters 77 | ---------- 78 | wavelength : np.ndarray 79 | Wavelength points (um). Should be sampled with constant 80 | logarithmic steps (i.e. fixed :math:`\\lambda/\\Delta\\lambda`) 81 | or sampled with a uniform linear spacing. The latter 82 | implementation is slow so the first is preferred. 83 | flux : np.ndarray 84 | Flux (W m-2 um-1). 85 | spec_res : float 86 | Spectral resolution. 87 | kernel_size : int 88 | Kernel size (odd integer). Only used when the wavelengths 89 | are linearly sampled. Not used by the function. 90 | force_smooth : bool 91 | Force the smoothing for logarithmically spaced wavelengths. 92 | 93 | Returns 94 | ------- 95 | np.ndarray 96 | Smoothed spectrum (W m-2 um-1). 
97 | """ 98 | 99 | def _gaussian(kernel_size, sigma): 100 | pos = range(-(kernel_size - 1) // 2, (kernel_size - 1) // 2 + 1) 101 | kernel = [ 102 | np.exp(-float(x) ** 2 / (2.0 * sigma**2)) / (sigma * np.sqrt(2.0 * np.pi)) 103 | for x in pos 104 | ] 105 | 106 | return np.asarray(kernel / sum(kernel)) 107 | 108 | spacing = np.mean(2.0 * np.diff(wavelength) / (wavelength[1:] + wavelength[:-1])) 109 | spacing_std = np.std(2.0 * np.diff(wavelength) / (wavelength[1:] + wavelength[:-1])) 110 | 111 | if spacing_std / spacing < 1e-2 or force_smooth: 112 | # delta_lambda of resolution element is 113 | # FWHM of the LSF's standard deviation 114 | sigma_lsf = 1.0 / spec_res / (2.0 * np.sqrt(2.0 * np.log(2.0))) 115 | 116 | # Calculate the sigma to be used with the Gaussian filter 117 | # in units of the input wavelength bins 118 | sigma_filter = sigma_lsf / spacing 119 | 120 | flux_smooth = gaussian_filter(flux, sigma=sigma_filter, mode="nearest") 121 | 122 | else: 123 | flux_smooth = np.zeros(flux.shape) # (W m-2 um-1) 124 | 125 | spacing = np.mean(np.diff(wavelength)) # (um) 126 | spacing_std = np.std(np.diff(wavelength)) # (um) 127 | 128 | if spacing_std / spacing > 1e-2: 129 | warnings.warn( 130 | "The wavelength spacing is not uniform " 131 | f"(lambda/d_lambda = {spacing} +/- {spacing_std}). " 132 | "The smoothing with the Gaussian kernel requires " 133 | "either the spectral resolution or the wavelength " 134 | "spacing to be uniformly sampled. This warning " 135 | "should not have occurred with any of the model " 136 | "grids provided by species. Please open an issue " 137 | "on the Github page if help is needed." 
138 | ) 139 | 140 | for i, item in enumerate(wavelength): 141 | fwhm = item / spec_res # (um) 142 | sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0))) # (um) 143 | 144 | # Kernel size 5 times the width of the LSF 145 | kernel_size = int(5.0 * sigma / spacing) 146 | 147 | if kernel_size % 2 == 0: 148 | kernel_size += 1 149 | 150 | gaussian = _gaussian(kernel_size, sigma / spacing) 151 | 152 | try: 153 | flux_smooth[i] = np.sum( 154 | gaussian 155 | * flux[i - (kernel_size - 1) // 2 : i + (kernel_size - 1) // 2 + 1] 156 | ) 157 | 158 | except ValueError: 159 | flux_smooth[i] = np.nan 160 | 161 | return flux_smooth 162 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_sonora_diamondback.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from zipfile import ZipFile 3 | 4 | import h5py 5 | import numpy as np 6 | import pooch 7 | 8 | from typeguard import typechecked 9 | 10 | from species.core import constants 11 | 12 | 13 | @typechecked 14 | def add_sonora_diamondback(database: h5py._hl.files.File, input_path: str) -> None: 15 | """ 16 | Function for adding the isochrone data of `Sonora Diamondback 17 | `_ to the database. 18 | 19 | Parameters 20 | ---------- 21 | database : h5py._hl.files.File 22 | Database. 23 | input_path : str 24 | Folder where the data is located. 

    Returns
    -------
    NoneType
        None
    """

    url = "https://zenodo.org/records/12735103/files/evolution.zip"

    input_file = "evolution.zip"
    data_folder = Path(input_path) / "sonora-diamondback-evolution"
    data_file = Path(data_folder) / input_file

    if not data_folder.exists():
        data_folder.mkdir()

    # Download the evolution data only when not already cached
    if not data_file.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash="1efb4e5297060fa7b0329dec363e0bfff4f6132d2d11b655281cabea091d78ee",
            fname=input_file,
            path=data_folder,
            progressbar=True,
        )

    print("\nUnpacking Sonora Diamondback evolution (830 kB)...", end="", flush=True)
    with ZipFile(str(data_file)) as zip_object:
        zip_object.extractall(path=str(data_folder))
    print(" [DONE]")

    url = "https://zenodo.org/records/12735103/files/photometry.zip"

    input_file = "photometry.zip"
    data_file = Path(data_folder) / input_file

    # Download the photometry data only when not already cached
    if not data_file.exists():
        print()

        pooch.retrieve(
            url=url,
            known_hash="7210f358c6da317d60a19dffb5b18a398e20a565aebaaa297e521ee9076bbc9c",
            fname=input_file,
            path=data_folder,
            progressbar=True,
        )

    print("\nUnpacking Sonora Diamondback photometry (239 kB)...", end="", flush=True)
    with ZipFile(str(data_file)) as zip_object:
        zip_object.extractall(path=str(data_folder))
    print(" [DONE]")

    # Model variants: cloud-free (nc), hybrid, and hybrid-grav
    # cloud models, each for three metallicities
    iso_files = [
        "nc_m-0.5_age",
        "nc_m0.0_age",
        "nc_m+0.5_age",
        "hybrid_f2_m-0.5_age",
        "hybrid_f2_m0.0_age",
        "hybrid_f2_m+0.5_age",
        "hybrid-grav_f2_m-0.5_age",
        "hybrid-grav_f2_m0.0_age",
        "hybrid-grav_f2_m+0.5_age",
    ]

    labels = [
        "cloud-free, [M/H] = -0.5",
        "cloud-free, [M/H] = +0.0",
        "cloud-free, [M/H] = +0.5",
        "hybrid, fsed = 2, [M/H] = -0.5",
        "hybrid, fsed = 2, [M/H] = +0.0",
        "hybrid, fsed = 2, [M/H] = +0.5",
        "hybrid-grav, fsed = 2, [M/H] = -0.5",
        "hybrid-grav, fsed = 2, [M/H] = +0.0",
        "hybrid-grav, fsed = 2, [M/H] = +0.5",
    ]

    iso_tags = [
        "nc-0.5",
        "nc+0.0",
        "nc+0.5",
        "hybrid-0.5",
        "hybrid+0.0",
        "hybrid+0.5",
        "hybrid-grav-0.5",
        "hybrid-grav+0.0",
        "hybrid-grav+0.5",
    ]

    for iso_idx, iso_item in enumerate(iso_files):
        iso_file = f"evolution/{iso_item}"
        iso_path = Path(data_folder) / iso_file

        iso_data = []

        with open(str(iso_path), encoding="utf-8") as open_file:
            for line_idx, line_item in enumerate(open_file):
                # Skip the header and the repeated column labels
                if line_idx == 0 or "Gyr" in line_item:
                    continue

                # age(Gyr) M/Msun log(L/Lsun) Teff(K) log(g) R/Rjup
                # The data files have R/Rsun, but should be R/Rjup
                param = list(filter(None, line_item.strip().split(" ")))
                param = list(map(float, param))

                param[0] = 1e3 * param[0]  # (Gyr) -> (Myr)

                param[1] = (
                    param[1] * constants.M_SUN / constants.M_JUP
                )  # (Msun) -> (Mjup)

                iso_data.append(
                    [param[0], param[1], param[2], param[3], param[4], param[5]]
                )

        print(
            f"\nAdding isochrones: Sonora {labels[iso_idx]}...", end="", flush=True
        )

        iso_data = np.array(iso_data)

        # The 'model' attribute is stored on the 'age' dataset,
        # which is what 'dset' points to below
        dset = database.create_dataset(
            f"isochrones/sonora-diamondback-{iso_tags[iso_idx]}/age",
            data=iso_data[:, 0],
        )  # (Myr)

        database.create_dataset(
            f"isochrones/sonora-diamondback-{iso_tags[iso_idx]}/mass",
            data=iso_data[:, 1],
        )  # (Mjup)

        database.create_dataset(
            f"isochrones/sonora-diamondback-{iso_tags[iso_idx]}/log_lum",
            data=iso_data[:, 2],
        )  # log(L/Lsun)

        database.create_dataset(
            f"isochrones/sonora-diamondback-{iso_tags[iso_idx]}/teff",
            data=iso_data[:, 3],
        )  # (K)

        database.create_dataset(
            f"isochrones/sonora-diamondback-{iso_tags[iso_idx]}/log_g",
            data=iso_data[:, 4],
        )  # log(g)

        database.create_dataset(
            f"isochrones/sonora-diamondback-{iso_tags[iso_idx]}/radius",
            data=iso_data[:, 5],
        )  # (Rjup)

        dset.attrs["model"] = "sonora-diamondback"

        print(" [DONE]")
        print(f"Database tag: sonora-diamondback-{iso_tags[iso_idx]}")
--------------------------------------------------------------------------------
/species/data/spec_data/spec_allers2013.py:
--------------------------------------------------------------------------------
"""
Module for adding young, M- and L-type dwarf spectra from
`Allers & Liu (2013) `_ to the . These spectra are
also available in the `SpeX Prism Library Analysis Toolkit
`_.
"""

from pathlib import Path

import h5py
import numpy as np
import pandas as pd
import pooch

from astropy.io import fits
from astroquery.simbad import Simbad
from typeguard import typechecked

from species.util.data_util import extract_tarfile, remove_directory
from species.util.query_util import get_simbad


@typechecked
def add_allers2013(input_path: str, database: h5py._hl.files.File) -> None:
    """
    Function for adding the spectra of young, M- and L-type dwarfs
    from `Allers & Liu (2013) `_ to the database.

    Parameters
    ----------
    input_path : str
        Path of the data folder.
    database : h5py._hl.files.File
        The HDF5 database.
37 | 38 | Returns 39 | ------- 40 | NoneType 41 | None 42 | """ 43 | 44 | Simbad.add_votable_fields("plx", "plx_error") 45 | 46 | url = "https://home.strw.leidenuniv.nl/~stolker/species/parallax.dat" 47 | input_file = "parallax.dat" 48 | data_file = Path(input_path) / input_file 49 | 50 | if not data_file.exists(): 51 | print() 52 | 53 | pooch.retrieve( 54 | url=url, 55 | known_hash="e2fe0719a919dc98d24627a12f535862a107e473bc67f09298a40ad474cdd491", 56 | fname=input_file, 57 | path=input_path, 58 | progressbar=True, 59 | ) 60 | 61 | parallax_data = pd.pandas.read_csv( 62 | data_file, 63 | usecols=[0, 1, 2], 64 | names=["object", "parallax", "parallax_error"], 65 | delimiter=",", 66 | dtype={"object": str, "parallax": float, "parallax_error": float}, 67 | ) 68 | 69 | print_text = "spectra of young M/L type objects from Allers & Liu 2013" 70 | 71 | url = "https://home.strw.leidenuniv.nl/~stolker/species/allers_liu_2013.tgz" 72 | input_file = "allers_liu_2013.tgz" 73 | data_file = Path(input_path) / input_file 74 | data_folder = Path(input_path) / "allers+2013/" 75 | 76 | if not data_file.exists(): 77 | print() 78 | 79 | pooch.retrieve( 80 | url=url, 81 | known_hash=None, 82 | fname=input_file, 83 | path=input_path, 84 | progressbar=True, 85 | ) 86 | 87 | if data_folder.exists(): 88 | remove_directory(data_folder) 89 | 90 | print(f"\nUnpacking {print_text} (173 kB)...", end="", flush=True) 91 | extract_tarfile(str(data_file), str(data_folder)) 92 | print(" [DONE]") 93 | 94 | data_file = Path(data_folder) / "sources.csv" 95 | 96 | sources = np.genfromtxt( 97 | data_file, 98 | delimiter=",", 99 | dtype=None, 100 | encoding="ASCII", 101 | ) 102 | 103 | source_names = sources[:, 0] 104 | source_sptype = sources[:, 7] 105 | 106 | print_message = "" 107 | print() 108 | 109 | spec_files = sorted(data_folder.glob("*")) 110 | 111 | for file_item in spec_files: 112 | if file_item.suffix == ".fits": 113 | sp_data, header = fits.getdata(file_item, header=True) 114 | 115 | else: 
116 | continue 117 | 118 | sp_data = np.transpose(sp_data) 119 | 120 | # (erg s-1 cm-2 A-1) -> (W m-2 um-1) 121 | sp_data[:, 1:] *= 10.0 122 | 123 | name = header["OBJECT"] 124 | 125 | if "RES" in header: 126 | spec_res = header["RES"] 127 | elif "RP" in header: 128 | spec_res = header["RP"] 129 | 130 | simbad_id = get_simbad(name) 131 | 132 | if simbad_id is not None: 133 | if not isinstance(simbad_id, str): 134 | simbad_id = simbad_id.decode("utf-8") 135 | 136 | par_select = parallax_data[parallax_data["object"] == simbad_id] 137 | 138 | if not par_select.empty: 139 | parallax = ( 140 | par_select["parallax"].values[0], 141 | par_select["parallax_error"].values[0], 142 | ) 143 | 144 | else: 145 | parallax = (np.nan, np.nan) 146 | 147 | else: 148 | parallax = (np.nan, np.nan) 149 | 150 | if np.isnan(parallax[0]) and simbad_id is not None: 151 | simbad_result = Simbad.query_object(simbad_id) 152 | 153 | if simbad_result is not None and len(simbad_result) > 0: 154 | if "PLX_VALUE" in simbad_result.columns: 155 | if not simbad_result["PLX_VALUE"].mask[0]: 156 | parallax = ( 157 | simbad_result["PLX_VALUE"].value[0], 158 | simbad_result["PLX_ERROR"].value[0], 159 | ) 160 | 161 | else: 162 | if not simbad_result["plx_value"].mask[0]: 163 | parallax = ( 164 | simbad_result["plx_value"].value[0], 165 | simbad_result["plx_err"].value[0], 166 | ) 167 | 168 | index = np.argwhere(source_names == name) 169 | 170 | if len(index) == 0: 171 | sptype = None 172 | else: 173 | sptype = source_sptype[index][0][0][:2] 174 | 175 | empty_message = len(print_message) * " " 176 | print(f"\r{empty_message}", end="") 177 | 178 | print_message = f"Adding spectra... 
{name}" 179 | print(f"\r{print_message}", end="") 180 | 181 | dset = database.create_dataset(f"spectra/allers+2013/{name}", data=sp_data) 182 | 183 | dset.attrs["name"] = str(name).encode() 184 | dset.attrs["sptype"] = str(sptype).encode() 185 | dset.attrs["simbad"] = str(simbad_id).encode() 186 | dset.attrs["parallax"] = float(parallax[0]) # (mas) 187 | dset.attrs["parallax_error"] = float(parallax[1]) # (mas) 188 | dset.attrs["spec_res"] = float(spec_res) 189 | 190 | empty_message = len(print_message) * " " 191 | print(f"\r{empty_message}", end="") 192 | 193 | print_message = "Adding spectra... [DONE]" 194 | print(f"\r{print_message}") 195 | -------------------------------------------------------------------------------- /species/data/isochrone_data/iso_linder2019.py: -------------------------------------------------------------------------------- 1 | from requests.exceptions import HTTPError 2 | from pathlib import Path 3 | 4 | import h5py 5 | import numpy as np 6 | import pooch 7 | 8 | from typeguard import typechecked 9 | 10 | from species.core import constants 11 | from species.util.data_util import extract_tarfile, remove_directory 12 | 13 | 14 | @typechecked 15 | def add_linder2019(database: h5py._hl.files.File, input_path: str) -> None: 16 | """ 17 | Function for adding the `Linder et al. (2019) 18 | `_ 19 | isochrones data to the database. 20 | 21 | Parameters 22 | ---------- 23 | database : h5py._hl.files.File 24 | Database. 25 | input_path : str 26 | Folder where the data is located. 
27 | 28 | Returns 29 | ------- 30 | NoneType 31 | None 32 | """ 33 | 34 | filters = ( 35 | "Paranal/NACO.J", 36 | "Paranal/NACO.H", 37 | "Paranal/NACO.Ks", 38 | "Paranal/NACO.Lp", 39 | "Paranal/NACO.Mp", 40 | "Generic/Cousins.R", 41 | "Generic/Cousins.I", 42 | "WISE/WISE.W1", 43 | "WISE/WISE.W2", 44 | "WISE/WISE.W3", 45 | "WISE/WISE.W4", 46 | "JWST/NIRCam.F115W", 47 | "JWST/NIRCam.F150W", 48 | "JWST/NIRCam.F200W", 49 | "JWST/NIRCam.F277W", 50 | "JWST/NIRCam.F356W", 51 | "JWST/NIRCam.F444W", 52 | "JWST/MIRI.F560W", 53 | "JWST/MIRI.F770W", 54 | "JWST/MIRI.F1000W", 55 | "JWST/MIRI.F1280W", 56 | "JWST/MIRI.F1500W", 57 | "JWST/MIRI.F1800W", 58 | "JWST/MIRI.F2100W", 59 | "JWST/MIRI.F2550W", 60 | "Paranal/VISIR.B87", 61 | "Paranal/VISIR.SiC", 62 | "Paranal/SPHERE.IRDIS_B_Y", 63 | "Paranal/SPHERE.IRDIS_B_J", 64 | "Paranal/SPHERE.IRDIS_B_H", 65 | "Paranal/SPHERE.IRDIS_B_Ks", 66 | "Paranal/SPHERE.IRDIS_D_J23_2", 67 | "Paranal/SPHERE.IRDIS_D_J23_3", 68 | "Paranal/SPHERE.IRDIS_D_H23_2", 69 | "Paranal/SPHERE.IRDIS_D_H23_3", 70 | "Paranal/SPHERE.IRDIS_D_K12_1", 71 | "Paranal/SPHERE.IRDIS_D_K12_2", 72 | ) 73 | 74 | data_folder = Path(input_path) / "linder_2019" 75 | 76 | if data_folder.exists(): 77 | # The folder should be removed if the TAR file was previously 78 | # unpacked because the file permissions are set to read-only 79 | # such that the extract_tarfile will cause an error if the 80 | # files need to be overwritten 81 | remove_directory(data_folder) 82 | 83 | data_folder.mkdir() 84 | 85 | input_file = "J_A+A_623_A85.tar.gz" 86 | data_file = Path(input_path) / input_file 87 | 88 | if not data_file.exists(): 89 | print() 90 | 91 | try: 92 | url = "https://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J/A+A/623/A85" 93 | 94 | pooch.retrieve( 95 | url=url, 96 | known_hash=None, 97 | fname=input_file, 98 | path=input_path, 99 | progressbar=True, 100 | ) 101 | 102 | except HTTPError: 103 | url = ( 104 | "https://home.strw.leidenuniv.nl/~stolker/species/J_A+A_623_A85.tar.gz" 105 | 
) 106 | 107 | pooch.retrieve( 108 | url=url, 109 | known_hash="83bbc673a10207838983e0155ec21915caedd6465d6926fba23675562797923d", 110 | fname=input_file, 111 | path=input_path, 112 | progressbar=True, 113 | ) 114 | 115 | print("\nUnpacking Linder et al. (2019) isochrones (536 kB)...", end="", flush=True) 116 | extract_tarfile(str(data_file), str(data_folder)) 117 | print(" [DONE]") 118 | 119 | iso_folder = Path(data_folder) / "isochrones" 120 | iso_files = sorted(iso_folder.glob("*")) 121 | 122 | for iso_item in iso_files: 123 | file_param = iso_item.stem.split("_") 124 | mags_idx = file_param.index("mags") 125 | 126 | if int(file_param[mags_idx + 1]) == -2: 127 | atm_model = "petitCODE" 128 | elif int(file_param[mags_idx + 1]) == -3: 129 | atm_model = "HELIOS" 130 | else: 131 | raise ValueError("Atmospheric model not recognized.") 132 | 133 | mh_idx = file_param.index("MH") 134 | metallicity = float(file_param[mh_idx + 1]) 135 | 136 | if "brighter" in file_param or "fainter" in file_param: 137 | # Skip _brighter and _fainter files 138 | continue 139 | 140 | if "fsed" in file_param: 141 | fsed_idx = file_param.index("fsed") 142 | fsed = float(file_param[fsed_idx + 1]) 143 | else: 144 | fsed = None 145 | 146 | iso_data = np.loadtxt(iso_item) 147 | 148 | print( 149 | f"\nAdding isochrones: Linder et al. 
(2019) {atm_model}...", 150 | end="", 151 | flush=True, 152 | ) 153 | 154 | age = 1e-6 * 10.0 ** iso_data[:, 0] # (Myr) 155 | mass = iso_data[:, 1] * constants.M_EARTH / constants.M_JUP # (Mjup) 156 | radius = iso_data[:, 2] # (Rjup) 157 | log_lum = np.log10(8.710e-10 * iso_data[:, 3]) # log(L/Lsun) 158 | teff = iso_data[:, 4] # (K) 159 | logg = iso_data[:, 5] # log(g/cgs) 160 | magnitudes = iso_data[:, 6:] 161 | 162 | if fsed is None: 163 | tag_label = f"linder2019-{atm_model}-metal_{metallicity}" 164 | else: 165 | tag_label = f"linder2019-{atm_model}-metal_{metallicity}-fsed_{fsed}" 166 | 167 | dtype = h5py.string_dtype(encoding="utf-8", length=None) 168 | 169 | dset = database.create_dataset( 170 | f"isochrones/{tag_label}/filters", (np.size(filters),), dtype=dtype 171 | ) 172 | 173 | dset[...] = filters 174 | 175 | dset = database.create_dataset(f"isochrones/{tag_label}/age", data=age) # (Myr) 176 | database.create_dataset(f"isochrones/{tag_label}/mass", data=mass) # (Mjup) 177 | database.create_dataset( 178 | f"isochrones/{tag_label}/log_lum", data=log_lum 179 | ) # log(L/Lsun) 180 | database.create_dataset(f"isochrones/{tag_label}/teff", data=teff) # (K) 181 | database.create_dataset(f"isochrones/{tag_label}/log_g", data=logg) # log(g) 182 | database.create_dataset(f"isochrones/{tag_label}/radius", data=radius) # (Rjup) 183 | database.create_dataset(f"isochrones/{tag_label}/magnitudes", data=magnitudes) 184 | 185 | dset.attrs["model"] = "linder2019" 186 | 187 | print(" [DONE]") 188 | print(f"Database tag: {tag_label}") 189 | -------------------------------------------------------------------------------- /species/data/spec_data/spec_irtf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for adding the IRTF Spectral Library to the database. 
3 | """ 4 | 5 | from pathlib import Path 6 | from typing import Optional, List 7 | 8 | import h5py 9 | import numpy as np 10 | import pandas as pd 11 | import pooch 12 | 13 | from astropy.io import fits 14 | from typeguard import typechecked 15 | 16 | from species.util.data_util import extract_tarfile, update_sptype 17 | from species.util.query_util import get_parallax, get_simbad 18 | 19 | 20 | @typechecked 21 | def add_irtf( 22 | input_path: str, database: h5py._hl.files.File, sptypes: Optional[List[str]] = None 23 | ) -> None: 24 | """ 25 | Function for adding the IRTF Spectral Library to the database. 26 | 27 | Parameters 28 | ---------- 29 | input_path : str 30 | Path of the data folder. 31 | database : h5py._hl.files.File 32 | Database. 33 | sptypes : list(str), None 34 | List with the spectral types ('F', 'G', 'K', 'M', 'L', 'T'). All spectral types are 35 | included if set to ``None``. 36 | 37 | Returns 38 | ------- 39 | NoneType 40 | None 41 | """ 42 | 43 | if sptypes is None: 44 | sptypes = ["F", "G", "K", "M", "L", "T"] 45 | 46 | url = "https://home.strw.leidenuniv.nl/~stolker/species/parallax.dat" 47 | input_file = "parallax.dat" 48 | data_file = Path(input_path) / input_file 49 | 50 | if not data_file.exists(): 51 | print() 52 | 53 | pooch.retrieve( 54 | url=url, 55 | known_hash="e2fe0719a919dc98d24627a12f535862a107e473bc67f09298a40ad474cdd491", 56 | fname=input_file, 57 | path=input_path, 58 | progressbar=True, 59 | ) 60 | 61 | parallax_data = pd.pandas.read_csv( 62 | data_file, 63 | usecols=[0, 1, 2], 64 | names=["object", "parallax", "parallax_error"], 65 | delimiter=",", 66 | dtype={"object": str, "parallax": float, "parallax_error": float}, 67 | ) 68 | 69 | data_folder = Path(input_path) / "irtf" 70 | 71 | if not data_folder.exists(): 72 | data_folder.mkdir() 73 | 74 | data_file = { 75 | "F": Path(input_path) / "irtf/F_fits_091201.tar", 76 | "G": Path(input_path) / "irtf/G_fits_091201.tar", 77 | "K": Path(input_path) / "irtf/K_fits_091201.tar", 78 
| "M": Path(input_path) / "irtf/M_fits_091201.tar", 79 | "L": Path(input_path) / "irtf/L_fits_091201.tar", 80 | "T": Path(input_path) / "irtf/T_fits_091201.tar", 81 | } 82 | 83 | data_folder = { 84 | "F": Path(input_path) / "irtf/F_fits_091201", 85 | "G": Path(input_path) / "irtf/G_fits_091201", 86 | "K": Path(input_path) / "irtf/K_fits_091201", 87 | "M": Path(input_path) / "irtf/M_fits_091201", 88 | "L": Path(input_path) / "irtf/L_fits_091201", 89 | "T": Path(input_path) / "irtf/T_fits_091201", 90 | } 91 | 92 | irtf_folder = Path(input_path) / "irtf/" 93 | 94 | known_hash = { 95 | "F": "2bc34cfc1262582a8825c4b16d97c47f93cedb46dfcafa43b9db6dfb6fabe9c8", 96 | "G": "555c19d41dcb5278b796609e6eaceb5d1e8e484cb3b030fda5437722a1a84238", 97 | "K": "6c5b234a01681c174c3174366e7ee7b90ff35c542f762b9846cedb1d796c50a0", 98 | "M": "5488656537062593af43c175f9ee5068d0b7dece19b26175cce97d0593232436", 99 | "L": "2c2f6507c1dca2b81b5f085b3422664d2c21c6e77997037ab053dd77cc2d3381", 100 | "T": "74675599470c2e86803e9f395ec90fd62f8682a29bcbb281dd1ccd12750033e1", 101 | } 102 | 103 | url_root = "http://irtfweb.ifa.hawaii.edu/~spex/IRTF_Spectral_Library/Data/" 104 | 105 | url = { 106 | "F": url_root + "F_fits_091201.tar", 107 | "G": url_root + "G_fits_091201.tar", 108 | "K": url_root + "K_fits_091201.tar", 109 | "M": url_root + "M_fits_091201.tar", 110 | "L": url_root + "L_fits_091201.tar", 111 | "T": url_root + "T_fits_091201.tar", 112 | } 113 | 114 | for spt_item in sptypes: 115 | if not data_file[spt_item].exists(): 116 | print() 117 | 118 | pooch.retrieve( 119 | url=url[spt_item], 120 | known_hash=known_hash[spt_item], 121 | fname=data_file[spt_item].name, 122 | path=irtf_folder, 123 | progressbar=True, 124 | ) 125 | 126 | print("\nUnpacking IRTF Spectral Library...", end="", flush=True) 127 | 128 | for spt_item in sptypes: 129 | extract_tarfile(str(data_file[spt_item]), str(irtf_folder)) 130 | 131 | print(" [DONE]") 132 | 133 | print_message = "" 134 | print() 135 | 136 | for spt_item in 
sptypes: 137 | spec_files = sorted(data_folder[spt_item].glob("*")) 138 | 139 | for file_item in spec_files: 140 | if file_item.stem[-4:] != "_ext": 141 | spdata, header = fits.getdata(file_item, header=True) 142 | spdata = np.transpose(spdata) 143 | 144 | name = header["OBJECT"] 145 | sptype = header["SPTYPE"] 146 | 147 | if name[-2:] == "AB": 148 | name = name[:-2] 149 | elif name[-3:] == "ABC": 150 | name = name[:-3] 151 | 152 | spt_split = sptype.split() 153 | 154 | if spt_item in ["L", "T"] or spt_split[1][0] == "V": 155 | empty_message = len(print_message) * " " 156 | print(f"\r{empty_message}", end="") 157 | 158 | print_message = f"Adding spectra... {name}" 159 | print(f"\r{print_message}", end="") 160 | 161 | simbad_id = get_simbad(name) 162 | 163 | if simbad_id is not None: 164 | # For backward compatibility 165 | if not isinstance(simbad_id, str): 166 | simbad_id = simbad_id.decode("utf-8") 167 | 168 | par_select = parallax_data[parallax_data["object"] == simbad_id] 169 | 170 | if not par_select.empty: 171 | parallax = ( 172 | par_select["parallax"], 173 | par_select["parallax_error"], 174 | ) 175 | else: 176 | simbad_id, parallax = get_parallax(name) 177 | 178 | else: 179 | parallax = (np.nan, np.nan) 180 | 181 | sptype = update_sptype(np.array([sptype]))[0] 182 | 183 | dset = database.create_dataset(f"spectra/irtf/{name}", data=spdata) 184 | 185 | dset.attrs["name"] = str(name).encode() 186 | dset.attrs["sptype"] = str(sptype).encode() 187 | dset.attrs["simbad"] = str(simbad_id).encode() 188 | dset.attrs["parallax"] = parallax[0] 189 | dset.attrs["parallax_error"] = parallax[1] 190 | 191 | empty_message = len(print_message) * " " 192 | print(f"\r{empty_message}", end="") 193 | 194 | print_message = "Adding spectra... 
[DONE]" 195 | print(f"\r{print_message}") 196 | -------------------------------------------------------------------------------- /species/fit/fit_spectrum.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module with functionalities for photometric and spectroscopic 3 | calibration. The fitting routine be used to fit photometric 4 | data with a calibration spectrum (e.g. extracted with 5 | :func:`~species.read.read_model.ReadModel.get_model`) by 6 | simply fitting a scaling parameter. 7 | """ 8 | 9 | import math 10 | 11 | from typing import Dict, List, Optional, Tuple, Union 12 | 13 | from multiprocessing import cpu_count, Pool 14 | 15 | import emcee 16 | import numpy as np 17 | 18 | from typeguard import typechecked 19 | 20 | from species.phot.syn_phot import SyntheticPhotometry 21 | from species.read.read_calibration import ReadCalibration 22 | from species.read.read_object import ReadObject 23 | 24 | 25 | @typechecked 26 | def lnprob( 27 | param: np.ndarray, 28 | bounds: Dict[str, Tuple[float, float]], 29 | modelpar: List[str], 30 | objphot: List[np.ndarray], 31 | specphot: Union[ 32 | List[float], 33 | List[Tuple[SyntheticPhotometry, Tuple[np.float64, np.float64]]], 34 | ], 35 | ) -> float: 36 | """ 37 | Internal function for calculating the posterior probability. 38 | 39 | Parameters 40 | ---------- 41 | param : np.ndarray 42 | Value of the scaling parameter. 43 | bounds : dict 44 | Boundaries of the main scaling parameter. 45 | modelpar : list(str) 46 | Parameter names. 47 | objphot : list(tuple(float, float)) 48 | Photometry of the object. 49 | specphot : list(float), SyntheticPhotometry 50 | Synthetic photometry of the calibration spectrum for the 51 | same filters as the photometry of the object. 52 | 53 | Returns 54 | ------- 55 | float 56 | Log posterior probability. 
57 | """ 58 | 59 | ln_prob = 0.0 60 | 61 | for i, item in enumerate(modelpar): 62 | if bounds[item][0] <= param[i] <= bounds[item][1]: 63 | ln_prob += 0.0 64 | 65 | else: 66 | ln_prob += -np.inf 67 | break 68 | 69 | if not math.isinf(ln_prob): 70 | for i, obj_item in enumerate(objphot): 71 | if obj_item.ndim == 1: 72 | ln_prob += ( 73 | -0.5 74 | * (obj_item[0] - param[0] * specphot[i]) ** 2 75 | / obj_item[1] ** 2 76 | ) 77 | 78 | else: 79 | for j in range(obj_item.shape[1]): 80 | ln_prob += ( 81 | -0.5 82 | * (obj_item[0, j] - param[0] * specphot[i]) ** 2 83 | / obj_item[1, j] ** 2 84 | ) 85 | 86 | return ln_prob 87 | 88 | 89 | class FitSpectrum: 90 | """ 91 | Class for fitting a calibration spectrum to photometric data. 92 | """ 93 | 94 | @typechecked 95 | def __init__( 96 | self, 97 | object_name: str, 98 | filters: Optional[List[str]], 99 | spectrum: str, 100 | bounds: Dict[str, Tuple[float, float]], 101 | ) -> None: 102 | """ 103 | Parameters 104 | ---------- 105 | object_name : str 106 | Object name in the database. 107 | filters : list(str) 108 | Filter names for which the photometry is selected. All 109 | available photometry of the object is selected if the 110 | argument is set to ``None``. 111 | spectrum : str 112 | Calibration spectrum as labelled in the database. The 113 | calibration spectrum can be stored in the database with 114 | :func:`~species.data.database.Database.add_calibration`. 115 | bounds : dict 116 | Boundaries of the scaling parameter, as 117 | ``{'scaling':(min, max)}``. 
118 | 119 | Returns 120 | ------- 121 | NoneType 122 | None 123 | """ 124 | 125 | self.object = ReadObject(object_name) 126 | 127 | self.spectrum = spectrum 128 | self.bounds = bounds 129 | 130 | self.objphot = [] 131 | self.specphot = [] 132 | 133 | if filters is None: 134 | from species.data.database import Database 135 | 136 | species_db = Database() 137 | 138 | objectbox = species_db.get_object( 139 | object_name, inc_phot=True, inc_spec=False 140 | ) 141 | filters = objectbox.filters 142 | 143 | for item in filters: 144 | readcalib = ReadCalibration(self.spectrum, item) 145 | calibspec = readcalib.get_spectrum() 146 | 147 | synphot = SyntheticPhotometry(item) 148 | spec_phot = synphot.spectrum_to_flux(calibspec.wavelength, calibspec.flux) 149 | self.specphot.append(spec_phot[0]) 150 | 151 | obj_phot = self.object.get_photometry(item) 152 | self.objphot.append(np.array([obj_phot[2], obj_phot[3]])) 153 | 154 | self.modelpar = ["scaling"] 155 | 156 | @typechecked 157 | def run_mcmc( 158 | self, 159 | nwalkers: int, 160 | nsteps: int, 161 | guess: Union[Dict[str, float], Dict[str, None]], 162 | tag: str, 163 | ) -> None: 164 | """ 165 | Function to run the MCMC sampler. 166 | 167 | Parameters 168 | ---------- 169 | nwalkers : int 170 | Number of walkers. 171 | nsteps : int 172 | Number of steps per walker. 173 | guess : dict(str, float), dict(str, None) 174 | Guess of the scaling parameter. 175 | tag : str 176 | Database tag where the MCMC samples will be stored. 
177 | 178 | Returns 179 | ------- 180 | NoneType 181 | None 182 | """ 183 | 184 | print("Running MCMC...") 185 | 186 | ndim = 1 187 | 188 | initial = np.zeros((nwalkers, ndim)) 189 | 190 | for i, item in enumerate(self.modelpar): 191 | if guess[item] is not None: 192 | width = min( 193 | abs(guess[item] - self.bounds[item][0]), 194 | abs(guess[item] - self.bounds[item][1]), 195 | ) 196 | 197 | initial[:, i] = guess[item] + np.random.normal(0, 0.1 * width, nwalkers) 198 | 199 | else: 200 | initial[:, i] = np.random.uniform( 201 | low=self.bounds[item][0], high=self.bounds[item][1], size=nwalkers 202 | ) 203 | 204 | with Pool(processes=cpu_count()): 205 | ens_sampler = emcee.EnsembleSampler( 206 | nwalkers, 207 | ndim, 208 | lnprob, 209 | args=([self.bounds, self.modelpar, self.objphot, self.specphot]), 210 | ) 211 | 212 | ens_sampler.run_mcmc(initial, nsteps, progress=True) 213 | 214 | # Dictionary with attributes that will be stored 215 | 216 | attr_dict = { 217 | "model_type": "calibration", 218 | "model_name": self.spectrum, 219 | "mean_accept": np.mean(ens_sampler.acceptance_fraction), 220 | } 221 | 222 | # Add samples to the database 223 | 224 | from species.data.database import Database 225 | 226 | species_db = Database() 227 | 228 | species_db.add_samples( 229 | sampler="emcee", 230 | samples=ens_sampler.get_chain(), 231 | ln_prob=ens_sampler.get_log_prob(), 232 | tag=tag, 233 | modelpar=self.modelpar, 234 | attr_dict=attr_dict, 235 | ) 236 | -------------------------------------------------------------------------------- /species/data/phot_data/phot_leggett.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for the photometric data of L, T, Y dwarfs from Leggett et al. 
3 | """ 4 | 5 | from pathlib import Path 6 | 7 | import h5py 8 | import numpy as np 9 | import pandas as pd 10 | import pooch 11 | 12 | from typeguard import typechecked 13 | 14 | from species.util.data_util import update_sptype 15 | 16 | 17 | @typechecked 18 | def add_leggett(input_path: str, database: h5py._hl.files.File) -> None: 19 | """ 20 | Function for adding photometric data of L, T, and Y dwarfs from 21 | Leggett et al. to the database. 22 | 23 | Parameters 24 | ---------- 25 | input_path : str 26 | Path of the data folder. 27 | database : h5py._hl.files.File 28 | The HDF5 database that has been opened. 29 | 30 | Returns 31 | ------- 32 | NoneType 33 | None 34 | """ 35 | 36 | input_file_1 = "2010_phot.xls" 37 | data_file_1 = Path(input_path) / input_file_1 38 | url_1 = "http://staff.gemini.edu/~sleggett/2010_phot.xls" 39 | 40 | if not data_file_1.exists(): 41 | print() 42 | 43 | pooch.retrieve( 44 | url=url_1, 45 | known_hash=None, 46 | fname=input_file_1, 47 | path=input_path, 48 | progressbar=True, 49 | ) 50 | 51 | input_file_2 = "datafile8.txt" 52 | data_file_2 = Path(input_path) / input_file_2 53 | url_2 = "http://staff.gemini.edu/~sleggett/datafile8.txt" 54 | 55 | if not data_file_2.exists(): 56 | print() 57 | 58 | pooch.retrieve( 59 | url=url_2, 60 | known_hash=None, 61 | fname=input_file_2, 62 | path=input_path, 63 | progressbar=True, 64 | ) 65 | 66 | print("\nAdding Leggett L and T Dwarf Data...", end="", flush=True) 67 | 68 | group = "photometry/leggett" 69 | 70 | database.create_group(group) 71 | 72 | dataframe = pd.pandas.read_excel(data_file_1) 73 | dataframe.columns = dataframe.columns.str.replace("'", "") 74 | 75 | modulus = np.asarray(dataframe["M-m"]) # M-m (mag) 76 | modulus_error = np.asarray(dataframe["Mmerr"]) # M-m (mag) 77 | 78 | distance = 10.0 ** (-modulus / 5.0 + 1.0) # (pc) 79 | distance_lower = distance - 10.0 ** (-(modulus + modulus_error) / 5.0 + 1.0) # (pc) 80 | distance_upper = 10.0 ** (-(modulus - modulus_error) / 5.0 + 1.0) 
- distance # (pc) 81 | distance_error = (distance_lower + distance_upper) / 2.0 82 | 83 | name = np.asarray(dataframe["Name"]) 84 | 85 | # Near-infrared spectral type 86 | sptype = np.asarray(dataframe["Type"]) 87 | sptype = update_sptype(sptype) 88 | sptype = np.asarray(sptype) 89 | 90 | mag_y = np.asarray(dataframe["Y"]) 91 | mag_j = np.asarray(dataframe["J"]) 92 | mag_h = np.asarray(dataframe["H"]) 93 | mag_k = np.asarray(dataframe["K"]) 94 | mag_lp = np.asarray(dataframe["L"]) 95 | mag_mp = np.asarray(dataframe["M"]) 96 | mag_ch1 = np.asarray(dataframe["Ch1"]) 97 | mag_ch2 = np.asarray(dataframe["Ch2"]) 98 | mag_ch3 = np.asarray(dataframe["Ch3"]) 99 | mag_ch4 = np.asarray(dataframe["Ch4"]) 100 | mag_w1 = np.repeat(np.nan, np.size(name)) 101 | mag_w2 = np.repeat(np.nan, np.size(name)) 102 | mag_w3 = np.repeat(np.nan, np.size(name)) 103 | mag_w4 = np.repeat(np.nan, np.size(name)) 104 | 105 | print(" [DONE]") 106 | print("Adding Leggett T6+ and Y Dwarf Data...", end="", flush=True) 107 | 108 | with open(data_file_2, "r", encoding="utf-8") as open_file: 109 | lines = open_file.readlines()[69:] 110 | 111 | for item in lines: 112 | name = np.append(name, item[0:16]) 113 | 114 | spt_tmp = item[62:66] 115 | if spt_tmp[0] == "2": 116 | spt_tmp = "T" + spt_tmp[1] 117 | elif spt_tmp[0] == "3": 118 | spt_tmp = "Y" + spt_tmp[1] 119 | 120 | sptype = np.append(sptype, spt_tmp) 121 | 122 | modulus = float(item[67:73]) # M-m (mag) 123 | if modulus == 999.0: 124 | modulus = np.nan 125 | 126 | distance = np.append(distance, 10.0 ** (-modulus / 5.0 + 1.0)) # (pc) 127 | 128 | mag = np.zeros(14) 129 | 130 | mag[0] = float(item[95:101]) # MKO Y 131 | mag[1] = float(item[102:107]) # MKO J 132 | mag[2] = float(item[108:114]) # MKO H 133 | mag[3] = float(item[115:121]) # MKO K 134 | mag[4] = float(item[122:128]) # MKO L' 135 | mag[5] = float(item[129:135]) # MKO M' 136 | mag[6] = float(item[136:142]) # Spitzer/IRAC 3.6 um 137 | mag[7] = float(item[143:149]) # Spitzer/IRAC 4.5 um 138 | 
mag[8] = float(item[150:156]) # Spitzer/IRAC 5.8 um 139 | mag[9] = float(item[157:163]) # Spitzer/IRAC 8.0 um 140 | mag[10] = float(item[164:170]) # WISE W1 141 | mag[11] = float(item[171:176]) # WISE W2 142 | mag[12] = float(item[177:183]) # WISE W3 143 | mag[13] = float(item[184:190]) # WISE W4 144 | 145 | for j, mag_item in enumerate(mag): 146 | if mag_item == 999.0: 147 | mag[j] = np.nan 148 | 149 | mag_y = np.append(mag_y, mag[0]) 150 | mag_j = np.append(mag_j, mag[1]) 151 | mag_h = np.append(mag_h, mag[2]) 152 | mag_k = np.append(mag_k, mag[3]) 153 | mag_lp = np.append(mag_lp, mag[4]) 154 | mag_mp = np.append(mag_mp, mag[5]) 155 | mag_ch1 = np.append(mag_ch1, mag[6]) 156 | mag_ch2 = np.append(mag_ch2, mag[7]) 157 | mag_ch3 = np.append(mag_ch3, mag[8]) 158 | mag_ch4 = np.append(mag_ch4, mag[9]) 159 | mag_w1 = np.append(mag_w1, mag[10]) 160 | mag_w2 = np.append(mag_w2, mag[11]) 161 | mag_w3 = np.append(mag_w3, mag[12]) 162 | mag_w4 = np.append(mag_w4, mag[13]) 163 | 164 | dtype = h5py.special_dtype(vlen=str) 165 | 166 | dset = database.create_dataset(group + "/name", (np.size(name),), dtype=dtype) 167 | dset[...] = name 168 | 169 | dset = database.create_dataset(group + "/sptype", (np.size(sptype),), dtype=dtype) 170 | dset[...] = sptype 171 | 172 | flag = np.repeat("null", np.size(name)) 173 | 174 | dset = database.create_dataset(group + "/flag", (np.size(flag),), dtype=dtype) 175 | dset[...] 
= flag 176 | 177 | database.create_dataset(group + "/distance", data=distance) 178 | database.create_dataset(group + "/distance_error", data=distance_error) 179 | database.create_dataset(group + "/MKO/NSFCam.Y", data=mag_y) 180 | database.create_dataset(group + "/MKO/NSFCam.J", data=mag_j) 181 | database.create_dataset(group + "/MKO/NSFCam.H", data=mag_h) 182 | database.create_dataset(group + "/MKO/NSFCam.K", data=mag_k) 183 | database.create_dataset(group + "/MKO/NSFCam.Lp", data=mag_lp) 184 | database.create_dataset(group + "/MKO/NSFCam.Mp", data=mag_mp) 185 | database.create_dataset(group + "/Spitzer/IRAC.I1", data=mag_ch1) 186 | database.create_dataset(group + "/Spitzer/IRAC.I2", data=mag_ch2) 187 | database.create_dataset(group + "/Spitzer/IRAC.I3", data=mag_ch3) 188 | database.create_dataset(group + "/Spitzer/IRAC.I4", data=mag_ch4) 189 | database.create_dataset(group + "/WISE/WISE.W1", data=mag_w1) 190 | database.create_dataset(group + "/WISE/WISE.W2", data=mag_w2) 191 | database.create_dataset(group + "/WISE/WISE.W3", data=mag_w3) 192 | database.create_dataset(group + "/WISE/WISE.W4", data=mag_w4) 193 | 194 | print(" [DONE]") 195 | 196 | database.close() 197 | -------------------------------------------------------------------------------- /species/util/convert_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for converting quantities. 
3 | """ 4 | 5 | from typing import Optional, Tuple, Union 6 | 7 | import numpy as np 8 | 9 | from typeguard import typechecked 10 | 11 | from species.core import constants 12 | 13 | 14 | @typechecked 15 | def apparent_to_absolute( 16 | app_mag: Union[ 17 | Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]] 18 | ], 19 | distance: Union[ 20 | Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]] 21 | ], 22 | ) -> Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]]: 23 | """ 24 | Function for converting an apparent magnitude into an absolute 25 | magnitude. The uncertainty on the distance is propagated into the 26 | uncertainty on the absolute magnitude. 27 | 28 | Parameters 29 | ---------- 30 | app_mag : tuple(float, float), tuple(np.ndarray, np.ndarray) 31 | Apparent magnitude and uncertainty (mag). The returned error 32 | on the absolute magnitude is set to None if the error on the 33 | apparent magnitude is set to None, for example 34 | ``app_mag=(15., None)``. 35 | distance : tuple(float, float), tuple(np.ndarray, np.ndarray) 36 | Distance and uncertainty (pc). The error is not propagated 37 | into the error on the absolute magnitude if set to None, for 38 | example ``distance=(20., None)``. 39 | 40 | Returns 41 | ------- 42 | float, np.ndarray 43 | Absolute magnitude (mag). 44 | float, np.ndarray, None 45 | Uncertainty (mag). 
@typechecked
def absolute_to_apparent(
    abs_mag: Union[
        Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]
    ],
    distance: Union[
        Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]
    ],
) -> Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]]:
    """
    Function for converting an absolute magnitude
    into an apparent magnitude.

    Parameters
    ----------
    abs_mag : tuple(float, float), tuple(np.ndarray, np.ndarray)
        Tuple with the absolute magnitude and uncertainty (mag).
        The uncertainty on the returned apparent magnitude is
        simply adopted from the absolute magnitude. Providing the
        uncertainty is optional and can be set to ``None``.
    distance : tuple(float, float), tuple(np.ndarray, np.ndarray)
        Tuple with the distance and uncertainty (pc). The uncertainty
        is optional and can be set to ``None``. The distance
        uncertainty is currently not used by this function but
        included so it can be implemented at some point into the
        error budget.

    Returns
    -------
    float, np.ndarray
        Apparent magnitude (mag).
    float, np.ndarray, None
        Uncertainty (mag).
    """

    mag_value, mag_sigma = abs_mag

    # Inverse distance modulus: m = M + 5 log10(d) - 5; the distance
    # uncertainty is deliberately not folded into the error budget
    app_mag = mag_value + 5.0 * np.log10(distance[0]) - 5.0

    return app_mag, mag_sigma
@typechecked
def parallax_to_distance(
    parallax: Union[
        Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]
    ],
) -> Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]]:
    """
    Function for converting from parallax to distance.

    Parameters
    ----------
    parallax : tuple(float, float), tuple(np.ndarray, np.ndarray)
        Parallax and optional uncertainty (mas). The
        uncertainty is not used if set to ``None``,
        for example, ``parallax=(2., None)``.

    Returns
    -------
    float, np.ndarray
        Distance (pc).
    float, np.ndarray, None
        Uncertainty (pc).
    """

    plx_value, plx_sigma = parallax

    # A parallax in mas corresponds to a distance of 1e3/parallax pc
    distance = 1e3 / plx_value

    if plx_sigma is None:
        return distance, None

    # Symmetrize the asymmetric errors obtained by shifting the
    # parallax by +/- one sigma before inverting it
    dist_lower = distance - 1.0 / ((plx_value + plx_sigma) * 1e-3)
    dist_upper = 1.0 / ((plx_value - plx_sigma) * 1e-3) - distance

    return distance, (dist_upper + dist_lower) / 2.0
@typechecked
def logg_to_mass(
    logg: Union[float, np.ndarray], radius: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Function for converting :math:`\\log(g)` and a radius into a mass.

    Parameters
    ----------
    logg : float, np.ndarray
        Log10 of the surface gravity (cgs).
    radius : float, np.ndarray
        Radius (Rjup). The input is not modified.

    Returns
    -------
    float, np.ndarray
        Mass (Mjup).
    """

    surface_grav = 1e-2 * 10.0**logg  # (m s-2)

    # Use a new variable instead of radius *= constants.R_JUP so
    # that an np.ndarray argument is not modified in place
    radius_m = radius * constants.R_JUP  # (m)

    mass = surface_grav * radius_m**2 / constants.GRAVITY  # (kg)

    return mass / constants.M_JUP


@typechecked
def logg_to_radius(
    logg: Union[float, np.ndarray], mass: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Function for converting :math:`\\log(g)` and a mass into a radius.

    Parameters
    ----------
    logg : float, np.ndarray
        Log10 of the surface gravity (cgs).
    mass : float, np.ndarray
        Mass (Mjup). The input is not modified.

    Returns
    -------
    float, np.ndarray
        Radius (Rjup).
    """

    surface_grav = 1e-2 * 10.0**logg  # (m s-2)
    mass_kg = mass * constants.M_JUP  # (kg)
    radius = np.sqrt(mass_kg * constants.GRAVITY / surface_grav)  # (m)

    return radius / constants.R_JUP


@typechecked
def mass_to_logg(
    mass: Union[float, np.ndarray], radius: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Function for converting a mass and radius into :math:`\\log(g)`.

    Parameters
    ----------
    mass : float, np.ndarray
        Mass ($M_\\mathrm{J}$). The input is not modified.
    radius : float, np.ndarray
        Radius ($R_\\mathrm{J}$). The input is not modified.

    Returns
    -------
    float, np.ndarray
        Surface gravity :math:`\\log(g)`.
    """

    # Convert into new variables instead of using in-place operators,
    # which would modify np.ndarray arguments of the caller
    mass_kg = mass * constants.M_JUP  # (kg)
    radius_m = radius * constants.R_JUP  # (m)

    gravity = 1e2 * mass_kg * constants.GRAVITY / radius_m**2  # (cm s-2)

    return np.log10(gravity)
@typechecked
def luminosity_to_teff(
    luminosity: Union[float, np.ndarray], radius: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Function for converting a luminosity and radius
    into :math:`T_\\mathrm{eff}`.

    Parameters
    ----------
    luminosity : float, np.ndarray
        Bolometric luminosity ($L_\\odot$).
    radius : float, np.ndarray
        Radius ($R_\\mathrm{J}$). The input is not modified.

    Returns
    -------
    float, np.ndarray
        Effective temperature (K).
    """

    # Use a new variable instead of radius *= constants.R_JUP so
    # that an np.ndarray argument is not modified in place
    radius_m = radius * constants.R_JUP  # (m)

    # Stefan-Boltzmann law; the luminosity is converted from solar
    # luminosities to W so the temperature comes out in K (the
    # previous implementation omitted the L_SUN factor, which is
    # inconsistent with the documented L_sun input unit)
    teff = (
        luminosity
        * constants.L_SUN
        / (4.0 * np.pi * radius_m**2 * constants.SIGMA_SB)
    ) ** 0.25

    return teff
@typechecked
def add_optical_constants(input_path: str, database: h5py._hl.files.File) -> None:
    """
    Function for adding the optical constants of crystalline and amorphous
    MgSiO3 and Fe to the database.

    Parameters
    ----------
    input_path : str
        Folder where the data is located.
    database : h5py._hl.files.File
        Database.

    Returns
    -------
    None
        NoneType
    """

    if not os.path.exists(input_path):
        os.makedirs(input_path)

    url = "https://home.strw.leidenuniv.nl/~stolker/species/optical_constants.zip"

    data_file = os.path.join(input_path, "optical_constants.zip")

    # Download the archive only if it is not already cached locally
    if not os.path.isfile(data_file):
        print("\nDownloading optical constants (87 kB)...", end="", flush=True)
        urllib.request.urlretrieve(url, data_file)
        print(" [DONE]")

    print("\nUnpacking optical constants...", end="", flush=True)

    with zipfile.ZipFile(data_file, "r") as zip_ref:
        zip_ref.extractall(input_path)

    print(" [DONE]")

    print("\nAdding optical constants of MgSiO3...", end="")

    # Pairs of (extracted data file, database group); the repeated
    # load/store stanzas are replaced by a single data-driven loop
    mgsio3_files = [
        (
            "optical_constants/mgsio3/crystalline/"
            "mgsio3_jaeger_98_scott_96_axis1.dat",
            "dust/mgsio3/crystalline/axis_1",
        ),
        (
            "optical_constants/mgsio3/crystalline/"
            "mgsio3_jaeger_98_scott_96_axis2.dat",
            "dust/mgsio3/crystalline/axis_2",
        ),
        (
            "optical_constants/mgsio3/crystalline/"
            "mgsio3_jaeger_98_scott_96_axis3.dat",
            "dust/mgsio3/crystalline/axis_3",
        ),
        (
            "optical_constants/mgsio3/amorphous/"
            "mgsio3_jaeger_2003_reformat.dat",
            "dust/mgsio3/amorphous",
        ),
    ]

    for file_name, db_tag in mgsio3_files:
        data = np.loadtxt(os.path.join(input_path, file_name))
        database.create_dataset(db_tag, data=data)

    print(" [DONE]")

    print("Adding optical constants of Fe...", end="")

    fe_files = [
        (
            "optical_constants/fe/crystalline/fe_henning_1996.dat",
            "dust/fe/crystalline",
        ),
        (
            "optical_constants/fe/amorphous/fe_pollack_1994.dat",
            "dust/fe/amorphous",
        ),
    ]

    for file_name, db_tag in fe_files:
        data = np.loadtxt(os.path.join(input_path, file_name))
        database.create_dataset(db_tag, data=data)

    print(" [DONE]")
def _store_cross_sections(
    input_path: str,
    database: h5py._hl.files.File,
    syn_phot,
    fits_name: str,
    group_tag: str,
    shape_label: str,
    size_key: str,
    size_label: str,
    width_key: str,
    width_label: str,
) -> None:
    """
    Helper for reading a FITS file with extinction cross sections,
    normalizing them to the V band, and storing the grid in the
    database. Factors out the code that was duplicated between the
    log-normal and power-law size distributions.

    Parameters
    ----------
    input_path : str
        Folder where the data is located.
    database : h5py._hl.files.File
        Database.
    syn_phot : species.phot.syn_phot.SyntheticPhotometry
        Synthetic photometry object for the V band.
    fits_name : str
        Name of the FITS file in ``input_path``.
    group_tag : str
        Database group tag (e.g. 'lognorm' or 'powerlaw').
    shape_label : str
        Label describing the axes of the data cube.
    size_key : str
        Dataset name for the grain size axis (third HDU).
    size_label : str
        Printed label for the grain size axis.
    width_key : str
        Dataset name for the distribution parameter axis (fourth HDU).
    width_label : str
        Printed label for the distribution parameter axis.

    Returns
    -------
    None
        NoneType
    """

    with fits.open(os.path.join(input_path, fits_name)) as hdu_list:
        data_cross = hdu_list[0].data
        data_wavel = hdu_list[1].data

        # Normalize every cross-section spectrum to its V-band value
        for i in range(data_cross.shape[1]):
            for j in range(data_cross.shape[2]):
                cross_vband = syn_phot.spectrum_to_flux(data_wavel, data_cross[:, i, j])
                data_cross[:, i, j] /= cross_vband[0]

        database.create_dataset(
            f"dust/{group_tag}/mgsio3/crystalline/cross_section", data=data_cross
        )

        print(f" - Data shape {shape_label}: {hdu_list[0].data.shape}")

        database.create_dataset(
            f"dust/{group_tag}/mgsio3/crystalline/wavelength", data=data_wavel
        )

        data_range = f"{np.amin(hdu_list[1].data)} - {np.amax(hdu_list[1].data)}"
        print(f" - Wavelength range: {data_range} um")

        database.create_dataset(
            f"dust/{group_tag}/mgsio3/crystalline/{size_key}", data=hdu_list[2].data
        )

        data_range = f"{np.amin(hdu_list[2].data)} - {np.amax(hdu_list[2].data)}"
        print(f" - {size_label}: {data_range} um")

        database.create_dataset(
            f"dust/{group_tag}/mgsio3/crystalline/{width_key}", data=hdu_list[3].data
        )

        data_range = f"{np.amin(hdu_list[3].data)} - {np.amax(hdu_list[3].data)}"
        print(f" - {width_label}: {data_range}")


@typechecked
def add_cross_sections(input_path: str, database: h5py._hl.files.File) -> None:
    """
    Function for adding the extinction cross section of
    crystalline MgSiO3 for a log-normal and power-law size
    distribution to the database.

    Parameters
    ----------
    input_path : str
        Folder where the data is located.
    database : h5py._hl.files.File
        Database.

    Returns
    -------
    None
        NoneType
    """

    # Function-scope import, presumably to avoid a circular
    # import at module load time -- TODO confirm
    from species.phot.syn_phot import SyntheticPhotometry

    if not os.path.exists(input_path):
        os.makedirs(input_path)

    url = "https://home.strw.leidenuniv.nl/~stolker/species/lognorm_mgsio3_c_ext.fits"

    data_file = os.path.join(input_path, "lognorm_mgsio3_c_ext.fits")

    print(
        "\nDownloading log-normal dust cross sections (231 kB)...", end="", flush=True
    )
    urllib.request.urlretrieve(url, data_file)
    print(" [DONE]")

    # V-band synthetic photometry used to normalize the cross sections
    syn_phot = SyntheticPhotometry("Generic/Bessell.V")

    print("\nAdding log-normal dust cross sections:")

    _store_cross_sections(
        input_path,
        database,
        syn_phot,
        "lognorm_mgsio3_c_ext.fits",
        "lognorm",
        "(n_wavelength, n_radius, n_sigma)",
        "radius_g",
        "Mean geometric radius range",
        "sigma_g",
        "Geometric standard deviation range",
    )

    url = "https://home.strw.leidenuniv.nl/~stolker/species/powerlaw_mgsio3_c_ext.fits"

    data_file = os.path.join(input_path, "powerlaw_mgsio3_c_ext.fits")

    print("\nDownloading power-law dust cross sections (231 kB)...", end="", flush=True)
    urllib.request.urlretrieve(url, data_file)
    print(" [DONE]")

    print("\nAdding power-law dust cross sections:")

    _store_cross_sections(
        input_path,
        database,
        syn_phot,
        "powerlaw_mgsio3_c_ext.fits",
        "powerlaw",
        "(n_wavelength, n_radius, n_exponent)",
        "radius_max",
        "Maximum grain radius range",
        "exponent",
        "Power-law exponent range",
    )