├── imagine ├── templates │ ├── __init__.py │ ├── dummy_field_template.py │ ├── conftest.py │ ├── field_factory_template.py │ ├── thermal_electrons_template.py │ ├── magnetic_field_template.py │ ├── pipeline_template.py │ └── simulator_template.py ├── __version__.py ├── priors │ ├── __init__.py │ └── basic_priors.py ├── observables │ ├── __init__.py │ └── observable.py ├── simulators │ ├── __init__.py │ ├── test_simulator.py │ └── test │ │ └── bi_simulator.py ├── likelihoods │ ├── __init__.py │ ├── simple_likelihood.py │ ├── likelihood.py │ └── ensemble_likelihood.py ├── __init__.py ├── fields │ ├── hamx │ │ ├── __init__.py │ │ ├── tereg_ymw16.py │ │ ├── breg_lsa.py │ │ ├── cre_analytic.py │ │ ├── brnd_es.py │ │ ├── breg_jf12.py │ │ └── brnd_jf12.py │ ├── __init__.py │ ├── test_field.py │ ├── basic_fields.py │ └── base_fields.py ├── pipelines │ ├── __init__.py │ ├── ultranest_pipeline.py │ └── emcee_pipeline.py ├── tools │ ├── __init__.py │ ├── carrier_mapper.py │ ├── class_tools.py │ ├── timer.py │ ├── random_seed.py │ ├── randmodule.cc │ ├── config.py │ ├── masker.py │ ├── misc.py │ ├── io.py │ ├── parallel_ops.py │ └── covariance_estimator.py └── tests │ ├── test_observable.py │ ├── mocks_for_templates.py │ ├── test_dependencies.py │ └── test_likelihood.py ├── doc ├── source │ ├── tutorial_one.nblink │ ├── tutorial_wmap.nblink │ ├── logo.png │ ├── tutorial_fields.nblink │ ├── tutorial_masks.nblink │ ├── tutorial_priors.nblink │ ├── elephant.png │ ├── tutorial_datasets.nblink │ ├── tutorial_hammurabi.nblink │ ├── tutorial_simulator.nblink │ ├── img_data_demo.png │ ├── imagine_design.png │ ├── evidence.rst │ ├── posterior.rst │ ├── parallel.rst │ ├── imagine.rst │ ├── imagine.priors.rst │ ├── imagine.simulators.rst │ ├── imagine.observables.rst │ ├── imagine.likelihoods.rst │ ├── imagine.fields.hamx.rst │ ├── imagine.pipelines.rst │ ├── imagine.fields.rst │ ├── index.rst │ ├── imagine.tools.rst │ ├── design.rst │ ├── conf.py │ └── installation.rst ├── requirements_RTD.txt └── Makefile ├── tutorials ├── requirements_tutorials.txt └── imagine_design.png ├── runs ├── tutorial_example │ ├── pipeline.pkl │ ├── progress_report.pdf │ └── chains │ │ ├── multinest_post_equal_weights.dat │ │ ├── multinest_IS.points │ │ ├── multinest_IS.ptprob │ │ ├── multinest_.txt │ │ ├── multinest_IS.iterinfo │ │ ├── multinest_resume.dat │ │ ├── multinest_summary.txt │ │ └── multinest_stats.dat └── tutorial_example_completed │ ├── pipeline.pkl │ ├── progress_report.pdf │ └── chains │ ├── multinest_IS.points │ ├── multinest_IS.ptprob │ ├── multinest_IS.iterinfo │ ├── multinest_resume.dat │ ├── multinest_summary.txt │ └── multinest_stats.dat ├── requirements_dev.txt ├── requirements.txt ├── setup.cfg ├── imagine_conda_env.yml ├── .travis.yml ├── examples ├── example_pipeline.batch └── README.md ├── setup.py ├── conftest.py ├── .gitignore ├── .github └── workflows │ └── docker-publish.yml ├── README.md └── docker └── Dockerfile /imagine/templates/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/source/tutorial_one.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_one.ipynb" 3 | } -------------------------------------------------------------------------------- /tutorials/requirements_tutorials.txt: -------------------------------------------------------------------------------- 1 | -e .. 
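# ('-e ..' above installs the IMAGINE package itself, from the repository
# root, in editable/development mode)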
2 | astroquery>=0.4.0 3 | seaborn>=0.10.0 4 | -------------------------------------------------------------------------------- /doc/source/tutorial_wmap.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_wmap.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/doc/source/logo.png -------------------------------------------------------------------------------- /doc/source/tutorial_fields.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_fields.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/tutorial_masks.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_masks.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/tutorial_priors.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_priors.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/elephant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/doc/source/elephant.png -------------------------------------------------------------------------------- /doc/source/tutorial_datasets.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_datasets.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/tutorial_hammurabi.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_hammurabi.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/tutorial_simulator.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../../tutorials/tutorial_simulator.ipynb" 3 | } 4 | -------------------------------------------------------------------------------- /doc/source/img_data_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/doc/source/img_data_demo.png -------------------------------------------------------------------------------- /tutorials/imagine_design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/tutorials/imagine_design.png -------------------------------------------------------------------------------- /doc/source/imagine_design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/doc/source/imagine_design.png -------------------------------------------------------------------------------- /runs/tutorial_example/pipeline.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example/pipeline.pkl -------------------------------------------------------------------------------- /runs/tutorial_example/progress_report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example/progress_report.pdf -------------------------------------------------------------------------------- /runs/tutorial_example/chains/multinest_post_equal_weights.dat: -------------------------------------------------------------------------------- 1 | 0.298532654789288454E+01 0.265690569958124279E+02 -0.213014150480233525E+03 2 | -------------------------------------------------------------------------------- /runs/tutorial_example_completed/pipeline.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example_completed/pipeline.pkl -------------------------------------------------------------------------------- /runs/tutorial_example/chains/multinest_IS.points: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example/chains/multinest_IS.points -------------------------------------------------------------------------------- /runs/tutorial_example/chains/multinest_IS.ptprob: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example/chains/multinest_IS.ptprob -------------------------------------------------------------------------------- /doc/source/evidence.rst: -------------------------------------------------------------------------------- 1 | .. _evidence: 2 | 3 | ================ 4 | Model comparison 5 | ================ 6 | 7 | Using *Bayesian evidence* to compare models. 
8 |
--------------------------------------------------------------------------------
/runs/tutorial_example/chains/multinest_.txt:
--------------------------------------------------------------------------------
1 | 0.100000000000000000E+01 0.426028300960467050E+03 0.298532654789288454E+01 0.265690569958124279E+02
2 |
--------------------------------------------------------------------------------
/runs/tutorial_example/chains/multinest_IS.iterinfo:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example/chains/multinest_IS.iterinfo
--------------------------------------------------------------------------------
/runs/tutorial_example_completed/progress_report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example_completed/progress_report.pdf
--------------------------------------------------------------------------------
/runs/tutorial_example_completed/chains/multinest_IS.points:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example_completed/chains/multinest_IS.points
--------------------------------------------------------------------------------
/runs/tutorial_example_completed/chains/multinest_IS.ptprob:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example_completed/chains/multinest_IS.ptprob
--------------------------------------------------------------------------------
/runs/tutorial_example_completed/chains/multinest_IS.iterinfo:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IMAGINE-Consortium/imagine/HEAD/runs/tutorial_example_completed/chains/multinest_IS.iterinfo
--------------------------------------------------------------------------------
/requirements_dev.txt:
--------------------------------------------------------------------------------
1 | -e .
2 | check-manifest
3 | nbval>=0.9.6
4 | pytest>=4.6.0
5 | pytest-cov
6 | pytest-mpl
7 | readme_renderer[md]
8 | setuptools>=38.6.0
9 | twine>=1.13.0
10 | wheel>=0.31.0
11 |
--------------------------------------------------------------------------------
/doc/requirements_RTD.txt:
--------------------------------------------------------------------------------
1 | astropy>=4.0.0
2 | e13tools>=0.9.0
3 | ipython
4 | nbsphinx
5 | nbsphinx-link
6 | numpy>=1.18.0
7 | scipy>=1.1.0
8 | sphinx_rtd_theme
9 | sphinx>=1.8
10 | Pygments==2.15.0
11 | pandas>=1.1.5
12 |
--------------------------------------------------------------------------------
/doc/source/posterior.rst:
--------------------------------------------------------------------------------
1 | .. _posterior:
2 |
3 | =======================
4 | Constraining parameters
5 | =======================
6 |
7 | Computing the *posterior distribution* given a model and a dataset.
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/imagine/__version__.py:
--------------------------------------------------------------------------------
1 | """
2 | IMAGINE Version
3 | ===============
4 | Stores the version of the *IMAGINE* package.
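The version defined here can be accessed at runtime as
``imagine.__version__`` (it is re-exported in ``imagine/__init__.py``).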
5 |
6 | """
7 |
8 |
9 | # %% VERSIONS
10 | # Default/Latest/Current version
11 | __version__ = '2.0.0a3'
12 |
--------------------------------------------------------------------------------
/runs/tutorial_example/chains/multinest_resume.dat:
--------------------------------------------------------------------------------
1 | F
2 | 3400 4482 1 500
3 | -0.926014132734176674E+05 -0.925894006659859151E+05
4 | T
5 | 0
6 | F F 0 500
7 | 0.303361591401490579E-02 -0.926014132734176674E+05 -0.925894006659859151E+05
8 |
--------------------------------------------------------------------------------
/runs/tutorial_example_completed/chains/multinest_resume.dat:
--------------------------------------------------------------------------------
1 | F
2 | 9676 11913 1 500
3 | 0.608864624995467011E+03 0.625158760088118584E+03
4 | T
5 | 0
6 | T F 0 500
7 | 0.107538715825105881E-07 0.608864624995467011E+03 0.625158760088118584E+03
8 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | astropy>=4.0.0
2 | corner>=2.0.0
3 | cloudpickle>=1.6.0
4 | cmasher>=1.5.0
5 | dynesty>=1.0.0
6 | e13tools>=0.9.3
7 | emcee>=3.0.2
8 | h5py>=2.10.0
9 | hampyx>=2.4.0
10 | healpy>=1.0.0
11 | hickle>=4.0.1
12 | mpi4py>=3.0.0
13 | numpy>=1.18.0
14 | pandas>=1.1.5
15 | pymultinest>=2.10.0
16 | scipy>=1.1.0
17 | schwimmbad>=0.3.1
18 | ultranest>=3.2.0
19 |
--------------------------------------------------------------------------------
/imagine/priors/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # %% IMPORTS
4 | # Import core modules
5 | from . import prior
6 | from .prior import *
7 |
8 | # Import base modules
9 | from . import basic_priors
10 | from .basic_priors import *
11 |
12 | # All declaration
13 | __all__ = ['basic_priors', 'prior']
14 | __all__.extend(basic_priors.__all__)
15 | __all__.extend(prior.__all__)
16 |
--------------------------------------------------------------------------------
/doc/source/parallel.rst:
--------------------------------------------------------------------------------
1 | ***************
2 | Parallelisation
3 | ***************
4 |
5 | The IMAGINE pipeline was designed with hybrid MPI/OpenMP use on a cluster in
6 | mind: the Pipeline distributes sampling work *across different nodes* using
7 | MPI, while Fields and Simulators are assumed to use OpenMP (or similar shared
8 | memory multiprocessing) to run in parallel *within a single multi-core node*.
9 |
--------------------------------------------------------------------------------
/doc/source/imagine.rst:
--------------------------------------------------------------------------------
1 | imagine package
2 | ===============
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 |    :maxdepth: 4
9 |
10 |    imagine.fields
11 |    imagine.likelihoods
12 |    imagine.observables
13 |    imagine.pipelines
14 |    imagine.priors
15 |    imagine.simulators
16 |    imagine.tools
17 |
18 | Module contents
19 | ---------------
20 |
21 | .. automodule:: imagine
22 |    :members:
23 |    :undoc-members:
24 |    :show-inheritance:
25 |
--------------------------------------------------------------------------------
/imagine/observables/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # %% IMPORTS
4 | # Import base modules
5 | from .
import ( 6 | dataset, observable, observable_dict) 7 | from .dataset import * 8 | from .observable import * 9 | from .observable_dict import * 10 | 11 | # All declaration 12 | __all__ = ['dataset', 'observable', 'observable_dict'] 13 | __all__.extend(dataset.__all__) 14 | __all__.extend(observable.__all__) 15 | __all__.extend(observable_dict.__all__) 16 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal = 0 3 | 4 | [tool:pytest] 5 | addopts = -v --cov --nbval --cov-config=setup.cfg --cov-report=term-missing 6 | testpaths = 7 | ./imagine 8 | ./tutorials 9 | filterwarnings = 10 | ignore::DeprecationWarning 11 | ignore::FutureWarning 12 | markers = 13 | quick: avoid slow tests (e.g. tutorials) 14 | 15 | [coverage:run] 16 | include = imagine/* 17 | omit = 18 | imagine/tests/* 19 | imagine/*/tests/* 20 | imagine/__version__* 21 | -------------------------------------------------------------------------------- /imagine/simulators/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding utf-8 -*- 2 | 3 | # %% IMPORTS 4 | # Import core modules 5 | from . import simulator 6 | from .simulator import * 7 | 8 | # Import base modules 9 | from . import ( 10 | hammurabi, test_simulator) 11 | from .hammurabi import * 12 | from .test_simulator import * 13 | 14 | # All declaration 15 | __all__ = ['hammurabi', 'simulator', 'test_simulator'] 16 | __all__.extend(hammurabi.__all__) 17 | __all__.extend(simulator.__all__) 18 | __all__.extend(test_simulator.__all__) 19 | -------------------------------------------------------------------------------- /imagine/templates/dummy_field_template.py: -------------------------------------------------------------------------------- 1 | from imagine.fields import DummyField 2 | 3 | class DummyFieldTemplate(DummyField): 4 | """ 5 | Description of the dummy field 6 | """ 7 | 8 | # Class attributes 9 | NAME = 'name_of_the_dummy_field' 10 | 11 | @property 12 | def field_checklist(self): 13 | return {'Parameter_A': 'parameter_A_settings', 14 | 'Parameter_B': None} 15 | @property 16 | def simulator_controllist(self): 17 | return {'simulator_property_A': 'some_setting'} 18 | -------------------------------------------------------------------------------- /imagine_conda_env.yml: -------------------------------------------------------------------------------- 1 | name: imagine 2 | channels: 3 | - conda-forge 4 | - anaconda 5 | - defaults 6 | dependencies: 7 | - astropy=4.0.* 8 | - corner=2.0.* 9 | - cmasher=1.5.* 10 | - cloudpickle=1.6.* 11 | - cython=0.29.* 12 | - dynesty=1.0.* 13 | - dill=0.3.* 14 | - e13tools=0.9.* 15 | - emcee=3.0.* 16 | - healpy=1.* 17 | - h5py=2.10.*=mpi* 18 | - ipykernel 19 | - mpi4py=3.* 20 | - numpy=1.18.* 21 | - pandas=1.1.* 22 | - python=3.7.* 23 | - pymultinest=2.10 24 | - scipy=1.* 25 | - schwimmbad=0.3.* 26 | - ultranest=3.2.* 27 | -------------------------------------------------------------------------------- /imagine/likelihoods/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding utf-8 -*- 2 | 3 | # %% IMPORTS 4 | # Import core modules 5 | from . import likelihood 6 | from .likelihood import * 7 | 8 | # Import base modules 9 | from . 
import (
10 |     ensemble_likelihood, simple_likelihood)
11 | from .ensemble_likelihood import *
12 | from .simple_likelihood import *
13 |
14 | # All declaration
15 | __all__ = ['ensemble_likelihood', 'likelihood', 'simple_likelihood']
16 | __all__.extend(ensemble_likelihood.__all__)
17 | __all__.extend(likelihood.__all__)
18 | __all__.extend(simple_likelihood.__all__)
19 |
--------------------------------------------------------------------------------
/imagine/__init__.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # Version import
3 | from .__version__ import __version__
4 |
5 | # Global configuration and settings
6 | from .tools.config import rc
7 | # Saving/loading of IMAGINE pipelines
8 | from .tools.io import load_pipeline, save_pipeline
9 |
10 | # Import subpackages
11 | from . import (
12 |     fields, likelihoods, observables, pipelines, priors, simulators, tools)
13 |
14 | # All declaration
15 | __all__ = ['fields', 'likelihoods', 'observables', 'pipelines', 'priors',
16 |            'simulators', 'rc', 'tools', 'load_pipeline', 'save_pipeline']
17 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | services:
3 |   - docker
4 | install:
5 |   - export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH; else echo $TRAVIS_PULL_REQUEST_BRANCH; fi)
6 |   - export REPO=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_REPO_SLUG; else echo $TRAVIS_PULL_REQUEST_SLUG; fi)
7 |   - echo "TRAVIS_BRANCH=$TRAVIS_BRANCH, PR=$PR, BRANCH=$BRANCH", REPO=$REPO
8 |   - docker build docker -t imagine --build-arg BRANCH=$BRANCH --build-arg REPO=$REPO
9 | script:
10 |   - docker run -i -t imagine /bin/bash -c "source ~/.bashrc && cd /home/lab/imagine && pytest -m quick"
11 |
--------------------------------------------------------------------------------
/imagine/templates/conftest.py:
--------------------------------------------------------------------------------
1 | # Settings for the test_templates test suite
2 | # Ignore this file unless you know what you are doing!
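#
# The template files import placeholder packages (MY_GMF_MODEL,
# MY_GALAXY_MODEL, MY_PACKAGE, MY_SIMULATOR, MY_SAMPLER). Registering mock
# modules under those names in sys.modules, as done below, lets pytest
# import and exercise the templates without any real user code installed.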
3 | import sys 4 | import numpy as np 5 | import astropy.units as u 6 | from imagine.fields import DummyField 7 | import imagine.tests.mocks_for_templates as mock 8 | 9 | # Here we set up several mock modules which allow testing the template files 10 | sys.modules['MY_GMF_MODEL'] = mock.MY_GMF_MODEL 11 | sys.modules['MY_GALAXY_MODEL'] = mock.MY_GALAXY_MODEL 12 | sys.modules['MY_PACKAGE'] = mock.MY_PACKAGE 13 | sys.modules['MY_SIMULATOR'] = mock.MY_SIMULATOR 14 | sys.modules['MY_SAMPLER'] = mock.MY_SAMPLER 15 | -------------------------------------------------------------------------------- /runs/tutorial_example/chains/multinest_summary.txt: -------------------------------------------------------------------------------- 1 | 0.298532654789288454E+01 0.265690569958124279E+02 0.000000000000000000E+00 0.000000000000000000E+00 0.298532654789288454E+01 0.265690569958124279E+02 0.298532654789288454E+01 0.265690569958124279E+02 -0.626563604722378659E+06 -0.213014150480233525E+03 -0.223251984738017626E+03 0.999846047262577864E+00 2 | 0.298532654789288454E+01 0.265690569958124279E+02 0.000000000000000000E+00 0.000000000000000000E+00 0.298532654789288454E+01 0.265690569958124279E+02 0.298532654789288454E+01 0.265690569958124279E+02 -0.223226758578655705E+03 -0.213014150480233525E+03 3 | -------------------------------------------------------------------------------- /doc/source/imagine.priors.rst: -------------------------------------------------------------------------------- 1 | imagine.priors package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | imagine.priors.basic\_priors module 8 | ----------------------------------- 9 | 10 | .. automodule:: imagine.priors.basic_priors 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | imagine.priors.prior module 16 | --------------------------- 17 | 18 | .. automodule:: imagine.priors.prior 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: imagine.priors 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /runs/tutorial_example_completed/chains/multinest_summary.txt: -------------------------------------------------------------------------------- 1 | 0.300065502999376710E+01 0.270209077799705959E+02 0.386763843296461616E-03 0.140526462031145237E-01 0.300065504306927577E+01 0.270205504578084117E+02 0.300010754381571498E+01 0.270022788043516968E+02 0.608864624995467011E+03 0.626516167100743701E+03 0.609268613169495666E+03 0.212684336428523683E-01 2 | 0.300065502999094402E+01 0.270209077799450910E+02 0.386774792548402985E-03 0.140526707685789936E-01 0.300065504306927577E+01 0.270205504578084117E+02 0.300010754381571498E+01 0.270022788043516968E+02 0.609216100994649651E+03 0.626516167100743701E+03 3 | -------------------------------------------------------------------------------- /runs/tutorial_example/chains/multinest_stats.dat: -------------------------------------------------------------------------------- 1 | Nested Sampling Global Log-Evidence : -0.223226758578655705E+03 +/- 0.142916815654577062E+00 2 | Nested Importance Sampling Global Log-Evidence: -0.223251984738017626E+03 +/- 0.999846047262577864E+00 3 | 4 | Dim No. Mean Sigma 5 | 1 0.298532654789288454E+01 0.000000000000000000E+00 6 | 2 0.265690569958124279E+02 0.000000000000000000E+00 7 | 8 | Maximum Likelihood Parameters 9 | Dim No. 
Parameter 10 | 1 0.298532654789288454E+01 11 | 2 0.265690569958124279E+02 12 | 13 | MAP Parameters 14 | Dim No. Parameter 15 | 1 0.298532654789288454E+01 16 | 2 0.265690569958124279E+02 17 | -------------------------------------------------------------------------------- /runs/tutorial_example_completed/chains/multinest_stats.dat: -------------------------------------------------------------------------------- 1 | Nested Sampling Global Log-Evidence : 0.609216100994649651E+03 +/- 0.180536176156572331E+00 2 | Nested Importance Sampling Global Log-Evidence: 0.609268613169495666E+03 +/- 0.212684336428523683E-01 3 | 4 | Dim No. Mean Sigma 5 | 1 0.300065502999094402E+01 0.386774792548402985E-03 6 | 2 0.270209077799450910E+02 0.140526707685789936E-01 7 | 8 | Maximum Likelihood Parameters 9 | Dim No. Parameter 10 | 1 0.300065504306927577E+01 11 | 2 0.270205504578084117E+02 12 | 13 | MAP Parameters 14 | Dim No. Parameter 15 | 1 0.300010754381571498E+01 16 | 2 0.270022788043516968E+02 17 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /imagine/fields/hamx/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding utf-8 -*- 2 | 3 | # %% IMPORTS 4 | # Import base modules 5 | from . import ( 6 | breg_lsa, breg_jf12, brnd_es, brnd_jf12, cre_analytic, tereg_ymw16) 7 | from .breg_lsa import * 8 | from .breg_jf12 import * 9 | from .brnd_es import * 10 | from .brnd_jf12 import * 11 | from .cre_analytic import * 12 | from .tereg_ymw16 import * 13 | 14 | # All declaration 15 | __all__ = ['breg_lsa', 'breg_jf12', 'brnd_es', 'brnd_jf12','cre_analytic', 'tereg_ymw16'] 16 | __all__.extend(breg_lsa.__all__) 17 | __all__.extend(breg_jf12.__all__) 18 | __all__.extend(brnd_es.__all__) 19 | __all__.extend(brnd_jf12.__all__) 20 | __all__.extend(cre_analytic.__all__) 21 | __all__.extend(tereg_ymw16.__all__) 22 | -------------------------------------------------------------------------------- /imagine/pipelines/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding utf-8 -*- 2 | 3 | # %% IMPORTS 4 | # Import core modules 5 | from . import pipeline 6 | from .pipeline import * 7 | 8 | # Import base modules 9 | from . 
import (
10 |     dynesty_pipeline, emcee_pipeline, multinest_pipeline, ultranest_pipeline)
11 | from .dynesty_pipeline import *
12 | from .emcee_pipeline import *
13 | from .multinest_pipeline import *
14 | from .ultranest_pipeline import *
15 |
16 | # All declaration
17 | __all__ = ['dynesty_pipeline', 'emcee_pipeline', 'multinest_pipeline',
18 |            'pipeline', 'ultranest_pipeline']
19 | __all__.extend(dynesty_pipeline.__all__)
20 | __all__.extend(emcee_pipeline.__all__)
21 | __all__.extend(multinest_pipeline.__all__)
22 | __all__.extend(pipeline.__all__)
23 | __all__.extend(ultranest_pipeline.__all__)
24 |
--------------------------------------------------------------------------------
/imagine/fields/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # %% IMPORTS
4 | # Import core modules
5 | from . import field, field_factory
6 | from .field import *
7 | from .field_factory import *
8 |
9 | # Import base modules
10 | from . import (
11 |     base_fields, basic_fields, grid, test_field)
12 | from .base_fields import *
13 | from .basic_fields import *
14 | from .grid import *
15 | from .test_field import *
16 |
17 | # Import subpackages
18 | from . import hamx
19 |
20 | # All declaration
21 | __all__ = ['base_fields', 'basic_fields', 'field', 'field_factory', 'grid',
22 |            'hamx', 'test_field']
23 | __all__.extend(base_fields.__all__)
24 | __all__.extend(basic_fields.__all__)
25 | __all__.extend(field.__all__)
26 | __all__.extend(field_factory.__all__)
27 | __all__.extend(grid.__all__)
28 | __all__.extend(test_field.__all__)
29 |
--------------------------------------------------------------------------------
/examples/example_pipeline.batch:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | #SBATCH --ntasks=10
3 | #SBATCH --cpus-per-task=16
4 | #SBATCH --job-name=IMAGINE
5 |
6 | # This is an example SLURM batch script. Please adjust to your needs.
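# (with the directives above, the job can use up to 10 tasks x 16 CPUs
# per task = 160 cores in total)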
7 | # We request 10 MPI tasks, which are used by MultiNest to explore the
8 | # likelihood space in parallel
9 | # We request 16 CPUs per task, which allows Hammurabi to use OpenMP to
10 | # speed up the likelihood calculations
11 |
12 | date
13 | echo
14 | echo 'Python used:'
15 | echo `which python`
16 | echo "MPI tasks used: $SLURM_NTASKS"
17 | echo "cores per task used: $SLURM_CPUS_PER_TASK"
18 | echo
19 |
20 | export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
21 |
22 | # Prepares the pipeline (if this was not done before)
23 | python examples/example_pipeline.py prepare
24 | # Launches IMAGINE
25 | mpirun -np $SLURM_NTASKS python examples/example_pipeline.py run
26 |
27 | echo
28 | date
29 |
--------------------------------------------------------------------------------
/imagine/templates/field_factory_template.py:
--------------------------------------------------------------------------------
1 | from imagine.fields import FieldFactory
2 | from imagine.priors import FlatPrior, GaussianPrior
3 | # Substitute this with your own code
4 | from MY_PACKAGE import MY_FIELD_CLASS
5 | from MY_PACKAGE import A_std_val, B_std_val, A_min, A_max, B_min, B_max, B_sig
6 |
7 | class FieldFactoryTemplate(FieldFactory):
8 |     """Example: field factory for YourFieldClass"""
9 |
10 |     # Class attributes
11 |     # Field class this factory uses
12 |     FIELD_CLASS = MY_FIELD_CLASS
13 |
14 |     # Default values are used for inactive parameters
15 |     DEFAULT_PARAMETERS = {'Parameter_A': A_std_val,
16 |                           'Parameter_B': B_std_val}
17 |
18 |     # All parameters need a range and a prior
19 |     PRIORS = {'Parameter_A': FlatPrior(xmin=A_min, xmax=A_max),
20 |               'Parameter_B': GaussianPrior(mu=B_std_val, sigma=B_sig)}
21 |
--------------------------------------------------------------------------------
/doc/source/imagine.simulators.rst:
--------------------------------------------------------------------------------
1 | imagine.simulators package
2 | ==========================
3 |
4 | Submodules
5 | ----------
6 |
7 | imagine.simulators.hammurabi module
8 | -----------------------------------
9 |
10 | .. automodule:: imagine.simulators.hammurabi
11 |    :members:
12 |    :undoc-members:
13 |    :show-inheritance:
14 |
15 | imagine.simulators.simulator module
16 | -----------------------------------
17 |
18 | .. automodule:: imagine.simulators.simulator
19 |    :members:
20 |    :undoc-members:
21 |    :show-inheritance:
22 |
23 | imagine.simulators.test\_simulator module
24 | -----------------------------------------
25 |
26 | .. automodule:: imagine.simulators.test_simulator
27 |    :members:
28 |    :undoc-members:
29 |    :show-inheritance:
30 |
31 | Module contents
32 | ---------------
33 |
34 | .. automodule:: imagine.simulators
35 |    :members:
36 |    :undoc-members:
37 |    :show-inheritance:
38 |
--------------------------------------------------------------------------------
/doc/source/imagine.observables.rst:
--------------------------------------------------------------------------------
1 | imagine.observables package
2 | ===========================
3 |
4 | Submodules
5 | ----------
6 |
7 | imagine.observables.dataset module
8 | ----------------------------------
9 |
10 | .. automodule:: imagine.observables.dataset
11 |    :members:
12 |    :undoc-members:
13 |    :show-inheritance:
14 |
15 | imagine.observables.observable module
16 | -------------------------------------
17 |
18 | ..
automodule:: imagine.observables.observable 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | imagine.observables.observable\_dict module 24 | ------------------------------------------- 25 | 26 | .. automodule:: imagine.observables.observable_dict 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: imagine.observables 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /doc/source/imagine.likelihoods.rst: -------------------------------------------------------------------------------- 1 | imagine.likelihoods package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | imagine.likelihoods.ensemble\_likelihood module 8 | ----------------------------------------------- 9 | 10 | .. automodule:: imagine.likelihoods.ensemble_likelihood 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | imagine.likelihoods.likelihood module 16 | ------------------------------------- 17 | 18 | .. automodule:: imagine.likelihoods.likelihood 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | imagine.likelihoods.simple\_likelihood module 24 | --------------------------------------------- 25 | 26 | .. automodule:: imagine.likelihoods.simple_likelihood 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | Module contents 32 | --------------- 33 | 34 | .. automodule:: imagine.likelihoods 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | -------------------------------------------------------------------------------- /imagine/fields/hamx/tereg_ymw16.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # IMAGINE imports 3 | from imagine.fields import DummyField, FieldFactory 4 | 5 | # All declaration 6 | __all__ = ['TEregYMW16', 'TEregYMW16Factory'] 7 | 8 | 9 | # %% CLASS DEFINITIONS 10 | class TEregYMW16(DummyField): 11 | """ 12 | This dummy field instructs the :py:class:`Hammurabi ` 13 | simulator class to use the HammurabiX's thermal electron density model YMW16 14 | """ 15 | # Class attributes 16 | NAME = 'tereg_ymw16' 17 | FIELD_CHECKLIST = {} 18 | SIMULATOR_CONTROLLIST = {'cue': (['thermalelectron', 'regular'], 19 | {'cue': '1'}), 20 | 'type': (['thermalelectron', 'regular'], 21 | {'type': 'ymw16'})} 22 | 23 | 24 | class TEregYMW16Factory(FieldFactory): 25 | """ 26 | Field factory that produces the dummy field :py:class:`TEregYMW16` 27 | (see its docs for details). 28 | """ 29 | # Class attributes 30 | FIELD_CLASS = TEregYMW16 31 | DEFAULT_PARAMETERS = {} 32 | PRIORS = {} 33 | -------------------------------------------------------------------------------- /doc/source/imagine.fields.hamx.rst: -------------------------------------------------------------------------------- 1 | imagine.fields.hamx package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | imagine.fields.hamx.breg\_lsa module 8 | ------------------------------------ 9 | 10 | .. automodule:: imagine.fields.hamx.breg_lsa 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | imagine.fields.hamx.brnd\_es module 16 | ----------------------------------- 17 | 18 | .. automodule:: imagine.fields.hamx.brnd_es 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | imagine.fields.hamx.cre\_analytic module 24 | ---------------------------------------- 25 | 26 | .. 
automodule:: imagine.fields.hamx.cre_analytic
27 |    :members:
28 |    :undoc-members:
29 |    :show-inheritance:
30 |
31 | imagine.fields.hamx.tereg\_ymw16 module
32 | ---------------------------------------
33 |
34 | .. automodule:: imagine.fields.hamx.tereg_ymw16
35 |    :members:
36 |    :undoc-members:
37 |    :show-inheritance:
38 |
39 | Module contents
40 | ---------------
41 |
42 | .. automodule:: imagine.fields.hamx
43 |    :members:
44 |    :undoc-members:
45 |    :show-inheritance:
46 |
--------------------------------------------------------------------------------
/imagine/tools/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # %% IMPORTS
4 | # Import core modules
5 | from . import config
6 | from .config import *
7 |
8 | # Import base modules
9 | from . import (
10 |     carrier_mapper, class_tools, covariance_estimator, io, masker,
11 |     misc, mpi_helper, parallel_ops, random_seed, timer, visualization)
12 | from .carrier_mapper import *
13 | from .class_tools import *
14 | from .covariance_estimator import *
15 | from .io import *
16 | from .masker import *
17 | from .misc import *
18 | from .mpi_helper import *
19 | from .parallel_ops import *
20 | from .random_seed import *
21 | from .timer import *
22 | from .visualization import *
23 |
24 | # All declaration
25 | __all__ = ['carrier_mapper', 'class_tools', 'config', 'covariance_estimator',
26 |            'io', 'masker', 'misc', 'mpi_helper', 'parallel_ops',
27 |            'random_seed', 'timer']
28 | __all__.extend(carrier_mapper.__all__)
29 | __all__.extend(class_tools.__all__)
30 | __all__.extend(config.__all__)
31 | __all__.extend(covariance_estimator.__all__)
32 | __all__.extend(io.__all__)
33 | __all__.extend(masker.__all__)
34 | __all__.extend(mpi_helper.__all__)
35 | __all__.extend(parallel_ops.__all__)
36 | __all__.extend(random_seed.__all__)
37 | __all__.extend(timer.__all__)
38 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Example IMAGINE scripts
2 |
3 | This directory contains two example IMAGINE scripts, plus a batch file,
4 | which complement the tutorials.
5 |
6 | * `basic_pipeline.py`
7 |   - Contains the same setup as the ["Basic elements..." tutorial][tutorial_one],
8 |     but runs using both `MultiNestPipeline` and `UltraNestPipeline`.
9 |     Both of these pipelines support MPI, which makes it possible to speed up the computation.
10 | * `example_pipeline.py`
11 |   - Contains a setup similar to the ["Example pipeline" tutorial][tutorial_wmap],
12 |     but also including a stochastic field component and varying one parameter
13 |     associated with it. The script is meant to be run in two steps:
14 |     executing it first to prepare (and test) the pipeline, and
15 |     using it again to run the pipeline (see the usage example below).
16 | * `example_pipeline.batch`
17 |   - Contains an example of a [SLURM][slurm_wiki] batch script which could be
18 |     used to start IMAGINE runs of `example_pipeline.py` in a
19 |     typical HPC environment.
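
For instance, following `example_pipeline.batch`, the two-step usage of
`example_pipeline.py` looks like the following (the number of MPI tasks is
only illustrative; adjust it to your system):

```sh
# First invocation: prepares (and tests) the pipeline
python examples/example_pipeline.py prepare
# Second invocation: runs the prepared pipeline (here, under MPI)
mpirun -np 10 python examples/example_pipeline.py run
```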
20 | 21 | 22 | [tutorial_wmap]: https://github.com/IMAGINE-Consortium/imagine/blob/master/tutorials/tutorial_wmap.ipynb 23 | [tutorial_one]: https://github.com/IMAGINE-Consortium/imagine/blob/master/tutorials/tutorial_one.ipynb 24 | [slurm_wiki]: https://en.wikipedia.org/wiki/Slurm_Workload_Manager 25 | -------------------------------------------------------------------------------- /doc/source/imagine.pipelines.rst: -------------------------------------------------------------------------------- 1 | imagine.pipelines package 2 | ========================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | imagine.pipelines.dynesty\_pipeline module 8 | ------------------------------------------ 9 | 10 | .. automodule:: imagine.pipelines.dynesty_pipeline 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | imagine.pipelines.emcee\_pipeline module 16 | ---------------------------------------- 17 | 18 | .. automodule:: imagine.pipelines.emcee_pipeline 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | imagine.pipelines.multinest\_pipeline module 24 | -------------------------------------------- 25 | 26 | .. automodule:: imagine.pipelines.multinest_pipeline 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | imagine.pipelines.pipeline module 32 | --------------------------------- 33 | 34 | .. automodule:: imagine.pipelines.pipeline 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | imagine.pipelines.ultranest\_pipeline module 40 | -------------------------------------------- 41 | 42 | .. automodule:: imagine.pipelines.ultranest_pipeline 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | Module contents 48 | --------------- 49 | 50 | .. automodule:: imagine.pipelines 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | -------------------------------------------------------------------------------- /imagine/tools/carrier_mapper.py: -------------------------------------------------------------------------------- 1 | """ 2 | The mapper module is designed for implementing distribution mapping functions. 3 | """ 4 | 5 | # %% IMPORTS 6 | # Built-in imports 7 | import logging as log 8 | 9 | # Package imports 10 | import numpy as np 11 | 12 | # All declaration 13 | __all__ = ['exp_mapper', 'unity_mapper'] 14 | 15 | 16 | # %% FUNCTION DEFINITIONS 17 | def unity_mapper(x, a=0., b=1.): 18 | """ 19 | Maps x from [0, 1] into the interval [a, b]. 20 | 21 | Parameters 22 | ---------- 23 | x : float 24 | The variable to be mapped. 25 | a : float 26 | The lower parameter value limit. 27 | b : float 28 | The upper parameter value limit. 29 | 30 | Returns 31 | ------- 32 | numpy.float64 33 | The mapped parameter value. 34 | """ 35 | log.debug('@ carrier_mapper::unity_mapper') 36 | return np.float64(x) * (np.float64(b)-np.float64(a)) + np.float64(a) 37 | 38 | 39 | def exp_mapper(x, a=0, b=1): 40 | """ 41 | Maps x from [0, 1] into the interval [exp(a), exp(b)]. 42 | 43 | Parameters 44 | ---------- 45 | x : float 46 | The variable to be mapped. 47 | a : float 48 | The lower parameter value limit. 49 | b : float 50 | The upper parameter value limit. 51 | 52 | Returns 53 | ------- 54 | numpy.float64 55 | The mapped parameter value. 
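
    Examples
    --------
    A minimal doctest-style check (x = 0.5 is mapped onto exp(0.5)):

    >>> import numpy as np
    >>> np.isclose(exp_mapper(0.5, a=0, b=1), np.exp(0.5))
    True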
56 | """ 57 | log.debug('@ carrier_mapper::exp_mapper') 58 | return np.exp(unity_mapper(x, a, b)) 59 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Built-in imports 2 | from codecs import open 3 | import re 4 | 5 | # Package imports 6 | from setuptools import find_packages, setup 7 | 8 | # Get the requirements list 9 | with open('requirements.txt', 'r') as f: 10 | requirements = f.read().splitlines() 11 | 12 | # Read the __version__.py file 13 | with open('imagine/__version__.py', 'r') as f: 14 | vf = f.read() 15 | 16 | # Obtain version from read-in __version__.py file 17 | version = re.search(r"^_*version_* = ['\"]([^'\"]*)['\"]", vf, re.M).group(1) 18 | 19 | setup(name="imagine", 20 | version=version, 21 | description="Interstellar MAGnetic field INference Engine", 22 | license="GPLv3", 23 | url="https://github.com/IMAGINE-Consortium/imagine", 24 | author="IMAGINE Consortium", 25 | author_email="jiaxin.wang@sjtu.edu.cn, luizfelippesr@gmail.com", 26 | maintainer="Jiaxin Wang, Luiz Felippe S. Rodrigues", 27 | maintainer_email="jiaxin.wang@sjtu.edu.cn, luizfelippesr@gmail.com", 28 | packages=find_packages(), 29 | include_package_data=True, 30 | platforms="UNIX", 31 | python_requires='>=3.5', 32 | install_requires=requirements, 33 | zip_safe=False, 34 | classifiers=["Development Status :: 3 - Alpha", 35 | "Intended Audience :: Science/Research", 36 | "Topic :: Utilities", 37 | "License :: OSI Approved :: GNU General Public License v3 " 38 | "or later (GPLv3+)"],) 39 | -------------------------------------------------------------------------------- /doc/source/imagine.fields.rst: -------------------------------------------------------------------------------- 1 | imagine.fields package 2 | ====================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | :maxdepth: 4 9 | 10 | imagine.fields.hamx 11 | 12 | Submodules 13 | ---------- 14 | 15 | imagine.fields.base\_fields module 16 | ---------------------------------- 17 | 18 | .. automodule:: imagine.fields.base_fields 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | imagine.fields.basic\_fields module 24 | ----------------------------------- 25 | 26 | .. automodule:: imagine.fields.basic_fields 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | imagine.fields.field module 32 | --------------------------- 33 | 34 | .. automodule:: imagine.fields.field 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | imagine.fields.field\_factory module 40 | ------------------------------------ 41 | 42 | .. automodule:: imagine.fields.field_factory 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | imagine.fields.grid module 48 | -------------------------- 49 | 50 | .. automodule:: imagine.fields.grid 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | imagine.fields.test\_field module 56 | --------------------------------- 57 | 58 | .. automodule:: imagine.fields.test_field 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | Module contents 64 | --------------- 65 | 66 | .. 
automodule:: imagine.fields 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | -------------------------------------------------------------------------------- /imagine/tools/class_tools.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | # %% IMPORTS 5 | # Built-in imports 6 | from inspect import currentframe 7 | 8 | # All declaration 9 | __all__ = ['BaseClass', 'req_attr'] 10 | 11 | 12 | # %% CLASS DEFINITIONS 13 | # Define a base class that automatically checks for missing attributes 14 | class BaseClass(object): 15 | # Class attributes 16 | REQ_ATTRS = [] 17 | 18 | def __init__(self): 19 | # Check if all required class attributes are defined 20 | self._check_class_attrs() 21 | 22 | # This function checks if all required class attributes are available 23 | def _check_class_attrs(self): 24 | # Loop over all required attributes in REQ_ATTRS and check if it exists 25 | for attr in self.REQ_ATTRS: 26 | if not hasattr(self, attr): 27 | # Raise error if attribute is not found 28 | raise AttributeError("Required class attribute %r is not " 29 | "defined!" % (attr)) 30 | 31 | 32 | # %% FUNCTION DEFINITIONS 33 | def req_attr(meth): 34 | # Obtain the REQ_ATTRS attribute of the class of given 'meth' 35 | frame = currentframe().f_back 36 | req_attrs = frame.f_locals.get('REQ_ATTRS') 37 | 38 | # If req_attrs is None, add it to the class first 39 | if req_attrs is None: 40 | req_attrs = [] 41 | frame.f_locals['REQ_ATTRS'] = req_attrs 42 | 43 | # Add capitalized version of method name to req_attrs 44 | req_attrs.append(meth.__name__.upper()) 45 | 46 | # Return method 47 | return(meth) 48 | -------------------------------------------------------------------------------- /imagine/tools/timer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Timer class is designed for time recording. 3 | """ 4 | 5 | # %% IMPORTS 6 | # Built-in imports 7 | import time 8 | 9 | # All declaration 10 | __all__ = ['Timer'] 11 | 12 | 13 | # %% CLASS DEFINITIONS 14 | class Timer(object): 15 | """ 16 | Class designed for time recording. 17 | 18 | Simply provide an event name to the `tick` method to start recording. 19 | The `tock` method stops the recording and the `record` property allow 20 | one to access the recorded time. 21 | """ 22 | def __init__(self): 23 | self._record = dict() 24 | 25 | @property 26 | def record(self): 27 | """ 28 | Dictionary of recorded times using event name as keys. 29 | """ 30 | return self._record 31 | 32 | @record.setter 33 | def record(self, record): 34 | raise NotImplementedError 35 | 36 | def tick(self, event): 37 | """ 38 | Starts timing with a given event name. 39 | 40 | Parameters 41 | ---------- 42 | event : str 43 | Event name (will be key of the record attribute). 44 | """ 45 | self._record[event] = time.perf_counter() 46 | 47 | def tock(self, event): 48 | """ 49 | Stops timing of the given event. 50 | 51 | Parameters 52 | ---------- 53 | event : str 54 | Event name (will be key of the record attribute). 
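
        Returns
        -------
        float
            Time elapsed, in seconds, since `tick` was called with the
            same event name (the value is also stored in `record`).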
55 | """ 56 | assert (event in self._record.keys()) 57 | self._record[event] = time.perf_counter() - self._record[event] 58 | return self._record[event] 59 | -------------------------------------------------------------------------------- /conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # %% IMPORTS 4 | # Package imports 5 | import matplotlib as mpl 6 | from py.path import local 7 | import _pytest 8 | import pytest 9 | 10 | 11 | # Set MPL backend 12 | mpl.use('Agg') 13 | 14 | 15 | # %% PYTEST CUSTOM CONFIGURATION PLUGINS 16 | # This makes the pytest report header mention the tested IMAGINE version 17 | def pytest_report_header(config): 18 | from imagine.__version__ import __version__ 19 | return("IMAGINE: %s" % (__version__)) 20 | 21 | 22 | # Add the incremental marker 23 | def pytest_configure(config): 24 | config.addinivalue_line("markers", 25 | "incremental: Mark test suite to xfail all " 26 | "remaining tests when one fails.") 27 | 28 | 29 | # This introduces a marker that auto-fails tests if a previous one failed 30 | def pytest_runtest_makereport(item, call): 31 | if "incremental" in item.keywords: 32 | if(call.excinfo is not None and 33 | call.excinfo.type is not _pytest.outcomes.Skipped): 34 | parent = item.parent 35 | parent._previousfailed = item 36 | 37 | 38 | # This makes every marked test auto-fail if a previous one failed as well 39 | def pytest_runtest_setup(item): 40 | if "incremental" in item.keywords: 41 | previousfailed = getattr(item.parent, "_previousfailed", None) 42 | if previousfailed is not None: 43 | pytest.xfail("Previous test failed (%s)" % (previousfailed.name)) 44 | 45 | 46 | # %% PYTEST SETTINGS 47 | # Set the current working directory to the temporary directory 48 | local.get_temproot().chdir() 49 | -------------------------------------------------------------------------------- /imagine/tools/random_seed.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides a time-thread dependent seed value. 3 | 4 | For the testing suites, please turn to "imagine/tests/tools_tests.py". 5 | """ 6 | 7 | # %% IMPORTS 8 | # Built-in imports 9 | import logging as log 10 | import threading 11 | import time 12 | 13 | # Package imports 14 | import numpy as np 15 | 16 | # All declaration 17 | __all__ = ['ensemble_seed_generator', 'seed_generator'] 18 | 19 | 20 | # %% FUNCTION DEFINITIONS 21 | def seed_generator(trigger): 22 | """ 23 | Sets trigger as 0 will generate time-thread dependent method 24 | otherwise returns the trigger as seed. 25 | 26 | Parameters 27 | ---------- 28 | trigger : int 29 | Non-negative pre-fixed seed. 30 | 31 | Returns 32 | ------- 33 | seed : int 34 | A random seed value. 35 | """ 36 | log.debug('@ random_seed::seed_generator') 37 | if trigger > 0: 38 | return int(trigger) 39 | elif trigger == 0: 40 | return round(time.time()*1E+9) % int(1E+8) + threading.get_ident() % int(1E+8) 41 | else: 42 | raise ValueError('unsupported random seed value') 43 | 44 | def ensemble_seed_generator(size): 45 | """ 46 | Generates fixed random seed values for each realization in ensemble. 47 | 48 | Parameters 49 | ---------- 50 | size : int 51 | Number of realizations in ensemble. 52 | 53 | Returns 54 | ------- 55 | seeds : numpy.ndarray 56 | An array of random seeds. 
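
    Examples
    --------
    A sketch of typical use; the seed values vary from call to call, so
    only the shape is checked here:

    >>> seeds = ensemble_seed_generator(3)
    >>> seeds.shape
    (3,)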
57 | """ 58 | log.debug('@ random_seed::ensemble_seed_generator') 59 | # the uint32 is defined by the random generator's capacity 60 | return np.random.randint(low=1, high=np.uint32(-1)//3, size=np.uint64(size)) 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | docs/build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # dotenv 85 | .env 86 | 87 | # virtualenv 88 | .venv 89 | venv/ 90 | ENV/ 91 | 92 | # Spyder project settings 93 | .spyderproject 94 | .spyproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | # mkdocs documentation 100 | /site 101 | 102 | # mypy 103 | .mypy_cache/ 104 | -------------------------------------------------------------------------------- /imagine/fields/hamx/breg_lsa.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # IMAGINE imports 3 | from imagine.fields import DummyField, FieldFactory 4 | from imagine.priors import FlatPrior 5 | 6 | # All declaration 7 | __all__ = ['BregLSA', 'BregLSAFactory'] 8 | 9 | 10 | # %% CLASS DEFINITIONS 11 | class BregLSA(DummyField): 12 | """ 13 | This dummy field instructs the :py:class:`Hammurabi ` 14 | simulator class to use the HammurabiX's builtin regular magnetic field 15 | WMAP-3yr LSA. 16 | """ 17 | 18 | # Class attributes 19 | NAME = 'breg_lsa' 20 | FIELD_CHECKLIST = {'b0': (['magneticfield', 'regular', 'lsa', 'b0'], 21 | 'value'), 22 | 'psi0': (['magneticfield', 'regular', 'lsa', 'psi0'], 23 | 'value'), 24 | 'psi1': (['magneticfield', 'regular', 'lsa', 'psi1'], 25 | 'value'), 26 | 'chi0': (['magneticfield', 'regular', 'lsa', 'chi0'], 27 | 'value')} 28 | SIMULATOR_CONTROLLIST = {'cue': (['magneticfield', 'regular'], 29 | {'cue': '1'}), 30 | 'type': (['magneticfield', 'regular'], 31 | {'type': 'lsa'})} 32 | 33 | 34 | class BregLSAFactory(FieldFactory): 35 | """ 36 | Field factory that produces the dummy field :py:class:`BregLSA` 37 | (see its docs for details). 
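
    A minimal usage sketch (assuming the standard IMAGINE factory
    interface, as used in the tutorials, where ``active_parameters``
    selects which parameters are varied while the others keep their
    default values)::

        breg_factory = BregLSAFactory()
        breg_factory.active_parameters = ('b0', 'psi0')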
38 | """ 39 | 40 | # Class attributes 41 | FIELD_CLASS = BregLSA 42 | DEFAULT_PARAMETERS = {'b0': 6.0, 43 | 'psi0': 27.0, 44 | 'psi1': 0.9, 45 | 'chi0': 25.0} 46 | PRIORS = {'b0': FlatPrior(xmin=0., xmax=10.), 47 | 'psi0': FlatPrior(xmin=0., xmax=50.), 48 | 'psi1': FlatPrior(xmin=0., xmax=5.), 49 | 'chi0': FlatPrior(xmin=-25., xmax=50.)} 50 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | on: 4 | push: 5 | # Publish `master` as Docker `latest` image. 6 | branches: 7 | - master 8 | 9 | # Publish `v1.2.3` tags as releases. 10 | tags: 11 | - v* 12 | 13 | env: 14 | IMAGE_NAME: imagine 15 | 16 | jobs: 17 | # Push image to GitHub Packages. 18 | # See also https://docs.docker.com/docker-hub/builds/ 19 | push: 20 | runs-on: ubuntu-latest 21 | if: github.event_name == 'push' 22 | 23 | steps: 24 | - uses: actions/checkout@v2 25 | 26 | - name: Build image 27 | run: | 28 | # Strip git ref prefix from version 29 | BRANCH=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 30 | docker build docker --tag $IMAGE_NAME --build-arg BRANCH=$BRANCH --build-arg REPO=${{ github.repository }} 31 | 32 | - name: Test image 33 | run: docker run -t $IMAGE_NAME /bin/bash -c "source ~/.bashrc && cd /home/lab/imagine && pytest" 34 | 35 | - name: Log into registry 36 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin 37 | 38 | - name: Push image 39 | run: | 40 | IMAGE_ID=docker.pkg.github.com/${{ github.repository }}/$IMAGE_NAME 41 | 42 | # Change all uppercase to lowercase 43 | IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') 44 | 45 | # Strip git ref prefix from version 46 | VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 47 | 48 | # Strip "v" prefix from tag name 49 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//') 50 | 51 | # Use Docker `latest` tag convention 52 | [ "$VERSION" == "master" ] && VERSION=latest 53 | 54 | echo IMAGE_ID=$IMAGE_ID 55 | echo VERSION=$VERSION 56 | 57 | docker tag $IMAGE_NAME $IMAGE_ID:$VERSION 58 | docker push $IMAGE_ID:$VERSION 59 | -------------------------------------------------------------------------------- /imagine/fields/hamx/cre_analytic.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # IMAGINE imports 3 | from imagine.fields import DummyField, FieldFactory 4 | from imagine.priors import FlatPrior 5 | 6 | # All declaration 7 | __all__ = ['CREAna', 'CREAnaFactory'] 8 | 9 | 10 | # %% CLASS DEFINITIONS 11 | class CREAna(DummyField): 12 | """ 13 | This dummy field instructs the :py:class:`Hammurabi ` 14 | simulator class to use the HammurabiX's builtin analytic cosmic ray electron 15 | distribution 16 | """ 17 | NAME = 'cre_ana' 18 | 19 | FIELD_CHECKLIST = {'alpha': (['cre', 'analytic', 'alpha'], 'value'), 20 | 'beta': (['cre', 'analytic', 'beta'], 'value'), 21 | 'theta': (['cre', 'analytic', 'theta'], 'value'), 22 | 'r0': (['cre', 'analytic', 'r0'], 'value'), 23 | 'z0': (['cre', 'analytic', 'z0'], 'value'), 24 | 'E0': (['cre', 'analytic', 'E0'], 'value'), 25 | 'j0': (['cre', 'analytic', 'j0'], 'value')} 26 | SIMULATOR_CONTROLLIST = {'cue': (['cre'], {'cue': '1'}), 27 | 'type': (['cre'], {'type': 'analytic'})} 28 | 29 | 30 | class CREAnaFactory(FieldFactory): 31 | """ 32 | Field factory that produces the dummy field 
:py:class:`CREAna`
33 |     (see its docs for details).
34 |     """
35 | 
36 |     # Class attributes
37 |     FIELD_CLASS = CREAna
38 |     DEFAULT_PARAMETERS = {'alpha': 3,
39 |                           'beta': 0,
40 |                           'theta': 0,
41 |                           'r0': 5,
42 |                           'z0': 1,
43 |                           'E0': 20.6,
44 |                           'j0': 0.0217}
45 |     PRIORS = {'alpha': FlatPrior(xmin=2, xmax=4),
46 |               'beta': FlatPrior(xmin=-1, xmax=1),
47 |               'theta': FlatPrior(xmin=-1, xmax=1),
48 |               'r0': FlatPrior(xmin=0.1, xmax=10),
49 |               'z0': FlatPrior(xmin=0.1, xmax=3),
50 |               'E0': FlatPrior(xmin=10, xmax=30),
51 |               'j0': FlatPrior(xmin=0, xmax=0.1)}
52 | 
--------------------------------------------------------------------------------
/imagine/templates/thermal_electrons_template.py:
--------------------------------------------------------------------------------
1 | from imagine.fields import ThermalElectronDensityField
2 | import numpy as np
3 | import MY_GALAXY_MODEL  # Substitute this with your own code
4 | 
5 | 
6 | class ThermalElectronsDensityTemplate(ThermalElectronDensityField):
7 |     """ Here comes the description of the electron density model """
8 | 
9 |     # Class attributes
10 |     NAME = 'name_of_the_thermal_electrons_field'
11 | 
12 |     # Is this field stochastic or not. Only necessary if True
13 |     STOCHASTIC_FIELD = True
14 |     # If there are any dependencies, they should be included in this list
15 |     DEPENDENCIES_LIST = []
16 |     # List of all parameters for the field
17 |     PARAMETER_NAMES = ['Parameter_A', 'Parameter_B']
18 | 
19 |     def compute_field(self, seed):
20 |         # If this is a stochastic field, the integer `seed` must be
21 |         # used to set the random seed for a single realisation.
22 |         # Otherwise, `seed` should be ignored.
23 | 
24 |         # The coordinates can be accessed from an internal grid object
25 |         x_coord = self.grid.x
26 |         y_coord = self.grid.y
27 |         z_coord = self.grid.z
28 |         # Alternatively, one can use cylindrical or spherical coordinates
29 |         r_cyl_coord = self.grid.r_cylindrical
30 |         r_sph_coord = self.grid.r_spherical
31 |         theta_coord = self.grid.theta
32 |         phi_coord = self.grid.phi
33 | 
34 |         # One can access the parameters supplied in the following way
35 |         param_A = self.parameters['Parameter_A']
36 |         param_B = self.parameters['Parameter_B']
37 | 
38 |         # Now you can interface with previous code or implement here
39 |         # your own model for the thermal electrons distribution.
40 |         # Returns the electron number density at each grid point
41 |         # in units of (or convertible to) cm**-3
42 |         return MY_GALAXY_MODEL.compute_ne(param_A, param_B,
43 |                                           r_sph_coord, theta_coord, phi_coord,
44 |                                           # If the field is stochastic
45 |                                           # it can use the seed
46 |                                           # to generate a realisation
47 |                                           seed)
--------------------------------------------------------------------------------
/imagine/simulators/test_simulator.py:
--------------------------------------------------------------------------------
1 | """
2 | For testing purposes only
3 | """
4 | 
5 | 
6 | # %% IMPORTS
7 | # Package imports
8 | import astropy.units as u
9 | import numpy as np
10 | from scipy.interpolate import RegularGridInterpolator
11 | 
12 | # IMAGINE imports
13 | from imagine.simulators import Simulator
14 | 
15 | # All declaration
16 | __all__ = ['TestSimulator']
17 | 
18 | 
19 | # %% CLASS DEFINITIONS
20 | class TestSimulator(Simulator):
21 |     r"""
22 |     Example simulator for illustration and testing
23 | 
24 |     Computes a Faraday-depth-like property at a given point without
25 |     performing the integration, i.e. computes:
26 | 
27 |     .. math::
28 | 
29 |         t(x,y,z) = B_y \, n_e
30 | 
31 |     """
32 | 
33 |     # Class attributes
34 |     SIMULATED_QUANTITIES = ['test']
35 |     REQUIRED_FIELD_TYPES = ['magnetic_field', 'thermal_electron_density']
36 |     ALLOWED_GRID_TYPES = ['cartesian']
37 | 
38 |     def __init__(self, measurements, LoS_axis='y'):
39 |         # Send the measurements to parent class
40 |         super().__init__(measurements)
41 |         if LoS_axis=='y':
42 |             self.B_axis = 1
43 |         elif LoS_axis=='z':
44 |             self.B_axis = 2
45 |         else:
46 |             raise ValueError("LoS_axis must be either 'y' or 'z'")
47 | 
48 |     def simulate(self, key, coords_dict, realization_id, output_units):
49 |         # Accesses fields and grid
50 | 
51 |         Bpara = self.fields['magnetic_field'][:,:,:,self.B_axis]
52 |         ne = self.fields['thermal_electron_density']
53 |         x = self.grid.x[:,0,0].to_value(u.kpc)
54 |         y = self.grid.y[0,:,0].to_value(u.kpc)
55 |         z = self.grid.z[0,0,:].to_value(u.kpc)
56 | 
57 |         fd = (Bpara*ne).to_value(output_units)
58 | 
59 |         # Converts the grids to a format compatible with the interpolator
60 |         # (comment: this is a bit silly, but what is the native numpy alternative?)
61 |         fd_interp = RegularGridInterpolator(points=(x, y, z),
62 |                                             values=fd, method='nearest')
63 | 
64 |         interp_points = np.array([coords_dict[c].to_value(u.kpc)
65 |                                   for c in ('x', 'y', 'z')]).T
66 | 
67 |         with np.errstate(invalid='ignore', divide='ignore'):
68 |             results = fd_interp(interp_points)*output_units
69 | 
70 |         return results
71 |     # Prevents pytest from treating this class as a test
72 |     __test__ = False
--------------------------------------------------------------------------------
/imagine/templates/magnetic_field_template.py:
--------------------------------------------------------------------------------
1 | from imagine.fields import MagneticField
2 | import astropy.units as u
3 | import numpy as np
4 | # Substitute this with your own code
5 | import MY_GMF_MODEL
6 | 
7 | 
8 | class MagneticFieldTemplate(MagneticField):
9 |     """ Here comes the description of the magnetic field model """
10 | 
11 |     # Class attributes
12 |     NAME = 'name_of_the_magnetic_field'
13 | 
14 |     # Is this field stochastic or not. Only necessary if True
15 |     STOCHASTIC_FIELD = True
16 |     # If there are any dependencies, they should be included in this list
17 |     DEPENDENCIES_LIST = []
18 |     # List of all parameters for the field
19 |     PARAMETER_NAMES = ['Parameter_A', 'Parameter_B']
20 | 
21 |     def compute_field(self, seed):
22 |         # If this is a stochastic field, the integer `seed` must be
23 |         # used to set the random seed for a single realisation.
24 |         # Otherwise, `seed` should be ignored.
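        # A minimal sketch of one possible convention (illustrative only,
        # assuming numpy is used as the random number generator):
        #     rng = np.random.default_rng(seed)
        #     fluctuations = rng.normal(size=self.data_shape)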
25 | 
26 |         # The coordinates can be accessed from an internal grid object
27 |         x_coord = self.grid.x
28 |         y_coord = self.grid.y
29 |         z_coord = self.grid.z
30 |         # Alternatively, one can use cylindrical or spherical coordinates
31 |         r_cyl_coord = self.grid.r_cylindrical
32 |         r_sph_coord = self.grid.r_spherical
33 |         theta_coord = self.grid.theta; phi_coord = self.grid.phi
34 | 
35 |         # One can access the parameters supplied in the following way
36 |         param_A = self.parameters['Parameter_A']
37 |         param_B = self.parameters['Parameter_B']
38 | 
39 |         # Now one can interface with previous code, or implement a
40 |         # particular magnetic field
41 |         Bx, By, Bz = MY_GMF_MODEL.compute_B(param_A, param_B,
42 |                                             x_coord, y_coord, z_coord,
43 |                                             # If the field is stochastic
44 |                                             # it can use the seed
45 |                                             # to generate a realisation
46 |                                             seed)
47 | 
48 |         # Creates an empty output magnetic field Quantity with
49 |         # the correct shape and units
50 |         MF_array = np.empty(self.data_shape) * u.microgauss
51 |         # and saves the pre-computed components
52 |         MF_array[:,:,:,0] = Bx
53 |         MF_array[:,:,:,1] = By
54 |         MF_array[:,:,:,2] = Bz
55 | 
56 |         return MF_array
--------------------------------------------------------------------------------
/doc/source/index.rst:
--------------------------------------------------------------------------------
1 | .. IMAGINE documentation master file, created by
2 |    sphinx-quickstart on Wed Nov 6 13:05:25 2019.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | IMAGINE: Interstellar MAGnetic field INference Engine
7 | ======================================================
8 | 
9 | .. image:: elephant.png
10 |    :align: center
11 |    :alt: The IMAGINE pipeline
12 |    :width: 400px
13 | 
14 | 
15 | Welcome to the documentation of the
16 | `IMAGINE software package `_,
17 | a publicly available Bayesian platform that allows
18 | using a variety of observational data sets to constrain models for
19 | the main ingredients of the interstellar medium of the Galaxy.
20 | IMAGINE calculates simulated data sets from the galaxy models and compares these to
21 | the observational data sets through a likelihood evaluation.
22 | It then samples this multi-dimensional likelihood space, which allows one to
23 | update prior knowledge and thus to find the
24 | :ref:`best-fit model ` parameters and/or compute the
25 | :ref:`model evidence ` (which enables rigorous comparison of competing
26 | models).
27 | 
28 | IMAGINE is developed and maintained by the
29 | `IMAGINE consortium `_,
30 | a diverse group of researchers whose common interest revolves
31 | around developing an integrated understanding of the various components of the
32 | Galactic interstellar medium (with emphasis on the Galactic magnetic field and
33 | its interaction with cosmic rays). For more details on the IMAGINE science case,
34 | please refer to the
35 | `IMAGINE whitepaper `_.
36 | 
37 | 
38 | .. toctree::
39 |    :maxdepth: 2
40 |    :caption: Contents:
41 | 
42 |    installation
43 |    design
44 |    components
45 |    posterior
46 |    evidence
47 |    parallel
48 | 
49 | .. toctree::
50 |    :maxdepth: 1
51 |    :caption: Tutorials:
52 | 
53 |    tutorial_one
54 |    tutorial_datasets
55 |    tutorial_fields
56 |    tutorial_simulator
57 |    tutorial_hammurabi
58 |    tutorial_priors
59 |    tutorial_masks
60 |    tutorial_wmap
61 | 
62 | 
63 | .. toctree::
64 |    :maxdepth: 4
65 |    :caption: API Reference
66 | 
67 |    imagine
68 | 
69 | 
70 | 
71 | Indices and tables
72 | ==================
73 | 
74 | * :ref:`genindex`
75 | * :ref:`modindex`
76 | * :ref:`search`
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Documentation Status](https://readthedocs.org/projects/imagine-code/badge/?version=latest)](https://imagine-code.readthedocs.io/en/latest/?badge=latest)
2 | [![Build Status](https://travis-ci.com/IMAGINE-Consortium/imagine.svg?branch=master)](https://travis-ci.com/IMAGINE-Consortium/imagine)
3 | [![Docker](https://github.com/IMAGINE-Consortium/imagine/workflows/Docker/badge.svg)](https://github.com/IMAGINE-Consortium/imagine/actions?query=workflow%3ADocker)
4 | 
5 | # IMAGINE
6 | 
7 | IMAGINE, the **I**nterstellar **MAG**netic Field **IN**ference **E**ngine,
8 | is an open-source (GNU General Public License v3) software package designed for
9 | analysing Galactic components using observational data.
10 | It is a modular framework for performing inference on generic (non)parametric models of the Galaxy.
11 | The analysis pipeline has MPI support and works with the MultiNest sampler.
12 | 
13 | The Galactic magnetic field (GMF) has a huge impact on the evolution of the Milky Way,
14 | yet currently there exists no standard model for it, as its structure is not fully understood.
15 | In the past, many parametric GMF models of varying complexity have been developed,
16 | each fitted to its own individual set of observational data, which complicates comparisons between them.
17 | 
18 | We are part of the [IMAGINE Consortium], a diverse group of researchers with expertise
19 | encompassing the properties of the interstellar medium, Galactic magnetic fields and
20 | cosmic rays.
21 | Our goal is to systematize parametric inference of Galactic field models.
22 | We want to enable a statistical comparison of different models in the future,
23 | allow for simple refitting with respect to newly available data sets and thereby increase the research area's transparency.
24 | We aim to make state-of-the-art Bayesian methods easily available and, in particular, to treat the statistics related to the random components of the GMF correctly.
25 | The IMAGINE software package is developed and maintained by the [technical workgroup] of the [IMAGINE Consortium].
26 | 
27 | Please check the [**documentation**](https://imagine-code.readthedocs.io/) for a usage guide and
28 | detailed technical information.
29 | 
30 | [IMAGINE Consortium]: https://www.astro.ru.nl/imagine/
31 | [technical workgroup]: https://www.astro.ru.nl/imagine/imagineprojects.html
32 | 
33 | ### Contact
34 | *Bug reports and code contributions are warmly welcome; feel free to contact:*
35 | 
36 | - [Jiaxin Wang](http://www.sissa.it/app/members.php?ID=222)
37 | - [Tess Jaffe](https://science.gsfc.nasa.gov/sed/bio/tess.jaffe)
38 | - [Torsten Ensslin](https://wwwmpa.mpa-garching.mpg.de/~ensslin/)
39 | - [Luiz Felippe S. Rodrigues](https://luizfelippesr.github.io/)
--------------------------------------------------------------------------------
/doc/source/imagine.tools.rst:
--------------------------------------------------------------------------------
1 | imagine.tools package
2 | =====================
3 | 
4 | Submodules
5 | ----------
6 | 
7 | imagine.tools.carrier\_mapper module
8 | ------------------------------------
9 | 
10 | .. 
automodule:: imagine.tools.carrier_mapper 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | imagine.tools.class\_tools module 16 | --------------------------------- 17 | 18 | .. automodule:: imagine.tools.class_tools 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | imagine.tools.config module 24 | --------------------------- 25 | 26 | .. automodule:: imagine.tools.config 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | imagine.tools.covariance\_estimator module 32 | ------------------------------------------ 33 | 34 | .. automodule:: imagine.tools.covariance_estimator 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | imagine.tools.io module 40 | ----------------------- 41 | 42 | .. automodule:: imagine.tools.io 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | imagine.tools.masker module 48 | --------------------------- 49 | 50 | .. automodule:: imagine.tools.masker 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | imagine.tools.misc module 56 | ------------------------- 57 | 58 | .. automodule:: imagine.tools.misc 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | imagine.tools.mpi\_helper module 64 | -------------------------------- 65 | 66 | .. automodule:: imagine.tools.mpi_helper 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | imagine.tools.parallel\_ops module 72 | ---------------------------------- 73 | 74 | .. automodule:: imagine.tools.parallel_ops 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | imagine.tools.random\_seed module 80 | --------------------------------- 81 | 82 | .. automodule:: imagine.tools.random_seed 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | imagine.tools.timer module 88 | -------------------------- 89 | 90 | .. automodule:: imagine.tools.timer 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | imagine.tools.visualization module 96 | ---------------------------------- 97 | 98 | .. automodule:: imagine.tools.visualization 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | Module contents 104 | --------------- 105 | 106 | .. 
automodule:: imagine.tools 107 | :members: 108 | :undoc-members: 109 | :show-inheritance: 110 | -------------------------------------------------------------------------------- /imagine/likelihoods/simple_likelihood.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Built-in imports 3 | from copy import deepcopy 4 | import logging as log 5 | 6 | # Package imports 7 | import numpy as np 8 | 9 | # IMAGINE imports 10 | from imagine.likelihoods import Likelihood 11 | from imagine.observables.observable_dict import Simulations 12 | from imagine.tools.parallel_ops import pslogdet, plu_solve 13 | 14 | # All declaration 15 | __all__ = ['SimpleLikelihood'] 16 | 17 | 18 | # %% CLASS DEFINITIONS 19 | class SimpleLikelihood(Likelihood): 20 | """ 21 | A simple Likelihood class 22 | 23 | Parameters 24 | ---------- 25 | measurement_dict : imagine.observables.observable_dict.Measurements 26 | Measurements 27 | covariance_dict : imagine.observables.observable_dict.Covariances 28 | Covariances 29 | mask_dict : imagine.observables.observable_dict.Masks 30 | Masks 31 | """ 32 | 33 | def call(self, simulations_dict): 34 | """ 35 | SimpleLikelihood object call function 36 | 37 | Parameters 38 | ---------- 39 | simulations_dict : imagine.observables.observable_dict.Simulations 40 | Simulations object 41 | 42 | Returns 43 | ------- 44 | likelicache : float 45 | log-likelihood value (copied to all nodes) 46 | """ 47 | log.debug('@ simple_likelihood::__call__') 48 | assert isinstance(simulations_dict, Simulations) 49 | # check dict entries 50 | assert set(simulations_dict.keys()).issubset(self._measurement_dict.keys()) 51 | 52 | if self.covariance_dict is not None: 53 | covariance_dict = self._covariance_dict 54 | else: 55 | covariance_dict = {} 56 | 57 | likelicache = 0 58 | for name in self._measurement_dict: 59 | obs_data = simulations_dict[name].ensemble_mean 60 | meas_data = self._measurement_dict[name].data 61 | diff = meas_data - obs_data 62 | 63 | if name in covariance_dict: 64 | if self._covariance_dict[name].dtype == 'variance': 65 | # If only the variance was originally specified, 66 | # use it *without constructing the full covariance* 67 | meas_var = self._covariance_dict[name].var 68 | 69 | sign = np.sign(meas_var).prod() 70 | logdet = np.log(meas_var*2.*np.pi).sum() 71 | 72 | likelicache += -0.5*np.vdot(diff, 1./meas_var * diff) - 0.5*sign*logdet 73 | else: 74 | # If a full covariance matrix was originally specified, use it! 
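                    # For reference, the branch below evaluates the standard
                    # multivariate Gaussian log-likelihood term,
                    #     ln L += -(1/2) d^T C^{-1} d - (1/2) ln det(2*pi*C),
                    # where d is the data-minus-model residual and C the
                    # covariance; pslogdet and plu_solve are the (possibly
                    # MPI-distributed) log-determinant and LU solver.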
75 |                     meas_cov = self._covariance_dict[name].data
76 | 
77 |                     sign, logdet = pslogdet(meas_cov*2*np.pi)
78 | 
79 |                     likelicache += -0.5*np.vdot(diff, plu_solve(meas_cov, diff)) - 0.5*sign*logdet
80 |             else:
81 |                 # some measurement may not have cov
82 |                 likelicache += -0.5*np.vdot(diff, diff)
83 | 
84 |         return likelicache
85 | 
--------------------------------------------------------------------------------
/imagine/tools/randmodule.cc:
--------------------------------------------------------------------------------
1 | #include <cassert>
2 | #include <chrono>
3 | #include <ctime>
4 | #include <memory>
5 | #include <sstream>
6 | #include <thread>
7 | #include <vector>
8 | #include <gsl/gsl_rng.h>
9 | 
10 | /**
11 |  * this is a C++ module for random value/seed generation in IMAGINE
12 |  * in Python's NumPy module, the random state is a global attribute
13 |  * a stand-alone C++ module for random generation in IMAGINE is necessary
14 |  * to avoid unexpected interference between IMAGINE and its ported samplers, e.g., Dynesty
15 |  */
16 | class RandModule{
17 | public:
18 |     RandModule() = default;
19 |     RandModule(const RandModule &) = delete;
20 |     RandModule(RandModule &&) = delete;
21 |     RandModule& operator= (RandModule &&) = delete;
22 |     RandModule& operator= (const RandModule &) = delete;
23 |     virtual ~RandModule () = default;
24 |     /**
25 |      * generate a random seed
26 |      * if given a non-zero integer, take it as the seed
27 |      * if given zero, generate a time-thread dependent seed
28 |      */
29 |     virtual std::size_t generate_seed (const std::size_t&) const;
30 |     /**
31 |      * generate a vector of random floating-point numbers
32 |      * given the seed for generating (if zero, use a time-thread dependent seed)
33 |      * given the size of the vector
34 |      * given the lower value limit
35 |      * given the upper value limit
36 |      */
37 |     virtual std::unique_ptr<std::vector<double>> unif_double_list (const std::size_t&,
38 |                                                                    const std::size_t&,
39 |                                                                    const double&,
40 |                                                                    const double&) const;
41 | };
42 | 
43 | // offer time-thread dependent random seed
44 | std::size_t RandModule::generate_seed (const std::size_t& seed) const{
45 |     assert(seed>=0);
46 |     if(seed==0){
47 |         auto p = std::chrono::system_clock::now();
48 |         // valid until 19 January, 2038 03:14:08 UTC
49 |         time_t today_time = std::chrono::system_clock::to_time_t(p);
50 |         // casting thread id into unsigned long
51 |         std::stringstream ss;
52 |         ss << std::this_thread::get_id();
53 |         auto th_id = std::stoul(ss.str());
54 |         // precision in (thread,second)
55 |         return (th_id + today_time);
56 |     }
57 |     return seed;
58 | }
59 | 
60 | // offer an array of random floating numbers
61 | std::unique_ptr<std::vector<double>> RandModule::unif_double_list (const std::size_t& seed,
62 |                                                                    const std::size_t& size,
63 |                                                                    const double& low,
64 |                                                                    const double& high) const{
65 |     auto cache = std::make_unique<std::vector<double>>(size);
66 |     gsl_rng *r {gsl_rng_alloc(gsl_rng_taus)};
67 |     gsl_rng_set (r, this->generate_seed(seed));
68 |     // fill the vector with uniform deviates in [low, high)
69 |     for (std::size_t i=0;i<size;++i)
70 |         (*cache)[i] = low + (high - low)*gsl_rng_uniform(r);
71 |     gsl_rng_free(r);
72 |     return cache;
73 | }
--------------------------------------------------------------------------------
/doc/source/design.rst:
--------------------------------------------------------------------------------
16 | :ref:`Observables `, which represent
17 | both observational and mock data.
18 | 
19 | New observational data are included in IMAGINE using the :ref:`Datasets`,
20 | which make it easy to adjust the format of the data to the standard
21 | specifications (and are internally converted into :ref:`Observables `).
22 | Also, a collection of *Datasets* contributed by the community can be found in
23 | the Consortium's dedicated `Dataset repository `_.
24 | 
25 | The connection between theory and reality is made by one of the :ref:`Simulators`.
26 | Each of these corresponds to a mapping from a set of model *Fields* into mock
27 | *Observables*. The available simulators, importantly,
The available simulators, importantly, 28 | include `Hammurabi `_, 29 | which can compute Faraday rotation measure and diffuse synchrotron and thermal 30 | dust emission. 31 | 32 | Each of these :ref:`IMAGINE Components` (:ref:`Fields`, 33 | :ref:`Observables `, :ref:`Datasets`, :ref:`Simulators`) 34 | are represented by a Python class in IMAGINE. 35 | Therefore, in order to extend IMAGINE with a specific new field or 36 | including a new observational dataset, one needs to create a *subclass* of 37 | one of IMAGINE's base classes. This subclass will, very often, be 38 | a `wrapper `_ around 39 | already existing code or scripts. To preserve the modularity and 40 | flexibility of IMAGINE, one should try to use 41 | (:ref:`as far as possible `) only the provided base classes. 42 | 43 | 44 | .. figure:: imagine_design.png 45 | :name: IMAGINE 46 | :align: center 47 | :alt: The IMAGINE pipeline 48 | :width: 100% 49 | 50 | The structure of the IMAGINE pipeline. 51 | 52 | :numref:`IMAGINE` describes the typical workflow of IMAGINE and introduces other key base classes. 53 | Mock and measured data, in the form of :ref:`Observables `, are used 54 | to compute a likelihood through a :ref:`Likelihood` class. This, supplemented by 55 | :ref:`Priors`, allows a :ref:`Pipeline` object to sample the parameter space and compute 56 | posterior distributions and Bayesian evidences for the models. The generation 57 | of different realisations of each Field is managed by the corresponding 58 | :ref:`Field Factory` class. Likewise, :ref:`Observable Dictionaries ` help one 59 | organising and manipulating *Observables*. 60 | 61 | 62 | -------------------------------------------------------------------------------- /imagine/tools/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | IMAGINE global configuration 4 | ---------------------------- 5 | 6 | The default behaviour of some aspects of IMAGINE can be set using 7 | global `rc` configuration variables. 8 | 9 | These can be accessed and modified using the 10 | :py:data:`imagine.rc ` dictionary or setting the 11 | corresponding environment variables (named 'IMAGINE\_'+RC_VAR_NAME). 12 | 13 | For example to set the default path for the hamx executable, 14 | one can either do:: 15 | 16 | import imagine 17 | imagine.rc.hammurabi_hamx_path = 'my_desired_path' 18 | 19 | or, alternatively, set this as an environment variable 20 | before the exectution of the script:: 21 | 22 | export IMAGINE_HAMMURABI_HAMX_PATH='my_desired_path' 23 | 24 | The following list describes all the available global settings variables. 25 | 26 | IMAGINE rc variables 27 | temp_dir 28 | Default temporary directory used by IMAGINE. If not set, a temporary 29 | directory will be created at /tmp/ with a safe name. 30 | distributed_arrays 31 | If `True`, arrays containing covariances are distributed among 32 | different MPI processes (and so are the corresponding array operations). 33 | pipeline_default_seed 34 | The default value for the master seed used by a Pipeline object 35 | (see :py:data:`Pipeline.master_seed `). 36 | pipeline_distribute_ensemble 37 | The default value of 38 | (see :py:data:`Pipeline.distribute_ensemble `). 39 | hammurabi_hamx_path 40 | Default location of the Hammurabi X executable file, `hamx`. 
41 | """ 42 | 43 | # %% IMPORTS 44 | # Built-in imports 45 | import os 46 | import tempfile 47 | 48 | # All declaration 49 | __all__ = ['rc'] 50 | 51 | # Sets default values of configuration parameters 52 | rc = {'temp_dir': None, 53 | 'distributed_arrays': False, 54 | 'pipeline_default_seed': 1, 55 | 'pipeline_distribute_ensemble': False, 56 | 'hammurabi_hamx_path': None} 57 | 58 | 59 | # %% FUNCTION DEFINITIONS 60 | def _str_to_python(v): 61 | """ 62 | Attempts to convert a string to a python basic type 63 | """ 64 | # Tries to convert to a number 65 | try: 66 | v = float(v) 67 | # Converts to integer if needed 68 | if v.is_integer(): 69 | v = int(v) 70 | except ValueError: 71 | pass 72 | 73 | # Converts to boolean if needed 74 | if v in ('True','T','TRUE'): 75 | v = True 76 | elif v in ('False','F','FALSE'): 77 | v = False 78 | 79 | return v 80 | 81 | 82 | def read_rc_from_env(): 83 | """ 84 | Updates the rc configuration dictionary using current 85 | environment variables 86 | """ 87 | global rc 88 | for var in rc: 89 | env_var = 'IMAGINE_'+var.upper() 90 | 91 | try: 92 | rc[var] = _str_to_python(os.environ[env_var]) 93 | except KeyError: 94 | pass 95 | 96 | 97 | read_rc_from_env() 98 | 99 | # If a temp directory was not set using environment variables, create one 100 | if rc['temp_dir'] is None: 101 | rc['temp_dir_obj'] = tempfile.TemporaryDirectory(prefix='imagine_') 102 | rc['temp_dir'] = rc['temp_dir_obj'].name 103 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM continuumio/miniconda3 2 | ARG BRANCH=master 3 | ARG REPO=IMAGINE-Consortium/imagine 4 | 5 | RUN echo REPO=$REPO BRANCH=$BRANCH 6 | 7 | # Install build environment (for hamx) 8 | RUN apt-get update 9 | RUN apt-get install -y --fix-missing git wget vim 10 | RUN apt-get install -y --fix-missing build-essential autoconf automake cmake gfortran 11 | RUN apt-get install -y --fix-missing gsl-bin libgsl-dev 12 | 13 | # IMAGINE download 14 | RUN mkdir /home/lab 15 | WORKDIR /home/lab 16 | RUN echo BRANCH: $BRANCH && echo REPO $REPO 17 | RUN git clone -b $BRANCH https://github.com/$REPO.git imagine 18 | 19 | # IMAGINE dependencies 20 | WORKDIR /home/lab/imagine 21 | RUN conda env create --file=imagine_conda_env.yml 22 | # Make RUN commands use the 'imagine' environment by default: 23 | SHELL ["conda", "run", "-n", "imagine", "/bin/bash", "-c"] 24 | 25 | # Python extra dependencies (for installation, testing and tutorials) 26 | # nomkl slightly reduces the size of the image 27 | RUN conda install -y -c conda-forge pip seaborn nomkl pytest-cov pytest-mpl nbval astroquery requests jupyterlab 28 | RUN python -m ipykernel install --user --name imagine --display-name 'Python (imagine)' 29 | WORKDIR /home/lab 30 | 31 | ### ---- for hammurabi X ----- 32 | # FFTW 33 | RUN mkdir /home/lab/fftw 34 | WORKDIR fftw 35 | RUN wget http://www.fftw.org/fftw-3.3.8.tar.gz && tar -xzf fftw-3.3.8.tar.gz --strip-components 1 36 | RUN ./configure --enable-threads --enable-openmp --enable-shared --prefix=/tmp/local/ && make && make install 37 | WORKDIR /home/lab 38 | 39 | # HAMMURABI X 40 | RUN mkdir /home/lab/hamx 41 | WORKDIR /home/lab/hamx 42 | RUN wget https://github.com/hammurabi-dev/hammurabiX/archive/v2.4.2.tar.gz && tar -xzf v2.4.2.tar.gz --strip-components 1 && rm v2.4.2.tar.gz 43 | # Deactivates testing (and together with it, the google tests dependency) 44 | RUN sed -E 's/(OPTION.ENABLE_TESTING .+) ON/\1 
OFF/g' CMakeLists.txt > CMakeLists_new.txt 45 | RUN mv CMakeLists_new.txt CMakeLists.txt 46 | RUN mkdir /home/lab/hamx/build 47 | WORKDIR /home/lab/hamx/build 48 | RUN cmake .. && make install 49 | WORKDIR /home/lab/hamx 50 | RUN pip install -e . 51 | ### ----- end of hammurabi X ----- 52 | 53 | # INSTALL IMAGINE! 54 | WORKDIR /home/lab/imagine 55 | RUN pip install -e . 56 | WORKDIR /home/lab 57 | 58 | # IMAGINE datasets extension (installs latest version) 59 | RUN pip install git+https://github.com/IMAGINE-Consortium/imagine-datasets.git 60 | RUN mkdir /home/lab/.imagine_datasets_cache 61 | ENV IMAGINE_DATASETS_CACHE_DIR /home/lab/.imagine_datasets_cache 62 | 63 | # CLEAN PACKAGES 64 | RUN conda clean -afy && find /opt/conda/ -follow -type f -name '*.pyc' -delete 65 | RUN rm -f *.tar.gz 66 | RUN rm -rf /home/lab/fftw 67 | RUN apt-get autoremove && apt-get clean 68 | 69 | # SET PATH 70 | ENV PATH /tmp/local/hammurabi/bin:${PATH} 71 | ENV LD_LIBRARY_PATH /tmp/local/lib:/tmp/local/hammurabi/lib:${LD_LIBRARY_PATH} 72 | 73 | 74 | # MISC 75 | # Activates imagine conda environment by default 76 | RUN sed -i 's/conda activate base/conda activate imagine/g' ~/.bashrc 77 | # Alias to make mpirun work 78 | RUN echo 'alias mpirun='\''mpirun --mca btl ^vader --allow-run-as-root'\''' >> ~/.bashrc 79 | # Launching script to simplify starting jupyterlab 80 | RUN echo '#!/bin/bash' >> ~/jupyterlab.bash 81 | RUN echo 'source ~/.bashrc && jupyter-lab --notebook-dir=/home/lab --ip='\''*'\'' --port=8888 --no-browser --allow-root' >> ~/jupyterlab.bash 82 | RUN chmod +x ~/jupyterlab.bash 83 | # Alias to simplify launching the jupyterlab in interactive mode 84 | RUN echo "alias jupyter-lab='~/jupyterlab.bash'" >> ~/.bashrc 85 | -------------------------------------------------------------------------------- /imagine/priors/basic_priors.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Built-in imports 3 | import logging as log 4 | 5 | # Package imports 6 | import numpy as np 7 | from scipy.stats import norm 8 | import astropy.units as u 9 | 10 | # IMAGINE imports 11 | from imagine.priors import Prior, ScipyPrior 12 | from imagine.tools import unit_checker 13 | # All declaration 14 | __all__ = ['FlatPrior', 'GaussianPrior'] 15 | 16 | 17 | # %% CLASS DEFINITIONS 18 | class FlatPrior(Prior): 19 | """ 20 | Prior distribution where any parameter values within the valid interval 21 | have the same prior probability. 22 | 23 | Parameters 24 | ---------- 25 | xmin, xmax : float 26 | A pair of points representing, respectively, the minimum/maximum 27 | parameter values to be considered. 28 | unit : astropy.units.Unit, optional 29 | If present, sets the units used for this parameter. If absent, this 30 | is inferred from `xmin` and `xmax`. 31 | wrapped : bool 32 | Specify whether the parameter is periodic (i.e. the range is supposed 33 | to "wrap-around"). 
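    Example
    -------
    An illustrative sketch (the units and values here are made up)::

        import numpy as np
        import astropy.units as u
        from imagine.priors import FlatPrior

        prior = FlatPrior(xmin=0*u.microgauss, xmax=10*u.microgauss)
        prior(np.array([0.5]))  # maps the unit cube onto [xmin, xmax] -> 5 uG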
34 | 
35 |     """
36 |     def __init__(self, xmin, xmax, unit=None, wrapped=False):
37 |         # Updates ranges
38 |         super().__init__(xmin=xmin, xmax=xmax, wrapped=wrapped, unit=unit)
39 |         # Computes this from `range`, after the base Prior class has
40 |         # already dealt with units
41 |         self.vol = self.range[1] - self.range[0]
42 |         # Constant pdf (for illustration)
43 |         self._pdf = lambda x: np.ones(x.shape)/self.vol.value
44 | 
45 |     def __call__(self, cube):
46 |         log.debug('@ flat_prior::__call__')
47 | 
48 |         unit, [cube_val] = unit_checker(self.unit, [cube])
49 |         # Rescales to the correct interval
50 |         cube_val = cube_val * (self.range[1].value - self.range[0].value)
51 |         cube_val += self.range[0].value
52 | 
53 |         return cube_val << unit
54 | 
55 | 
56 | class GaussianPrior(ScipyPrior):
57 |     """
58 |     Normal prior distribution.
59 | 
60 |     This can operate either as a regular Gaussian distribution
61 |     (defined from -infinity to infinity) or, if `xmin` and `xmax` values
62 |     are set, as a truncated Gaussian distribution.
63 | 
64 |     Parameters
65 |     ----------
66 |     mu : float
67 |         The position of the mode (mean, if the truncation is symmetric)
68 |         of the Gaussian
69 |     sigma : float
70 |         Width of the distribution (standard deviation, if there was no truncation)
71 |     xmin, xmax : float
72 |         A pair of points representing, respectively, the minimum/maximum
73 |         parameter values to be considered (i.e. the truncation interval).
74 |         If these are not provided (or set to `None`), the prior range is
75 |         assumed to run from -infinity to infinity
76 |     unit : astropy.units.Unit, optional
77 |         If present, sets the units used for this parameter. If absent, this
78 |         is inferred from `mu` and `sigma`.
79 |     wrapped : bool
80 |         Specify whether the parameter is periodic (i.e. the range is supposed
81 |         to "wrap-around").
82 | 
83 |     """
84 | 
85 |     def __init__(self, mu=None, sigma=None, xmin=None, xmax=None, unit=None,
86 |                  wrapped=False, **kwargs):
87 | 
88 |         assert mu is not None, 'A value for mu must be provided'
89 |         assert sigma is not None, 'A value for sigma must be provided'
90 | 
91 |         unit, [mu_val, sigma_val] = unit_checker(unit, [mu, sigma])
92 | 
93 |         super().__init__(distr=norm, loc=mu, scale=sigma, unit=unit,
94 |                          xmin=xmin, xmax=xmax, **kwargs)
--------------------------------------------------------------------------------
/imagine/tools/masker.py:
--------------------------------------------------------------------------------
1 | """
2 | This module defines methods related to masking out distributed data
3 | and/or the associated covariance matrix.
4 | For the testing suite, please turn to "imagine/tests/tools_tests.py".
5 | 
6 | Implemented with numpy.ndarray raw data.
7 | """
8 | 
9 | # %% IMPORTS
10 | # Built-in imports
11 | from copy import deepcopy
12 | import logging as log
13 | 
14 | # Package imports
15 | import numpy as np
16 | 
17 | # IMAGINE imports
18 | from imagine.tools.mpi_helper import mpi_arrange
19 | 
20 | # All declaration
21 | __all__ = ['mask_cov', 'mask_var', 'mask_obs']
22 | 
23 | 
24 | # %% FUNCTION DEFINITIONS
25 | def mask_obs(obs, mask):
26 |     """
27 |     Applies a mask to an observable.
28 | 
29 |     Parameters
30 |     ----------
31 |     obs : distributed numpy.ndarray
32 |         Ensemble of observables, in global shape (ensemble size, data size);
33 |         each node contains part of the global rows.
34 | 
35 |     mask : numpy.ndarray
36 |         Copied mask map in shape (1, data size) on each node.
37 | 
38 |     Returns
39 |     -------
40 |     numpy.ndarray
41 |         Masked observable of shape (ensemble size, masked data size).
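    Example
    -------
    A small sketch of the masking convention (values made up)::

        >>> import numpy as np
        >>> obs = np.arange(8.).reshape(2, 4)    # (ensemble size, data size)
        >>> mask = np.array([[1., 0., 1., 0.]])  # (1, data size)
        >>> mask_obs(obs, mask)
        array([[0., 2.],
               [4., 6.]])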
42 |     """
43 |     log.debug('@ masker::mask_obs')
44 |     assert (obs.shape[0] >= 1)
45 |     assert (mask.shape[0] == 1)
46 |     assert (obs.shape[1] == mask.shape[1])
47 | 
48 |     # Creates a boolean mask
49 |     bool_mask = mask[0].astype(bool)
50 | 
51 |     return obs[:, bool_mask]
52 | 
53 | def mask_var(var, mask):
54 |     """
55 |     Applies a mask to variance data.
56 | 
57 |     Parameters
58 |     ----------
59 |     var : numpy.ndarray
60 |         Variance data
61 | 
62 |     mask : numpy.ndarray
63 |         Copied mask map in shape (1, data size) on each node.
64 | 
65 |     Returns
66 |     -------
67 |     numpy.ndarray
68 |         Masked variance data of shape (masked data size,).
69 |     """
70 |     log.debug('@ masker::mask_var')
71 |     assert (mask.shape[0] == 1)
72 |     assert (var.size == mask.shape[1])
73 | 
74 |     # Creates a boolean mask
75 |     bool_mask = mask[0].astype(bool)
76 | 
77 |     return var[bool_mask]
78 | 
79 | 
80 | def mask_cov(cov, mask):
81 |     """
82 |     Applies mask to the observable covariance.
83 | 
84 |     Parameters
85 |     ----------
86 |     cov : (distributed) numpy.ndarray
87 |         Covariance matrix of observables in global shape (data size, data size);
88 |         each node contains part of the global rows
89 |         (if `imagine.rc['distributed_arrays']=True`).
90 |     mask : numpy.ndarray
91 |         Copied mask map in shape (1, data size).
92 | 
93 |     Returns
94 |     -------
95 |     masked_cov : numpy.ndarray
96 |         Masked covariance matrix of shape (masked data size, masked data size).
97 |     """
98 |     log.debug('@ masker::mask_cov')
99 |     assert (mask.shape[0] == 1)
100 |     assert (cov.shape[1] == mask.shape[1])
101 | 
102 |     # Creates a 1D boolean mask
103 |     bool_mask_1D = mask[0].astype(bool)
104 |     # Constructs a 2D boolean mask and replaces 1D mask
105 |     bool_mask = np.outer(bool_mask_1D, bool_mask_1D)
106 | 
107 |     # If mpi distributed_arrays are being used, the shape of the mask
108 |     # needs to be adjusted, as each node accesses only some rows
109 |     row_min, row_max = mpi_arrange(bool_mask_1D.size)
110 |     nrows, ncolumns = bool_mask_1D[row_min:row_max].sum(), bool_mask_1D.sum()
111 |     bool_mask = bool_mask[row_min:row_max, :]
112 | 
113 |     # Applies the mask and reshapes
114 |     masked_cov = cov[bool_mask].reshape((nrows, ncolumns))
115 | 
116 |     return masked_cov
--------------------------------------------------------------------------------
/imagine/tools/misc.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # Built-in imports
3 | from math import floor, log10
4 | 
5 | # Package imports
6 | import astropy.units as apu
7 | 
8 | # All declaration
9 | __all__ = ['adjust_error_intervals', 'is_notebook', 'unit_checker']
10 | 
11 | 
12 | # %% FUNCTION DEFINITIONS
13 | def is_notebook():
14 |     """
15 |     Finds out whether python is running in a Jupyter notebook
16 |     or as a shell.
17 |     """
18 |     try:
19 |         shell = get_ipython().__class__.__name__
20 |         if shell == 'ZMQInteractiveShell':
21 |             return True   # Jupyter notebook or qtconsole
22 |         elif shell == 'TerminalInteractiveShell':
23 |             return False  # Terminal running IPython
24 |         else:
25 |             return False  # Other type (?)
26 |     except NameError:
27 |         return False      # Probably standard Python interpreter
28 | 
29 | 
30 | def adjust_error_intervals(value, errlo, errup, sdigits=2, return_ndec=False):
31 |     r"""
32 |     Takes the value of a quantity `value` with associated errors `errlo` and
33 |     `errup`, and prepares them to be reported as
34 |     :math:`v^{+err\,up}_{-err\,down}`. This is done by adjusting the number
35 |     of decimal places of all the arguments so that the errors have at least
36 |     `sdigits` significant digits. Optionally, this number of decimal places
37 |     may be returned.
38 | 
39 | 
40 |     Parameters
41 |     ----------
42 |     value : int or float or astropy.Quantity
43 |         Value of quantity.
44 |     errlo, errup : int or float or astropy.Quantity
45 |         Associated lower and upper errors of `value`.
46 |     sdigits : int, optional
47 |         Minimum number of significant digits in the errors
48 |     return_ndec : bool, optional
49 |         If True, also returns the number of decimal places used
50 | 
51 |     Returns
52 |     -------
53 |     value : float
54 |         Rounded value
55 |     errlo, errup : float
56 |         Asymmetric error values
57 |     n : int
58 |         If `return_ndec` is `True`, the number of decimal places is returned
59 |     """
60 |     unit, [value, errlo, errup] = unit_checker(None, [value, errlo, errup])
61 |     get_rounding = lambda x: -int(floor(log10(abs(x)))) + (sdigits - 1)
62 | 
63 |     if unit is None:
64 |         unit = 1.0
65 | 
66 |     n = max(get_rounding(errlo), get_rounding(errup))
67 | 
68 |     value, errlo, errup = (round(x,n)*unit for x in (value, errlo, errup))
69 | 
70 |     if not return_ndec:
71 |         return value, errlo, errup
72 |     else:
73 |         return value, errlo, errup, n
74 | 
75 | 
76 | def unit_checker(unit, list_of_quant):
77 |     """
78 |     Checks the consistency of units of a list of quantities, converting them
79 |     all to the same units, if needed.
80 | 
81 |     Parameters
82 |     ----------
83 |     unit : astropy.Unit
84 |         Unit to be used for the quantities in the list. If set to `None`, the
85 |         units of the first list item are used.
86 |     list_of_quant : list
87 |         List of quantities to be checked.
88 | 
89 |     Returns
90 |     -------
91 |     unit : astropy.Unit
92 |         The common unit used
93 |     list_of_values :
94 |         Contains the quantities of `list_of_quant` converted to floats using
95 |         the common unit `unit`
96 |     """
97 |     ul = []
98 |     for uq in list_of_quant:
99 |         if isinstance(uq, apu.Quantity):
100 |             if unit is None:
101 |                 unit = uq.unit
102 |             else:
103 |                 uq.to(unit)  # raises an error if the units are incompatible
104 |             ul.append(uq.to_value(unit))
105 |         else:
106 |             ul.append(uq)
107 |     return unit, ul
--------------------------------------------------------------------------------
/imagine/simulators/test/bi_simulator.py:
--------------------------------------------------------------------------------
1 | """
2 | Built only for testing purposes
3 | 
4 | """
5 | # Field model
6 | # mimicking emission intensity:
7 | # field = square( gaussian_rand(mean=a, std=b)_x * sin(x) )
8 | 
9 | # x in [0, 2pi]
10 | # a and b are free parameters
11 | import numpy as np
12 | 
13 | from imagine.simulators.simulator import Simulator
14 | from imagine.fields.test_field.test_field import TestField
15 | from imagine.observables.observable_dict import Measurements, Simulations
16 | from imagine.tools.random_seed import seed_generator
17 | from imagine.tools.icy_decorator import icy
18 | 
19 | 
20 | @icy
21 | class BiSimulator(Simulator):
22 |     r"""
23 |     Mock model mimicking Faraday rotation
24 | 
25 |     .. math::
26 |         F = [\mathcal{G}(a, b) \sin(x)]^2
27 | 
28 |     where :math:`\mathcal{G}` is a Gaussian process with mean
29 |     :math:`\mu=a` and standard deviation :math:`\sigma=b`, and
30 |     :math:`x \in [0, 2\pi]`.
31 | 
32 |     Parameters
33 |     ----------
34 |     measurements
35 |         Measurements object
36 |         for testing, only the key ('test', ..., ..., ...) is valid
37 | 
38 |     Notes
39 |     -----
40 |     Instances of this class are callable
41 |     """
42 |     def __init__(self, measurements):
43 |         self.output_checklist = measurements
44 | 
45 |     @property
46 |     def output_checklist(self):
47 |         return self._output_checklist
48 | 
49 |     @output_checklist.setter
50 |     def output_checklist(self, measurements):
51 |         assert isinstance(measurements, Measurements)
52 |         self._output_checklist = tuple(measurements.keys())
53 | 
54 |     def __call__(self, field_list):
55 |         """
56 |         Generates observables with parameter info from input field list
57 | 
58 |         Parameters
59 |         ----------
60 |         field_list
61 |             list/tuple of field objects
62 | 
63 |         Returns
64 |         -------
65 |         imagine.observables.observable_dict.Simulations
66 |             Simulations object
67 |         """
68 |         assert (len(self._output_checklist) == 1)
69 |         assert (self._output_checklist[0][0] == 'test')
70 |         obsdim = int(self._output_checklist[0][2])
71 |         # check input
72 |         assert isinstance(field_list, (list, tuple))
73 |         assert (len(field_list) == 1)
74 |         assert isinstance(field_list[0], TestField)
75 |         ensize = field_list[0].ensemble_size
76 |         # assemble Simulations object
77 |         output = Simulations()
78 |         # core function for producing observables
79 |         obs_arr = self.obs_generator(field_list, ensize, obsdim)
80 |         # not using healpix structure
81 |         output.append(self._output_checklist[0], obs_arr, True)
82 |         return output
83 | 
84 |     def obs_generator(self, field_list, ensemble_size, obs_size):
85 |         """
86 |         Applies the field model and generates observable raw data
87 | 
88 |         Parameters
89 |         ----------
90 |         field_list
91 |             list of field objects
92 |         ensemble_size
93 |             number of realizations in ensemble
94 |         obs_size
95 |             size of observable
96 | 
97 |         Returns
98 |         -------
99 |         numpy.ndarray
100 |         """
101 |         # coordinates
102 |         raw_arr = np.zeros((ensemble_size, obs_size))
103 |         coo_x = np.linspace(0., 2.*np.pi, obs_size)
104 |         for i in range(ensemble_size):
105 |             pars = field_list[0].report_parameters(i)
106 |             # double check parameter keys
107 |             assert (pars.keys() == field_list[0].field_checklist.keys())
108 |             # extract parameters
109 |             par_a = pars['a']
110 |             par_b = pars['b']
111 |             par_s = pars['random_seed']
112 |             # get thread-time dependent random number
113 |             np.random.seed(seed_generator(par_s))
114 |             raw_arr[i, :] = np.square(np.multiply(np.sin(coo_x),
115 |                             np.random.normal(loc=par_a, scale=par_b, size=obs_size)))
116 |         return raw_arr
--------------------------------------------------------------------------------
/imagine/fields/test_field.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # Package imports
3 | import astropy.units as u
4 | import numpy as np
5 | import scipy.stats as stats
6 | 
7 | # IMAGINE imports
8 | from imagine.fields import FieldFactory
9 | from imagine.fields.base_fields import (
10 |     MagneticField, ThermalElectronDensityField)
11 | from imagine.priors import FlatPrior
12 | 
13 | # All declaration
14 | __all__ = ['CosThermalElectronDensity', 'CosThermalElectronDensityFactory',
15 |            'NaiveGaussianMagneticField', 'NaiveGaussianMagneticFieldFactory']
16 | 
17 | 
18 | # %% CLASS DEFINITIONS
19 | class CosThermalElectronDensity(ThermalElectronDensityField):
20 |     r"""
21 |     Toy model for a naively oscillating thermal electron distribution following:
22 | 
23 |     .. math::
24 | 
25 |         n_e(x,y,z) = n_0 [1+\cos (a x + \alpha)][1+\cos (b y + \beta)][1+\cos(c z + \gamma)]
26 | 
27 |     The field parameters are: 'n0', which corresponds to :math:`n_0`; and
28 |     'a', 'b', 'c', 'alpha', 'beta', 'gamma', which are
29 |     :math:`a`, :math:`b`, :math:`c`, :math:`\alpha`, :math:`\beta`, :math:`\gamma`, respectively.
30 |     """
31 | 
32 |     # Class attributes
33 |     NAME = 'cos_therm_electrons'
34 |     PARAMETER_NAMES = ['n0', 'a', 'alpha', 'b', 'beta', 'c', 'gamma']
35 | 
36 |     def compute_field(self, seed):
37 |         x = self.grid.x
38 |         y = self.grid.y
39 |         z = self.grid.z
40 |         p = self.parameters
41 |         return ((p['n0']) * (1 + np.cos(p['a'] * x + p['alpha']))
42 |                           * (1 + np.cos(p['b'] * y + p['beta']))
43 |                           * (1 + np.cos(p['c'] * z + p['gamma'])))
44 | 
45 | 
46 | class CosThermalElectronDensityFactory(FieldFactory):
47 |     """
48 |     Field factory associated with the :py:class:`CosThermalElectronDensity`
49 |     class
50 |     """
51 | 
52 |     # Class attributes
53 |     FIELD_CLASS = CosThermalElectronDensity
54 |     DEFAULT_PARAMETERS = {'n0': 1*u.cm**-3,
55 |                           'a': 0.0/u.kpc*u.rad,
56 |                           'b': 0.0/u.kpc*u.rad,
57 |                           'c': 0.0/u.kpc*u.rad,
58 |                           'alpha': 0.*u.rad,
59 |                           'beta': 0.*u.rad,
60 |                           'gamma': 0.*u.rad}
61 |     k = FlatPrior(xmin=0.01*u.rad/u.kpc, xmax=100*u.rad/u.kpc)
62 |     d = FlatPrior(xmin=0*u.rad, xmax=2*np.pi*u.rad)
63 |     PRIORS = {'n0': FlatPrior(xmin=0*u.cm**-3, xmax=10*u.cm**-3),
64 |               'a': k, 'b': k, 'c': k,
65 |               'alpha': d, 'beta': d, 'gamma': d}
66 | 
67 | 
68 | class NaiveGaussianMagneticField(MagneticField):
69 |     r"""
70 |     Toy model for naive Gaussian random field for testing.
71 | 
72 |     The values of each of the magnetic field components are individually
73 |     drawn from a Gaussian distribution with mean 'a0' and
74 |     standard deviation 'b0'.
75 | 
76 |     Warning: divergence may be non-zero!
77 |     """
78 | 
79 |     # Class attributes
80 |     NAME = 'naive_gaussian_magnetic_field'
81 |     STOCHASTIC_FIELD = True
82 |     PARAMETER_NAMES = ['a0', 'b0']
83 | 
84 |     def compute_field(self, seed):
85 | 
86 |         # No pre-allocation is needed: distr.rvs below generates
87 |         # the whole array in a single call
88 | 
89 |         mu = self.parameters['a0']
90 |         sigma = self.parameters['b0']
91 |         # Draws values from a normal distribution with these parameters
92 |         # using the seed provided in the argument
93 |         distr = stats.norm(loc=mu, scale=sigma)
94 |         B = distr.rvs(size=self.data_shape, random_state=seed)
95 | 
96 |         return B*self.units
97 | 
98 | 
99 | class NaiveGaussianMagneticFieldFactory(FieldFactory):
100 |     """
101 |     Field factory associated with the :py:class:`NaiveGaussianMagneticField`
102 |     class
103 |     """
104 | 
105 |     # Class attributes
106 |     FIELD_CLASS = NaiveGaussianMagneticField
107 |     DEFAULT_PARAMETERS = {'a0': 1*u.microgauss,
108 |                           'b0': 0.1*u.microgauss}
109 |     PRIORS = {'a0': FlatPrior(xmin=20*u.microgauss, xmax=20*u.microgauss),
110 |               'b0': FlatPrior(xmin=20*u.microgauss, xmax=20*u.microgauss)}
--------------------------------------------------------------------------------
/imagine/fields/hamx/brnd_es.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # IMAGINE imports
3 | from imagine.fields import DummyField, FieldFactory
4 | from imagine.priors import FlatPrior
5 | 
6 | # All declaration
7 | __all__ = ['BrndES', 'BrndESFactory']
8 | 
9 | 
10 | # %% CLASS DEFINITIONS
11 | class BrndES(DummyField):
12 |     """
13 |     This dummy field instructs the :py:class:`Hammurabi `
14 |     simulator class to use HammurabiX's built-in random magnetic field
15 |     model, the 'ES' random GMF
16 |     """
17 | 
18 |     # Class attributes
19 |     NAME = 'brnd_ES'
20 |     SIMULATOR_CONTROLLIST = None  # Unused, see simulator_controllist property
21 |     FIELD_CHECKLIST = None  # Unused, see field_checklist property
22 | 
23 |     def __init__(self, *args, grid_nx=None, grid_ny=None, grid_nz=None,
24 |                  **kwargs):
25 |         super().__init__(*args, **kwargs)
26 |         # Default controllist
27 |         self._controllist = {'cue': (['magneticfield', 'random'], {'cue': '1'}),
28 |                              'type': (['magneticfield', 'random'], {'type': 'global'}),
29 |                              'method': (['magneticfield', 'random', 'global'], {'type': 'es'})}
30 |         self.set_grid_size(nx=grid_nx, ny=grid_ny, nz=grid_nz)
31 | 
32 |     def set_grid_size(self, nx=None, ny=None, nz=None):
33 |         """
34 |         Changes the size of the grid used for the evaluation of the random field
35 |         """
36 |         if nx is not None:
37 |             self._controllist['box_brnd_nx'] = (['grid', 'box_brnd', 'nx'], {'value': str(nx)})
38 |         if ny is not None:
39 |             self._controllist['box_brnd_ny'] = (['grid', 'box_brnd', 'ny'], {'value': str(ny)})
40 |         if nz is not None:
41 |             self._controllist['box_brnd_nz'] = (['grid', 'box_brnd', 'nz'], {'value': str(nz)})
42 | 
43 |     @property
44 |     def field_checklist(self):
45 |         """
46 |         Hammurabi XML locations of physical parameters
47 |         """
48 |         checklist = {'rms': (['magneticfield', 'random', 'global', 'es', 'rms'], 'value'),
49 |                      'k0': (['magneticfield', 'random', 'global', 'es', 'k0'], 'value'),
50 |                      'a0': (['magneticfield', 'random', 'global', 'es', 'a0'], 'value'),
51 |                      'k1': (['magneticfield', 'random', 'global', 'es', 'k1'], 'value'),
52 |                      'a1': (['magneticfield', 'random', 'global', 'es', 'a1'], 'value'),
53 |                      'rho': (['magneticfield', 'random', 'global', 'es', 'rho'], 'value'),
54 |                      'r0': (['magneticfield', 'random', 'global', 'es', 'r0'], 'value'),
55 |                      'z0': (['magneticfield', 'random',
'global', 'es', 'z0'], 'value'), 56 | 'random_seed': (['magneticfield', 'random'], 'seed')} 57 | return checklist 58 | 59 | @property 60 | def simulator_controllist(self): 61 | """ 62 | Hammurabi XML locations of logical parameters 63 | """ 64 | return self._controllist 65 | 66 | 67 | class BrndESFactory(FieldFactory): 68 | """ 69 | Field factory that produces the dummy field :py:class:`BrndES` 70 | (see its docs for details). 71 | """ 72 | 73 | # Class attributes 74 | FIELD_CLASS = BrndES 75 | DEFAULT_PARAMETERS = {'rms': 2, 76 | 'k0': 10, 77 | 'a0': 1.7, 78 | 'k1': 0.1, 79 | 'a1': 0, 80 | 'rho': 0.5, 81 | 'r0': 8, 82 | 'z0': 1} 83 | PRIORS = {'rms': FlatPrior(xmin=0, xmax=4), 84 | 'k0': FlatPrior(xmin=0.1, xmax=1), 85 | 'a0': FlatPrior(xmin=1, xmax=3), 86 | 'k1': FlatPrior(xmin=0.01, xmax=1), 87 | 'a1': FlatPrior(xmin=0, xmax=3), 88 | 'rho': FlatPrior(xmin=0, xmax=1), 89 | 'r0': FlatPrior(xmin=2, xmax=10), 90 | 'z0': FlatPrior(xmin=0.1, xmax=3)} 91 | 92 | def __init__(self, *args, grid_nx=None, 93 | grid_ny=None, grid_nz=None, **kwargs): 94 | super().__init__(*args, **kwargs, 95 | field_kwargs={'grid_nx': grid_nx, 96 | 'grid_ny': grid_ny, 97 | 'grid_nz': grid_nz}) 98 | -------------------------------------------------------------------------------- /imagine/fields/hamx/breg_jf12.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # IMAGINE imports 3 | from imagine.fields import DummyField, FieldFactory 4 | from imagine.priors import FlatPrior 5 | 6 | # All declaration 7 | __all__ = ['BregJF12', 'BregJF12Factory'] 8 | 9 | class BregJF12(DummyField): 10 | """ 11 | This dummy field instructs the :py:class:`Hammurabi ` 12 | simulator class to use the HammurabiX's builtin regular magnetic field 13 | for the Jansson & Farrar model, https://ui.adsabs.harvard.edu/abs/2012ApJ...757...14J/abstract 14 | """ 15 | NAME = 'breg_jf12' 16 | 17 | FIELD_CHECKLIST = {'b_arm1': (['magneticfield', 'regular', 'jf12', 'b_arm1'], 'value'), 18 | 'b_arm2': (['magneticfield', 'regular', 'jf12', 'b_arm2'], 'value'), 19 | 'b_arm3': (['magneticfield', 'regular', 'jf12', 'b_arm3'], 'value'), 20 | 'b_arm4': (['magneticfield', 'regular', 'jf12', 'b_arm4'], 'value'), 21 | 'b_arm5': (['magneticfield', 'regular', 'jf12', 'b_arm5'], 'value'), 22 | 'b_arm6': (['magneticfield', 'regular', 'jf12', 'b_arm6'], 'value'), 23 | 'b_arm7': (['magneticfield', 'regular', 'jf12', 'b_arm7'], 'value'), 24 | 'b_ring': (['magneticfield', 'regular', 'jf12', 'b_ring'], 'value'), 25 | 'h_disk': (['magneticfield', 'regular', 'jf12', 'h_disk'], 'value'), 26 | 'w_disk': (['magneticfield', 'regular', 'jf12', 'w_disk'], 'value'), 27 | 'Bn': (['magneticfield', 'regular', 'jf12', 'Bn'], 'value'), 28 | 'Bs': (['magneticfield', 'regular', 'jf12', 'Bs'], 'value'), 29 | 'rn': (['magneticfield', 'regular', 'jf12', 'rn'], 'value'), 30 | 'rs': (['magneticfield', 'regular', 'jf12', 'rs'], 'value'), 31 | 'wh': (['magneticfield', 'regular', 'jf12', 'wh'], 'value'), 32 | 'z0': (['magneticfield', 'regular', 'jf12', 'z0'], 'value'), 33 | 'B0_X': (['magneticfield', 'regular', 'jf12', 'B0_X'], 'value'), 34 | 'Xtheta': (['magneticfield', 'regular', 'jf12', 'Xtheta'], 'value'), 35 | 'rpc_X': (['magneticfield', 'regular', 'jf12', 'rpc_X'], 'value'), 36 | 'r0_X': (['magneticfield', 'regular', 'jf12', 'r0_X'], 'value')} 37 | 38 | SIMULATOR_CONTROLLIST = {'cue': (['magneticfield', 'regular'], {'cue': '1'}), 39 | 'type': (['magneticfield', 'regular'], {'type': 'jf12'})} 40 | 41 | class BregJF12Factory(FieldFactory): 42 
| """ 43 | Field factory that produces the dummy field :py:class:`BregJF12` 44 | (see its docs for details). 45 | """ 46 | FIELD_CLASS = BregJF12 47 | DEFAULT_PARAMETERS = {'b_arm1': 0.1, 'b_arm2': 3.0, 'b_arm3': -0.9, 'b_arm4': -0.8, 48 | 'b_arm5': -2.0, 'b_arm6': -4.2, 'b_arm7': 0.0, 'b_ring': 0.1, 'h_disk': 0.40, 'w_disk': 0.27, 49 | 'Bn': 1.4, 'Bs': -1.1, 'rn': 9.22, 'rs': 16.7, 'wh': 0.20, 'z0': 5.3, 50 | 'B0_X': 4.6,'Xtheta': 49, 'rpc_X': 4.8, 'r0_X': 2.9} 51 | PRIORS = { 'b_arm1': FlatPrior(xmin =-1., xmax =1.), 52 | 'b_arm2': FlatPrior(xmin = 2., xmax = 4.), 53 | 'b_arm3': FlatPrior(xmin =-1.,xmax =1.), 54 | 'b_arm4': FlatPrior(xmin =-1.,xmax =1.), 55 | 'b_arm5': FlatPrior(xmin =-3.,xmax =-1.), 56 | 'b_arm6': FlatPrior(xmin =-5.,xmax =-3.), 57 | 'b_arm7': FlatPrior(xmin =-1.,xmax =1.), 58 | 'b_ring': FlatPrior(xmin =-1.,xmax =1.), 59 | 'h_disk': FlatPrior(xmin =-1.,xmax =1.), 60 | 'w_disk': FlatPrior(xmin =-1.,xmax =1.), 61 | 'Bn' : FlatPrior(xmin =0.,xmax =2.), 62 | 'Bs' : FlatPrior(xmin =-2.,xmax =0.), 63 | 'rn' : FlatPrior(xmin =8.,xmax =10.), 64 | 'rs' : FlatPrior(xmin =15.,xmax =17.), 65 | 'wh' : FlatPrior(xmin =0.,xmax =2.), 66 | 'z0' : FlatPrior(xmin =4.,xmax =6.), 67 | 'B0_X' : FlatPrior(xmin =4.,xmax =6.), 68 | 'Xtheta': FlatPrior(xmin =48.,xmax =50.), 69 | 'rpc_X' : FlatPrior(xmin =4.,xmax =6.), 70 | 'r0_X': FlatPrior(xmin =2., xmax =4.)} 71 | -------------------------------------------------------------------------------- /imagine/tests/test_observable.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Package imports 3 | from mpi4py import MPI 4 | import numpy as np 5 | import pytest 6 | 7 | # IMAGINE imports 8 | from imagine.observables import Observable 9 | 10 | # Globals 11 | comm = MPI.COMM_WORLD 12 | mpisize = comm.Get_size() 13 | mpirank = comm.Get_rank() 14 | 15 | # Marks tests in this module as quick 16 | pytestmark = pytest.mark.quick 17 | 18 | # %% PYTEST DEFINITIONS 19 | class TestObservables(object): 20 | def test_init_measure(self): 21 | arr = np.random.rand(1,128) 22 | test_obs = Observable(arr, 'measured') 23 | assert test_obs.dtype == 'measured' 24 | assert test_obs.shape == (mpisize, 128) 25 | assert np.allclose(arr[0], test_obs.data[0]) 26 | assert np.allclose(arr[0], test_obs.ensemble_mean[0]) 27 | 28 | def test_init_covariance(self): 29 | arr = np.random.rand(1,mpisize) 30 | test_obs = Observable(arr, 'covariance') 31 | assert test_obs.dtype == 'covariance' 32 | assert test_obs.shape == (mpisize, mpisize) 33 | assert np.allclose(arr[0], test_obs.data[0]) 34 | assert test_obs.size == mpisize 35 | 36 | def test_append_ndarray(self): 37 | if not mpirank: 38 | arr = np.random.rand(2,128) 39 | else: 40 | arr = np.random.rand(1,128) 41 | test_obs = Observable(arr, 'simulated') 42 | brr = np.random.rand(1,128) 43 | test_obs.append(brr) 44 | global_shape = test_obs.shape 45 | globalrr = test_obs.global_data 46 | if not mpirank: 47 | assert global_shape == globalrr.shape 48 | fullrr = np.vstack([arr,brr]) 49 | assert np.allclose(fullrr, test_obs.data) 50 | 51 | def test_append_obs(self): 52 | if not mpirank: 53 | arr = np.random.rand(2,128) 54 | else: 55 | arr = np.random.rand(1,128) 56 | test_obs = Observable(arr, 'simulated') 57 | if not mpirank: 58 | brr = np.random.rand(1,128) 59 | else: 60 | brr = np.random.rand(1,128) 61 | test_obs2 = Observable(brr, 'simulated') 62 | test_obs2.append(test_obs) 63 | global_shape = test_obs2.shape 64 | globalrr = test_obs2.global_data 65 | if not mpirank: 66 | 
assert global_shape == globalrr.shape 67 | fullrr = np.vstack([arr,brr]) 68 | assert np.all(np.isin(test_obs2.data, fullrr)) 69 | 70 | def test_append_twice(self): 71 | if not mpirank: 72 | arr = np.random.rand(2,128) 73 | else: 74 | arr = np.random.rand(1,128) 75 | test_obs = Observable(arr, 'simulated') 76 | brr = np.random.rand(1,128) 77 | test_obs.append(brr) 78 | crr = np.random.rand(2,128) 79 | test_obs.append(crr) 80 | global_shape = test_obs.shape 81 | globalrr = test_obs.global_data 82 | if not mpirank: 83 | assert global_shape == globalrr.shape 84 | fullrr = np.vstack([arr, brr, crr]) 85 | assert np.all(np.isin(test_obs.data, fullrr)) 86 | 87 | def test_append_with_rewrite(self): 88 | if not mpirank: 89 | arr = np.random.rand(2,128) 90 | else: 91 | arr = np.random.rand(1,128) 92 | test_obs = Observable(arr, 'simulated') 93 | test_obs.rw_flag = True 94 | brr = np.random.rand(1,128) 95 | test_obs.append(brr) 96 | global_shape = test_obs.shape 97 | globalrr = test_obs.global_data 98 | if not mpirank: 99 | assert global_shape == globalrr.shape 100 | assert np.allclose(test_obs.data[0], brr[0]) 101 | 102 | def test_append_after_rewrite(self): 103 | arr = np.random.rand(1,128) 104 | test_obs = Observable(arr, 'simulated') 105 | if not mpirank: 106 | brr = np.random.rand(2,128) 107 | else: 108 | brr = np.random.rand(1,128) 109 | test_obs.rw_flag = True 110 | test_obs.append(brr) 111 | crr = np.random.rand(1,128) 112 | # rw_flag must have been switched off 113 | test_obs.append(crr) 114 | global_shape = test_obs.shape 115 | globalrr = test_obs.global_data 116 | if not mpirank: 117 | assert global_shape == globalrr.shape 118 | fullrr = np.vstack([brr, crr]) 119 | assert np.all(np.isin(test_obs.data, fullrr)) 120 | -------------------------------------------------------------------------------- /imagine/fields/hamx/brnd_jf12.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # IMAGINE imports 3 | from imagine.fields import DummyField, FieldFactory 4 | from imagine.priors import FlatPrior 5 | 6 | # All declaration 7 | __all__ = ['BrndJF12', 'BrndJF12Factory'] 8 | 9 | class BrndJF12(DummyField): 10 | """ This dummy field instructs the :py:class:`Hammurabi` 11 | simulator class to use HammurabiX's built-in random magnetic field 12 | of the Jansson & Farrar model, https://ui.adsabs.harvard.edu/abs/2012ApJ...761L..11J/abstract 13 | """ 14 | NAME = 'brnd_jf12' 15 | 16 | FIELD_CHECKLIST = {'rms': (['magneticfield', 'random', 'global', 'jf12', 'rms'], 'value'), 17 | 'k0': (['magneticfield', 'random', 'global', 'jf12', 'k0'], 'value'), 18 | 'k1': (['magneticfield', 'random', 'global', 'jf12', 'k1'], 'value'), 19 | 'a0': (['magneticfield', 'random', 'global', 'jf12', 'a0'], 'value'), 20 | 'a1': (['magneticfield', 'random', 'global', 'jf12', 'a1'], 'value'), 21 | 'rho': (['magneticfield', 'random', 'global', 'jf12', 'rho'], 'value'), 22 | 'b0_1': (['magneticfield', 'random', 'global', 'jf12', 'b0_1'], 'value'), 23 | 'b0_2': (['magneticfield', 'random', 'global', 'jf12', 'b0_2'], 'value'), 24 | 'b0_3': (['magneticfield', 'random', 'global', 'jf12', 'b0_3'], 'value'), 25 | 'b0_4': (['magneticfield', 'random', 'global', 'jf12', 'b0_4'], 'value'), 26 | 'b0_5': (['magneticfield', 'random', 'global', 'jf12', 'b0_5'], 'value'), 27 | 'b0_6': (['magneticfield', 'random', 'global', 'jf12', 'b0_6'], 'value'), 28 | 'b0_7': (['magneticfield', 'random', 'global', 'jf12', 'b0_7'], 'value'), 29 | 'b0_8': (['magneticfield', 'random', 'global', 'jf12', 'b0_8'],
'value'), 30 | 'b0_int': (['magneticfield', 'random', 'global', 'jf12', 'b0_int'], 'value'), 31 | 'z0_spiral': (['magneticfield', 'random', 'global', 'jf12', 'z0_spiral'], 'value'), 32 | 'b0_halo': (['magneticfield', 'random', 'global', 'jf12', 'b0_halo'], 'value'), 33 | 'r0_halo': (['magneticfield', 'random', 'global', 'jf12', 'r0_halo'], 'value'), 34 | 'z0_halo': (['magneticfield', 'random', 'global', 'jf12', 'z0_halo'], 'value'), 35 | 'random_seed': (['magneticfield', 'random'], 'seed')} 36 | 37 | SIMULATOR_CONTROLLIST = {'cue': (['magneticfield', 'random'], {'cue': '1'}), 38 | 'type': (['magneticfield', 'random'], {'type': 'global'}), 39 | 'method': (['magneticfield', 'random', 'global'], {'type': 'jf12'})} 40 | 41 | 42 | class BrndJF12Factory(FieldFactory): 43 | """ 44 | Field factory that produces the dummy field :py:class:`BrndJF12` 45 | (see its docs for details). 46 | """ 47 | FIELD_CLASS = BrndJF12 48 | DEFAULT_PARAMETERS = {'rms': 1.0, 'k0': 10.0, 'k1': 0.1, 'a0': 1.7, 'a1': 0.0, 'rho': 1.0, 49 | 'b0_1': 10.81, 'b0_2': 6.96, 'b0_3': 9.59, 'b0_4': 6.96, 'b0_5': 1.96, 'b0_6': 16.34, 'b0_7': 37.29, 'b0_8': 10.35, 50 | 'b0_int': 7.63, 'z0_spiral': 0.61, 'b0_halo': 4.68, 'r0_halo': 10.97, 'z0_halo': 2.84} 51 | PRIORS = {'rms': FlatPrior(xmin=0.5, xmax=2.5), 52 | 'k0': FlatPrior(xmin=9.0, xmax=11.0), 53 | 'k1': FlatPrior(xmin=0.0, xmax=2.0), 54 | 'a0': FlatPrior(xmin=1.0, xmax=3.0), 55 | 'a1': FlatPrior(xmin=0.0, xmax=1.0), 56 | 'rho': FlatPrior(xmin=0.0, xmax=2.0), 57 | 'b0_1': FlatPrior(xmin=10.0, xmax=12.0), 58 | 'b0_2': FlatPrior(xmin=6.0, xmax=8.0), 59 | 'b0_3': FlatPrior(xmin=9.0, xmax=11.0), 60 | 'b0_4': FlatPrior(xmin=6.0, xmax=8.0), 61 | 'b0_5': FlatPrior(xmin=1.0, xmax=3.0), 62 | 'b0_6': FlatPrior(xmin=15.0, xmax=17.0), 63 | 'b0_7': FlatPrior(xmin=36.0, xmax=38.0), 64 | 'b0_8': FlatPrior(xmin=10.0, xmax=12.0), 65 | 'b0_int': FlatPrior(xmin=7.0, xmax=9.0), 66 | 'z0_spiral': FlatPrior(xmin=0.0, xmax=2.0), 67 | 'b0_halo': FlatPrior(xmin=4.0, xmax=6.0), 68 | 'r0_halo': FlatPrior(xmin=10.0, xmax=12.0), 69 | 'z0_halo': FlatPrior(xmin=2.0, xmax=4.0)} 70 | -------------------------------------------------------------------------------- /imagine/fields/basic_fields.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Package imports 3 | import numpy as np 4 | import scipy.stats as stats 5 | 6 | # IMAGINE imports 7 | from imagine.fields.base_fields import ( 8 | MagneticField, ThermalElectronDensityField) 9 | 10 | # All declaration 11 | __all__ = ['ConstantMagneticField', 'ConstantThermalElectrons', 12 | 'ExponentialThermalElectrons', 'RandomThermalElectrons'] 13 | 14 | 15 | # %% CLASS DEFINITIONS 16 | class ConstantMagneticField(MagneticField): 17 | """ 18 | Constant magnetic field 19 | 20 | The field parameters are: 21 | 'Bx', 'By', 'Bz', which correspond to the fixed components 22 | :math:`B_x`, :math:`B_y` and :math:`B_z`. 23 | """ 24 | 25 | # Class attributes 26 | NAME = 'constant_B' 27 | PARAMETER_NAMES = ['Bx', 'By', 'Bz'] 28 | 29 | def compute_field(self, seed): 30 | # Creates an empty array to store the result 31 | B = np.empty(self.data_shape) * self.parameters['Bx'].unit 32 | # For a magnetic field, the output must be of shape: 33 | # (Nx,Ny,Nz,Nc) where Nc is the index of the component.
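# Here Nc = 3 (one slot per vector component), so B.shape == (Nx, Ny, Nz, 3).
# Multiplying the empty array by parameters['Bx'].unit attaches the field's
# units up front, keeping the component assignments below dimensionally consistent.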
34 | # Computes Bx 35 | B[:, :, :, 0] = self.parameters['Bx'] 36 | # Computes By 37 | B[:, :, :, 1] = self.parameters['By'] 38 | # Computes Bz 39 | B[:, :, :, 2] = self.parameters['Bz'] 40 | return B 41 | 42 | 43 | class ConstantThermalElectrons(ThermalElectronDensityField): 44 | """ 45 | Constant thermal electron density field 46 | 47 | The field parameters are: 48 | 'ne', the number density of thermal electrons 49 | """ 50 | 51 | # Class attributes 52 | NAME = 'constant_TE' 53 | PARAMETER_NAMES = ['ne'] 54 | 55 | def compute_field(self, seed): 56 | return np.ones(self.data_shape)*self.parameters['ne'] 57 | 58 | 59 | class ExponentialThermalElectrons(ThermalElectronDensityField): 60 | """ 61 | Thermal electron distribution in a double exponential disc 62 | characterized by a scale-height and a scale-radius, i.e. 63 | 64 | .. math:: 65 | 66 | n_e(R) = n_0 e^{-R/R_e} e^{-|z|/h_e} 67 | 68 | where :math:`R` is the cylindrical radius and :math:`z` is the vertical 69 | coordinate. 70 | 71 | The field parameters are: 'central_density', :math:`n_0`; 72 | 'scale_radius', :math:`R_e`; and 'scale_height', :math:`h_e`. 73 | """ 74 | 75 | # Class attributes 76 | NAME = 'exponential_disc_thermal_electrons' 77 | PARAMETER_NAMES = ['central_density', 78 | 'scale_radius', 79 | 'scale_height'] 80 | 81 | def compute_field(self, seed): 82 | R = self.grid.r_cylindrical 83 | z = self.grid.z 84 | Re = self.parameters['scale_radius'] 85 | he = self.parameters['scale_height'] 86 | n0 = self.parameters['central_density'] 87 | 88 | return n0*np.exp(-R/Re)*np.exp(-np.abs(z/he)) 89 | 90 | 91 | class RandomThermalElectrons(ThermalElectronDensityField): 92 | """ 93 | Thermal electron densities drawn from a Gaussian distribution 94 | 95 | NB This may lead to negative densities depending on the choice of 96 | parameters. This may be controlled with the 'min_ne' parameter 97 | which sets a minimum value for the density field (i.e. 98 | any value smaller than the minimum density is set to min_ne). 99 | 100 | The field parameters are: 'mean', the mean of the distribution; 'std', the 101 | standard deviation of the distribution; and 'min_ne', the 102 | aforementioned minimum density. To disable the minimum density requirement, 103 | it may be set to NaN.
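
    Example
    -------
    A minimal construction sketch (values purely illustrative; assumes
    ``astropy.units`` imported as ``u`` and a ``grid`` object, and follows
    the constructor signature of the base ``Field`` class)::

        ne = RandomThermalElectrons(grid,
                                    parameters={'mean': 0.03*u.cm**-3,
                                                'std': 0.01*u.cm**-3,
                                                'min_ne': 0*u.cm**-3})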
104 | """ 105 | 106 | # Class attributes 107 | NAME = 'random_thermal_electrons' 108 | STOCHASTIC_FIELD = True 109 | PARAMETER_NAMES = ['mean', 'std', 'min_ne'] 110 | 111 | def compute_field(self, seed): 112 | # Converts dimensional parameters into numerical values 113 | # in the correct units (stats norm does not like units) 114 | mu = self.parameters['mean'].to_value(self.units) 115 | sigma = self.parameters['std'].to_value(self.units) 116 | minimum_density = self.parameters['min_ne'].to_value(self.units) 117 | 118 | # Draws values from a normal distribution with these parameters 119 | # using the seed provided in the argument 120 | distr = stats.norm(loc=mu, scale=sigma) 121 | result = distr.rvs(size=self.data_shape, random_state=seed) 122 | 123 | # Applies minimum density, if present 124 | if np.isfinite(minimum_density): 125 | result[result < minimum_density] = minimum_density 126 | 127 | return result << self.units # Restores units 128 | -------------------------------------------------------------------------------- /imagine/templates/simulator_template.py: -------------------------------------------------------------------------------- 1 | from imagine.simulators import Simulator 2 | import numpy as np 3 | import MY_SIMULATOR # Substitute this by your own code 4 | 5 | class SimulatorTemplate(Simulator): 6 | """ 7 | Detailed description of the simulator 8 | """ 9 | # The quantity that will be simulated (e.g. 'fd', 'sync', 'dm') 10 | # Any observable quantity absent in this list is ignored by the simulator 11 | SIMULATED_QUANTITIES = ['my_observable_quantity'] 12 | # A list or set of what is required for the simulator to work 13 | REQUIRED_FIELD_TYPES = ['dummy', 'magnetic_field'] 14 | # Fields which may be used if available 15 | OPTIONAL_FIELD_TYPES = ['thermal_electron_density'] 16 | # One must specify which grid is compatible with this simulator 17 | ALLOWED_GRID_TYPES = ['cartesian'] 18 | # Tells whether this simulator supports using different grids 19 | USE_COMMON_GRID = False 20 | 21 | def __init__(self, measurements, **extra_args): 22 | # Send the measurements to parent class 23 | super().__init__(measurements) 24 | # Any initialization task involving **extra_args can be done *here* 25 | pass 26 | 27 | def simulate(self, key, coords_dict, realization_id, output_units): 28 | """ 29 | This is the main function you need to override to create your simulator. 30 | The simulator will cycle through a series of Measurements and create 31 | mock data using this `simulate` function for each of them. 32 | 33 | Parameters 34 | ---------- 35 | key : tuple 36 | Information about the observable one is trying to simulate 37 | coords_dict : dictionary 38 | If the trying to simulate data associated with discrete positions 39 | in the sky, this dictionary contains arrays of coordinates. 40 | realization_id : int 41 | The index associated with the present realisation being computed. 42 | output_units : astropy.units.Unit 43 | The requested output units. 44 | """ 45 | # The argument key provide extra information about the specific 46 | # measurement one is trying to simulate 47 | obs_quantity, freq_Ghz, Nside, tag = key 48 | 49 | # If the simulator is working on tabular data, the observed 50 | # coordinates can be accessed from coords_dict, e.g. 
51 | lat, lon = coords_dict['lat'], coords_dict['lon'] 52 | 53 | # Fields can be accessed from a dictionary stored in self.fields 54 | B_field_values = self.fields['magnetic_field'] 55 | # If a dummy field is being used, instead of an actual realisation, 56 | # the parameters can be accessed from self.fields['dummy'] 57 | my_dummy_field_parameters = self.fields['dummy'] 58 | # Checklists allow _dummy fields_ to send specific information to 59 | # simulators about specific parameters 60 | checklist_params = self.field_checklist 61 | # Controllists in dummy fields contain a dict of simulator settings 62 | simulator_settings = self.controllist 63 | 64 | # If USE_COMMON_GRID is set to True, the grid can be accessed from 65 | # grid = self.grid 66 | 67 | # Otherwise, if fields are allowed to use different grids, one can 68 | # get the grid from the self.grids dictionary and the field type 69 | grid_B = self.grids['magnetic_field'] 70 | 71 | # Finally we can _simulate_, using whichever information is needed 72 | # and your own MY_SIMULATOR code: 73 | results = MY_SIMULATOR.simulate(simulator_settings, 74 | grid_B.x, grid_B.y, grid_B.z, 75 | lat, lon, freq_Ghz, B_field_values, 76 | my_dummy_field_parameters, 77 | checklist_params) 78 | # The results should be in a 1-D array of size compatible with 79 | # your dataset. I.e. for tabular data: results.size = lat.size 80 | # (or any other coordinate) 81 | # and for HEALPix data results.size = 12*(Nside**2) 82 | 83 | # Note: Awareness of other observables 84 | # While this method will be called for each individual observable, 85 | # the other observables can be accessed from self.observables 86 | # Thus, if your simulator is capable of computing multiple observables 87 | # at the same time, the results can be saved to an attribute on the first 88 | # call of `simulate` and accessed from this cache later. 89 | # To break the degeneracy between multiple realisations (which will 90 | # request the same key), the realization_id can be used 91 | # (see Hammurabi implementation for an example) 92 | return results 93 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # http://www.sphinx-doc.org/en/master/config 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | # 13 | from codecs import open 14 | import os 15 | import sys 16 | from unittest import mock 17 | from pkg_resources import parse_requirements 18 | sys.path.insert(0, os.path.abspath('../../')) 19 | 20 | 21 | # Read in the IMAGINE requirements 22 | with open('../../requirements.txt', 'r') as file: 23 | requirements = file.read().splitlines() 24 | with open('../requirements_RTD.txt', 'r') as file: 25 | docs_requirements = file.read().splitlines() 26 | 27 | # Parse the requirements 28 | parsed_reqs = parse_requirements(requirements) 29 | parsed_docs_reqs = parse_requirements(docs_requirements) 30 | 31 | # Determine the names of all requirements 32 | req_names = set(map(lambda x: x.name, parsed_reqs)) 33 | docs_req_names = set(map(lambda x: x.name, parsed_docs_reqs)) 34 | 35 | # Take the difference between req_names and docs_req_names 36 | names = req_names.difference(docs_req_names) 37 | 38 | # Create mock modules for all modules IMAGINE needs but the docs do not 39 | for mod_name in names: 40 | sys.modules[mod_name] = mock.Mock() 41 | 42 | # -- Project information ----------------------------------------------------- 43 | 44 | project = 'IMAGINE' 45 | copyright = '2019-2021, IMAGINE Consortium' 46 | author = 'IMAGINE Consortium' 47 | 48 | 49 | # -- General configuration --------------------------------------------------- 50 | 51 | # Add any Sphinx extension module names here, as strings. They can be 52 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 53 | # ones. 54 | extensions = [ 55 | 'sphinx.ext.autodoc', 56 | 'sphinx.ext.doctest', 57 | 'sphinx.ext.napoleon', 58 | 'sphinx.ext.intersphinx', 59 | 'sphinx.ext.coverage', 60 | 'sphinx.ext.mathjax', 61 | 'sphinx.ext.viewcode', 62 | 'sphinx.ext.autosummary', 63 | 'sphinx.ext.autosectionlabel', 64 | 'nbsphinx', 65 | 'nbsphinx_link', 66 | ] 67 | 68 | # Add any paths that contain templates here, relative to this directory. 69 | templates_path = ['_templates'] 70 | 71 | # List of patterns, relative to source directory, that match files and 72 | # directories to ignore when looking for source files. 73 | # This pattern also affects html_static_path and html_extra_path. 74 | exclude_patterns = [] 75 | 76 | # The master toctree document. 77 | master_doc = 'index' 78 | 79 | #html_logo = 'logo.png' 80 | 81 | 82 | add_function_parentheses = True 83 | add_module_names = True 84 | numfig = True 85 | 86 | # Reference formatting 87 | numfig_format = {'figure': "Fig.
%s"} 88 | 89 | 90 | # ------------------------------------------------------------------ 91 | # Autodoc configuration 92 | autodoc_default_options = {'members': None, 93 | 'special-members': '__call__'} 94 | 95 | autodoc_member_order = 'groupwise' 96 | autodoc_inherit_docstrings = True 97 | autosectionlabel_prefix_document = True 98 | 99 | # Napoleon configuration 100 | napoleon_include_private_with_doc = False 101 | napoleon_include_init_with_doc = True 102 | napoleon_include_special_with_doc = True 103 | 104 | napoleon_use_param = False 105 | napoleon_custom_sections = [('Sampling controllers', 'Other Parameters')] 106 | 107 | intersphinx_mapping = {'numpy': ('https://numpy.org/doc/stable/', None), 108 | 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 109 | 'astropy': ('https://docs.astropy.org/en/stable/', None), 110 | 'python': ('https://docs.python.org/3', None), 111 | 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), 112 | 'ultranest': ('https://johannesbuchner.github.io/UltraNest/', None)} 113 | 114 | # -- Options for HTML output ------------------------------------------------- 115 | 116 | # The theme to use for HTML and HTML Help pages. See the documentation for 117 | # a list of builtin themes. 118 | # 119 | 120 | 121 | html_theme = 'sphinx_rtd_theme' 122 | 123 | 124 | # Add any paths that contain custom static files (such as style sheets) here, 125 | # relative to this directory. They are copied after the builtin static files, 126 | # so a file named "default.css" will overwrite the builtin "default.css". 127 | html_static_path = ['_static'] 128 | -------------------------------------------------------------------------------- /imagine/tests/mocks_for_templates.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | import astropy.units as u 4 | from imagine.fields import DummyField 5 | 6 | # -------------------------------------------------------------------------- 7 | # For testing the magnetic_field_template 8 | def MY_GMF_MODEL_compute_B(param_A, param_B, x_coord, y_coord, z_coord, seed): 9 | # Checks interface with physical parameters 10 | Bx = param_A 11 | # Checks interface with dimensionless parameters 12 | By = param_B * 1e-10 *u.tesla # This will also test unit conversions.. 
13 | # Checks cartesian coordinates 14 | Bz = 42*(x_coord.value*y_coord.value*z_coord.value) * u.microgauss 15 | return Bx, By, Bz 16 | 17 | MY_GMF_MODEL = type(sys)('MY_GMF_MODEL') 18 | MY_GMF_MODEL.compute_B = MY_GMF_MODEL_compute_B 19 | 20 | 21 | # -------------------------------------------------------------------------- 22 | # For testing the thermal_electrons_template 23 | def MY_GALAXY_MODEL_compute_ne(param_A, param_B, r, theta, phi, seed): 24 | # Checks interface with physical parameters 25 | A = param_A.to_value(u.kpc) 26 | # Checks interface with dimensionless parameters 27 | B = param_B * 1*u.cm**-3 28 | # Checks spherical coordinates 29 | return A*B*(r.value*theta.value*phi.value)*seed 30 | 31 | MY_GALAXY_MODEL = type(sys)('MY_GALAXY_MODEL') 32 | MY_GALAXY_MODEL.compute_ne = MY_GALAXY_MODEL_compute_ne 33 | 34 | # -------------------------------------------------------------------------- 35 | # For testing the simulator_template 36 | def MY_SIMULATOR_simulate(simulator_settings, x, y, z, 37 | lat, lon, freq_Ghz, 38 | B_field_values, 39 | my_dummy_field_parameters, 40 | checklist_params): 41 | # Tests whether the shapes arrive here correctly 42 | assert x.shape == y.shape == z.shape 43 | # Tests checklists and the shape of the coordinate array 44 | mock_sim = np.empty(lat.size)*checklist_params['value'] 45 | # Tests the controllist and the freq_Ghz arg 46 | assert isinstance(freq_Ghz, float) 47 | mock_sim[0] = simulator_settings['mock']['start_value']*float(freq_Ghz) 48 | # Tests reading a Field 49 | mock_sim[1] = B_field_values[0,0,0,0].to_value(u.microgauss) 50 | # Tests working with a DummyField 51 | return mock_sim * my_dummy_field_parameters['units'] 52 | 53 | 54 | 55 | MY_SIMULATOR = type(sys)('MY_SIMULATOR') 56 | MY_SIMULATOR.simulate = MY_SIMULATOR_simulate 57 | 58 | class MockDummy(DummyField): 59 | """ 60 | Used in the test_simulator_template function 61 | """ 62 | NAME = 'mock' 63 | FIELD_CHECKLIST = {'value': 101010, 'units': None} 64 | SIMULATOR_CONTROLLIST = {'start_value': 17} 65 | 66 | 67 | 68 | 69 | # -------------------------------------------------------------------------- 70 | # For testing the field_factory_template 71 | class MY_PACKAGE_MY_FIELD_CLASS(DummyField): 72 | 73 | # Class attributes 74 | NAME = 'name_of_the_dummy_field' 75 | FIELD_CHECKLIST = {'Parameter_A': 'parameter_A_settings', 76 | 'Parameter_B': None} 77 | SIMULATOR_CONTROLLIST = {} 78 | 79 | 80 | MY_PACKAGE = type(sys)('MY_PACKAGE') 81 | MY_PACKAGE.MY_FIELD_CLASS = MY_PACKAGE_MY_FIELD_CLASS 82 | 83 | MY_PACKAGE.A_std_val = 1*u.K 84 | MY_PACKAGE.A_min = 0*u.K 85 | MY_PACKAGE.A_max = 2*u.K 86 | 87 | MY_PACKAGE.B_std_val = 1*u.Msun 88 | MY_PACKAGE.B_sig = 1*u.Msun 89 | MY_PACKAGE.B_min = 0*u.Msun 90 | MY_PACKAGE.B_max = 2*u.Msun 91 | 92 | 93 | # -------------------------------------------------------------------------- 94 | # For testing the pipeline_template 95 | class MY_SAMPLER_Sampler: 96 | def __init__(self, **kwargs): 97 | # Saves all initialization parameters 98 | for name, val in kwargs.items(): 99 | setattr(self, name, val) 100 | 101 | def run(self, **kwargs): 102 | # Checks active parameters 103 | assert self.param_names == ('fake_rnd_TE_param', 'constant_B_Bx', 'constant_B_By') 104 | # Checks prior_transform (which includes FlatPrior and GaussianPrior) 105 | assert np.allclose(self.prior_transform(np.array([0.5,0.15,0.25])), 106 | [5., 0.98587462, 1.16275512]) 107 | # Checks the prior_pdf 108 | print('prior_pdf check:', self.prior_pdf([1.5,1.5,1.5])) 109 | assert
np.isclose(self.prior_pdf([1.5,1.5,1.5]), 0.0636616797) 110 | # Checks likelihood_function 111 | assert np.allclose(self.loglike(np.array([0.5, 10.5, 0.5])), 112 | -55.581767201053125) 113 | assert self.seed == 1 114 | 115 | # Returns fake results 116 | return {'samples': np.array([[0.000001,0.5,.999999]]*3).T, 117 | 'logz': 42.0, 'logzerr': 17.0} 118 | 119 | MY_SAMPLER = type(sys)('MY_SAMPLER') 120 | MY_SAMPLER.Sampler = MY_SAMPLER_Sampler 121 | 122 | -------------------------------------------------------------------------------- /imagine/tools/io.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Built-in imports 3 | import os 4 | from copy import copy, deepcopy 5 | import logging as log 6 | import warnings 7 | 8 | # Package imports 9 | import cloudpickle 10 | import hickle 11 | from mpi4py import MPI 12 | import numpy as np 13 | 14 | # IMAGINE imports 15 | from imagine.tools import rc 16 | from imagine.tools.parallel_ops import distribute_matrix 17 | 18 | __all__ = ['save_pipeline', 'load_pipeline'] 19 | 20 | # GLOBALS 21 | comm = MPI.COMM_WORLD 22 | mpirank = comm.Get_rank() 23 | 24 | # %% FUNCTION DEFINITIONS 25 | def save_pipeline(pipeline, use_hickle=False): 26 | """ 27 | Saves the state of a Pipeline object 28 | 29 | Parameters 30 | ---------- 31 | pipeline : imagine.pipelines.pipeline.Pipeline 32 | The pipeline object one would like to save 33 | use_hickle : bool 34 | If `False` (default) the state is saved using the `cloudpickle` package. 35 | Otherwise, experimental support for `hickle` is enabled. 36 | """ 37 | # Works on a (shallow) copy 38 | pipeline = copy(pipeline) 39 | 40 | # Adjusts paths: we want everything to be relative to run_directory 41 | # Hidden variables are used to avoid checks 42 | run_directory, pipeline._run_directory = pipeline._run_directory, '.' 43 | pipeline._chains_directory = os.path.relpath(pipeline._chains_directory, 44 | run_directory) 45 | 46 | # Adjusts observational data, if using distributed arrays 47 | if rc['distributed_arrays']: 48 | # Covariances need to be "undistributed" 49 | # First, makes sure we are working on a copy 50 | # (this is done as shallow as possible to save memory) 51 | pipeline.likelihood = copy(pipeline.likelihood) 52 | pipeline.likelihood.covariance_dict = deepcopy(pipeline.likelihood.covariance_dict) 53 | 54 | # Gathers all distributed data -- i.e. turns local (distributed) arrays into global ones 55 | for k in pipeline.likelihood.covariance_dict.keys(): 56 | pipeline.likelihood.covariance_dict[k]._data = pipeline.likelihood.covariance_dict[k].global_data 57 | # NB any process with mpirank!=0 will store None in the above operation 58 | 59 | # Hammurabi-specific path adjustment 60 | if hasattr(pipeline.simulator, 'hamx_path'): 61 | # In case the hamx path is the system default, it will use the 62 | # system default the next time it is loaded.
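        # (setting these to None below is what signals load_pipeline, further
        #  down in this module, to fall back to rc['hammurabi_hamx_path']
        #  when the pipeline is reloaded)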
63 | pipeline.simulator = copy(pipeline.simulator) 64 | if pipeline.simulator.hamx_path == rc['hammurabi_hamx_path']: 65 | pipeline.simulator._hamx_path = None 66 | pipeline.simulator._ham._exe_path = None 67 | 68 | if mpirank == 0: 69 | if not use_hickle: 70 | with open(os.path.join(run_directory,'pipeline.pkl'), 'wb') as f: 71 | cloudpickle.dump(pipeline, f) 72 | else: 73 | hickle.dump(pipeline, os.path.join(run_directory,'pipeline.hkl')) 74 | 75 | return pipeline 76 | 77 | def load_pipeline(directory_path='.'): 78 | """ 79 | Loads the state of a Pipeline object 80 | 81 | Parameters 82 | ---------- 83 | directory_path : str 84 | Path to the directory where the Pipeline state was previously saved 85 | """ 86 | if os.path.isfile(os.path.join(directory_path, 'pipeline.hkl')): 87 | pipeline = hickle.load(os.path.join(directory_path, 'pipeline.hkl')) 88 | else: 89 | with open(os.path.join(directory_path,'pipeline.pkl'), 'rb') as f: 90 | pipeline = cloudpickle.load(f) 91 | 92 | # Adjusts paths (hidden variables are used to avoid checks) 93 | pipeline._run_directory = os.path.join(directory_path, pipeline._run_directory) 94 | pipeline._chains_directory = os.path.join(directory_path, pipeline._chains_directory) 95 | 96 | # Adjusts observational data, if using distributed arrays 97 | if rc['distributed_arrays']: 98 | # Distributes the covariance data 99 | for k in pipeline.likelihood.covariance_dict.keys(): 100 | cov = pipeline.likelihood.covariance_dict[k]._data 101 | pipeline.likelihood.covariance_dict[k]._data = distribute_matrix(cov) 102 | 103 | # Hammurabi-specific path adjustment 104 | if hasattr(pipeline.simulator, 'hamx_path'): 105 | # In case the hamx path was saved as the system default, the current 106 | # system default is used when loading. 107 | if pipeline.simulator.hamx_path is None: 108 | pipeline.simulator.hamx_path = rc['hammurabi_hamx_path'] 109 | # The following refreshes the path to the XML template internally 110 | # using the xml_path property setter 111 | if pipeline.simulator.hamx_path is None: 112 | pipeline.xml_path = None 113 | 114 | # Avoids synchronization problems after loading the pipeline when using MPI 115 | comm.Barrier() 116 | 117 | return pipeline 118 | -------------------------------------------------------------------------------- /imagine/tests/test_dependencies.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Package imports 3 | import astropy.units as u 4 | import numpy as np 5 | import pytest 6 | 7 | # IMAGINE imports 8 | from imagine.fields import ( 9 | DummyField, MagneticField, ThermalElectronDensityField, UniformGrid) 10 | from imagine.observables import Measurements, TabularDataset 11 | from imagine.simulators import Simulator 12 | 13 | # Marks tests in this module as quick 14 | pytestmark = pytest.mark.quick 15 | 16 | # %% HELPER DEFINITIONS 17 | 18 | # First, we create some fields with the structure: 19 | # A - independent - electron density 20 | # B - independent - electron density 21 | # C -> B - dummy 22 | # D -> (C, B) - magnetic field 23 | # E -> electron density (A,B) - magnetic field 24 | # F - independent - magnetic field 25 | 26 | 27 | class A(ThermalElectronDensityField): 28 | """Independent electron density""" 29 | 30 | NAME = 'A' 31 | PARAMETER_NAMES = [] 32 | 33 | def compute_field(self, seed): 34 | return np.ones(self.data_shape)*self.units 35 | 36 | 37 | class B(ThermalElectronDensityField): 38 | """Independent electron density""" 39 | 40 | NAME = 'B' 41 | PARAMETER_NAMES = [] 42
| 43 | def compute_field(self, seed): 44 | self.secret = 9 # Example of shared information 45 | return np.ones(self.data_shape)*self.units/2. 46 | 47 | 48 | class C(DummyField): 49 | """Dummy field dependent on B""" 50 | 51 | NAME = 'C' 52 | FIELD_CHECKLIST = {} 53 | SIMULATOR_CONTROLLIST = {} 54 | DEPENDENCIES_LIST = [B] 55 | 56 | 57 | class D(MagneticField): 58 | """ 59 | Magnetic field, dependent on B and C 60 | 61 | Each component takes the secret number saved during the evaluation of B 62 | """ 63 | 64 | NAME = 'D' 65 | PARAMETER_NAMES = [] 66 | DEPENDENCIES_LIST = [B, C] 67 | 68 | def compute_field(self, seed): 69 | result = np.ones(self.data_shape)*self.units 70 | 71 | return self.dependencies[B].secret * result 72 | 73 | 74 | class E(MagneticField): 75 | """ 76 | Magnetic field, dependent on the total thermal electron density 77 | 78 | Each component takes the numerical value of the electron density 79 | """ 80 | 81 | NAME = 'E' 82 | PARAMETER_NAMES = [] 83 | DEPENDENCIES_LIST = ['thermal_electron_density'] 84 | 85 | def compute_field(self, seed): 86 | te_density = self.dependencies['thermal_electron_density'] 87 | B = np.empty(self.data_shape) 88 | for i in range(3): 89 | B[..., i] = te_density.value 90 | return B*u.microgauss 91 | 92 | 93 | class F(MagneticField): 94 | """Independent magnetic field""" 95 | 96 | NAME = 'F' 97 | DEPENDENCIES_LIST = [B, C] 98 | PARAMETER_NAMES = [] 99 | 100 | def compute_field(self, seed): 101 | return np.ones(self.data_shape)*0.1*u.microgauss 102 | 103 | 104 | # We initialize a common grid for all the tests 105 | grid = UniformGrid([[0, 1]]*3*u.kpc, resolution=[1]*3) 106 | 107 | 108 | class DummySimulator(Simulator): 109 | # Class attributes 110 | SIMULATED_QUANTITIES = ['nothing'] 111 | REQUIRED_FIELD_TYPES = ['dummy', 'magnetic_field', 112 | 'thermal_electron_density'] 113 | ALLOWED_GRID_TYPES = ['cartesian'] 114 | 115 | def simulate(self, key, coords_dict, realization_id, output_units): 116 | 117 | results = np.zeros(coords_dict['lat'].size) 118 | 119 | print('The supplied (total) fields are:\n') 120 | for name in self.fields: 121 | print(name) 122 | print('\t', self.fields[name]) 123 | if name != 'dummy': 124 | results += self.fields[name].value.sum() 125 | print() 126 | 127 | return results*output_units 128 | 129 | 130 | # %% PYTEST DEFINITIONS 131 | def test_Field_dependency(): 132 | # Initializes the fields 133 | a = A(grid) 134 | b = B(grid) 135 | c = C(grid) 136 | d = D(grid) 137 | e = E(grid) 138 | 139 | # Checks whether dependencies on classes are working 140 | b.get_data() # Needs to evaluate these once 141 | c.get_data(dependencies={B: b}) # Needs to evaluate these once 142 | result = d.get_data(dependencies={B: b, C: c}) 143 | assert np.all(result == [[[9]*3]]*u.microgauss) 144 | 145 | # Checks whether dependencies on types are working 146 | te_density = a.get_data()+b.get_data() 147 | result = e.get_data(dependencies={'thermal_electron_density': te_density}) 148 | assert np.all(result == [[[1.5]*3]]*u.microgauss) 149 | 150 | 151 | def test_Simulator_dependency_resolution(): 152 | dat = TabularDataset({'data': [0], 'lat': 0, 'lon': 0, 'err': 0.1}, 153 | name='nothing', 154 | units=u.rad, 155 | data_col='data', 156 | err_col='err') 157 | mea = Measurements() 158 | mea.append(dat) 159 | 160 | sim = DummySimulator(mea) 161 | 162 | fields_list = list(map(lambda x: x(grid), (F, E, D, C, B, A))) 163 | obs = sim(fields_list) 164 | 165 | assert obs[('nothing', None, 'tab', None)].global_data[0][0] == 33.3 166 |
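# Note on the expected value above: DummySimulator sums each combined field
# type over the 1x1x1 grid, so magnetic_field (F+E+D) contributes
# 3*(0.1 + 1.5 + 9) = 31.8 and thermal_electron_density (A+B) contributes
# 1.0 + 0.5 = 1.5, giving the 33.3 checked in the final assert.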
-------------------------------------------------------------------------------- /imagine/tools/parallel_ops.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interface module which allows automatic switching between the routines in 3 | the :py:mod:`imagine.tools.mpi_helper` module and their :py:mod:`numpy` 4 | or pure Python equivalents, depending on the contents of 5 | :py:data:`imagine.rc['distributed_arrays']` 6 | """ 7 | 8 | # %% IMPORTS 9 | # Package imports 10 | from e13tools import add_to_all 11 | import numpy as np 12 | 13 | # IMAGINE imports 14 | from imagine.tools import mpi_helper as m, rc 15 | 16 | # All declaration 17 | __all__ = [] 18 | 19 | 20 | # %% FUNCTION DEFINITIONS 21 | @add_to_all 22 | def pshape(data): 23 | """ 24 | :py:func:`imagine.tools.mpi_helper.mpi_shape` or 25 | :py:meth:`numpy.ndarray.shape` 26 | depending on :py:data:`imagine.rc['distributed_arrays']`. 27 | """ 28 | if rc['distributed_arrays']: 29 | return m.mpi_shape(data) 30 | else: 31 | return data.shape 32 | 33 | 34 | @add_to_all 35 | def prosecutor(data): 36 | """ 37 | :py:func:`imagine.tools.mpi_helper.mpi_prosecutor` or *nothing* 38 | depending on :py:data:`imagine.rc['distributed_arrays']`. 39 | """ 40 | if rc['distributed_arrays']: 41 | m.mpi_prosecutor(data) 42 | 43 | 44 | @add_to_all 45 | def pmean(data): 46 | """ 47 | :py:func:`imagine.tools.mpi_helper.mpi_mean` or :py:func:`numpy.mean` 48 | depending on :py:data:`imagine.rc['distributed_arrays']`. 49 | """ 50 | if rc['distributed_arrays']: 51 | return m.mpi_mean(data) 52 | else: 53 | return (np.mean(data, axis=0)).reshape(1, -1) 54 | 55 | 56 | @add_to_all 57 | def pvar(data): 58 | """ 59 | :py:func:`imagine.tools.mpi_helper.mpi_var` or :py:func:`numpy.var` 60 | depending on :py:data:`imagine.rc['distributed_arrays']`. 61 | """ 62 | if rc['distributed_arrays']: 63 | # This will be done later! 64 | raise NotImplementedError 65 | else: 66 | return data.var(axis=0) 67 | 68 | 69 | @add_to_all 70 | def ptrans(data): 71 | """ 72 | :py:func:`imagine.tools.mpi_helper.mpi_trans` or :py:meth:`numpy.ndarray.T` 73 | depending on :py:data:`imagine.rc['distributed_arrays']`. 74 | """ 75 | if rc['distributed_arrays']: 76 | return m.mpi_trans(data) 77 | else: 78 | return data.T 79 | 80 | 81 | @add_to_all 82 | def pmult(left, right): 83 | """ 84 | :py:func:`imagine.tools.mpi_helper.mpi_mult` or :py:meth:`numpy.matmul` 85 | depending on :py:data:`imagine.rc['distributed_arrays']`. 86 | """ 87 | if rc['distributed_arrays']: 88 | return m.mpi_mult(left, right) 89 | else: 90 | return left @ right 91 | 92 | 93 | @add_to_all 94 | def ptrace(data): 95 | """ 96 | :py:func:`imagine.tools.mpi_helper.mpi_trace` or :py:func:`numpy.trace` 97 | depending on :py:data:`imagine.rc['distributed_arrays']`. 98 | """ 99 | if rc['distributed_arrays']: 100 | return m.mpi_trace(data) 101 | else: 102 | return np.trace(data) 103 | 104 | @add_to_all 105 | def pdiag(data): 106 | """ 107 | :py:func:`imagine.tools.mpi_helper.mpi_diag` or :py:func:`numpy.diagonal` 108 | depending on :py:data:`imagine.rc['distributed_arrays']`. 109 | """ 110 | if rc['distributed_arrays']: 111 | return m.mpi_diag(data) 112 | else: 113 | return data.diagonal() 114 | 115 | @add_to_all 116 | def pnewdiag(data): 117 | """ 118 | :py:func:`imagine.tools.mpi_helper.mpi_new_diag` or :py:func:`numpy.diag` 119 | depending on :py:data:`imagine.rc['distributed_arrays']`.
120 | """ 121 | if rc['distributed_arrays']: 122 | return m.mpi_new_diag(data) 123 | else: 124 | return np.diag(data) 125 | 126 | 127 | @add_to_all 128 | def peye(size): 129 | """ 130 | :py:func:`imagine.tools.mpi_helper.mpi_eye` or :py:func:`numpy.eye` 131 | depending on :py:data:`imagine.rc['distributed_arrays']`. 132 | """ 133 | if rc['distributed_arrays']: 134 | return m.mpi_eye(size) 135 | else: 136 | return np.eye(size) 137 | 138 | 139 | @add_to_all 140 | def distribute_matrix(full_matrix): 141 | """ 142 | :py:func:`imagine.tools.mpi_helper.mpi_distribute_matrix` or *nothing* 143 | depending on :py:data:`imagine.rc['distributed_arrays']`. 144 | """ 145 | if rc['distributed_arrays']: 146 | return m.mpi_distribute_matrix(full_matrix) 147 | else: 148 | return full_matrix 149 | 150 | 151 | @add_to_all 152 | def plu_solve(operator, source): 153 | """ 154 | :py:func:`imagine.tools.mpi_helper.mpi_lu_solve` or :py:func:`numpy.linalg.solve` 155 | depending on :py:data:`imagine.rc['distributed_arrays']`. 156 | 157 | Notes 158 | ----- 159 | In the non-distributed case, the source is transposed before the calculation 160 | """ 161 | if rc['distributed_arrays']: 162 | return m.mpi_lu_solve(operator, source) 163 | else: 164 | return np.linalg.solve(operator, source.T) 165 | 166 | 167 | @add_to_all 168 | def pslogdet(data): 169 | """ 170 | :py:func:`imagine.tools.mpi_helper.mpi_slogdet` or :py:func:`numpy.linalg.slogdet` 171 | depending on :py:data:`imagine.rc['distributed_arrays']`. 172 | """ 173 | if rc['distributed_arrays']: 174 | return m.mpi_slogdet(data) 175 | else: 176 | return np.linalg.slogdet(data) 177 | 178 | 179 | @add_to_all 180 | def pglobal(data): 181 | """ 182 | :py:func:`imagine.tools.mpi_helper.mpi_global` or *nothing* 183 | depending on :py:data:`imagine.rc['distributed_arrays']`. 184 | """ 185 | if rc['distributed_arrays']: 186 | return m.mpi_global(data) 187 | else: 188 | return data 189 | 190 | 191 | @add_to_all 192 | def plocal(data): 193 | """ 194 | :py:func:`imagine.tools.mpi_helper.mpi_local` or *nothing* 195 | depending on :py:data:`imagine.rc['distributed_arrays']`. 196 | """ 197 | if rc['distributed_arrays']: 198 | return m.mpi_local(data) 199 | else: 200 | return data 201 | -------------------------------------------------------------------------------- /imagine/fields/base_fields.py: -------------------------------------------------------------------------------- 1 | r""" 2 | This module contains basic base classes that can be used to include new fields 3 | in IMAGINE. The classes found here here correspond to the physical fields most 4 | commonly found by members of the IMAGINE community and may be improved in the 5 | future. 6 | 7 | A brief summary of the module: 8 | 9 | * :py:class:`MagneticField` — for models of the galactic/Galactic Magnetic Field, :math:`\mathbf{B}(\mathbf{r})` 10 | * :py:class:`ThermalElectronDensityField` — for models of the density of thermal electrons, :math:`n_e(\mathbf{r})` 11 | * :py:class:`CosmicRayElectronDensityField`— for models of the density/flux of cosmic ray electrons, :math:`n_{\rm cr}(\mathbf{r})` 12 | * :py:class:`DummyField` — allows passing parameters to a :py:obj:`Simulator ` without having to evaluate anything on a :py:obj:`Grid ` 13 | 14 | 15 | See also :doc:`IMAGINE Components ` section of the docs. 
16 | """ 17 | 18 | # %% IMPORTS 19 | # Built-in imports 20 | import abc 21 | 22 | # Package imports 23 | import astropy.units as u 24 | 25 | # IMAGINE imports 26 | from imagine.fields import Field 27 | from imagine.tools import req_attr 28 | 29 | # All declaration 30 | __all__ = ['MagneticField', 'ThermalElectronDensityField', 'DummyField'] 31 | 32 | 33 | # %% CLASS DEFINITIONS 34 | class MagneticField(Field): 35 | """ 36 | Base class for the inclusion of new models for magnetic fields. 37 | It should be subclassed following the template provided. 38 | 39 | For more details, check the :ref:`components:Magnetic Fields` Section 40 | of the documentation. 41 | 42 | Parameters 43 | ---------- 44 | 45 | grid : imagine.fields.grid.BaseGrid 46 | Instance of :py:class:`imagine.fields.grid.BaseGrid` containing a 3D 47 | grid where the field is evaluated 48 | parameters : dict 49 | Dictionary of full parameter set {name: value} 50 | ensemble_size : int 51 | Number of realisations in field ensemble 52 | ensemble_seeds 53 | Random seed(s) for generating random field realisations 54 | """ 55 | 56 | # Class attributes 57 | TYPE = 'magnetic_field' 58 | UNITS = u.microgauss 59 | 60 | @property 61 | def data_description(self): 62 | return(['grid_x', 'grid_y', 'grid_z', 'component (x,y,z)']) 63 | 64 | @property 65 | def data_shape(self): 66 | return(*self.grid.shape, 3) 67 | 68 | 69 | class ThermalElectronDensityField(Field): 70 | """ 71 | Base class for the inclusion of models for spatial distribution of thermal 72 | electrons. It should be subclassed following the template provided. 73 | 74 | For more details, check the :ref:`components:Thermal electrons` 75 | Section of the documentation. 76 | 77 | Parameters 78 | ---------- 79 | grid : imagine.fields.grid.BaseGrid 80 | Instance of :py:class:`imagine.fields.grid.BaseGrid` containing a 3D 81 | grid where the field is evaluated 82 | parameters : dict 83 | Dictionary of full parameter set {name: value} 84 | ensemble_size : int 85 | Number of realisations in field ensemble 86 | ensemble_seeds 87 | Random seed(s) for generating random field realisations 88 | 89 | """ 90 | 91 | # Class attributes 92 | TYPE = 'thermal_electron_density' 93 | UNITS = u.cm**(-3) 94 | 95 | @property 96 | def data_description(self): 97 | return(['grid_x', 'grid_y', 'grid_z']) 98 | 99 | @property 100 | def data_shape(self): 101 | return tuple(self.grid.shape) 102 | 103 | 104 | class CosmicRayElectronDensityField(Field): 105 | """ 106 | Not yet implemented 107 | """ 108 | 109 | # Class attributes 110 | TYPE = 'cosmic_ray_electron_density' 111 | 112 | def __init__(): 113 | raise NotImplementedError 114 | 115 | 116 | class DummyField(Field, metaclass=abc.ABCMeta): 117 | """ 118 | Base class for a dummy Field used for sending parameters and settings to 119 | specific Simulators rather than computing and storing a physical field. 
120 | """ 121 | 122 | # Class attributes 123 | TYPE = 'dummy' 124 | UNITS = None 125 | PARAMETER_NAMES = None 126 | 127 | def __init__(self, *args, **kwargs): 128 | kwargs['grid'] = None 129 | super().__init__(**kwargs) 130 | 131 | @property 132 | def data_description(self): 133 | return([]) 134 | 135 | @property 136 | def data_shape(self): 137 | return(None) 138 | 139 | @property 140 | def parameter_names(self): 141 | """Parameters of the field""" 142 | return list(self.field_checklist) 143 | 144 | @property 145 | @req_attr 146 | def field_checklist(self): 147 | """Parameters of the dummy field""" 148 | return self.FIELD_CHECKLIST 149 | 150 | @property 151 | @req_attr 152 | def simulator_controllist(self): 153 | """ 154 | Dictionary containing fixed Simulator settings 155 | """ 156 | return self.SIMULATOR_CONTROLLIST 157 | 158 | def compute_field(self, *args, **kwargs): 159 | pass 160 | 161 | def get_data(self, i_realization=0, dependencies={}): 162 | """ 163 | Mock evaluation of the dummy field defined by this class. 164 | 165 | Parameters 166 | ---------- 167 | i_realization : int 168 | Index of the current realization 169 | dependencies : dict 170 | If the :py:data:`dependencies_list` is non-empty, a dictionary containing 171 | the requested dependencies must be provided. 172 | 173 | Returns 174 | ------- 175 | parameters : dict 176 | Dictionary of containing a copy of the Field parameters including 177 | an extra entry with the random seed that should be used with the 178 | present realization (under the key: 'random_seed') 179 | """ 180 | self._update_dependencies(dependencies) 181 | parameters = self._parameters.copy() 182 | parameters['random_seed'] = self.ensemble_seeds[i_realization] 183 | 184 | return parameters 185 | -------------------------------------------------------------------------------- /doc/source/installation.rst: -------------------------------------------------------------------------------- 1 | ***************************** 2 | Installation and dependencies 3 | ***************************** 4 | 5 | Here you can find basic instructions for the installation of IMAGINE. 6 | There are two main installation routes: 7 | 8 | 1. one can pull and run a :ref:`DockerInstallation` which allows 9 | you to setup and run IMAGINE by typing only two lines. 10 | IMAGINE will run in a container, i.e. separate from your system. 11 | 2. one can :ref:`download and install ` IMAGINE and all 12 | the dependencies alongside your system. 13 | 14 | The first option is particularly useful when one is a newcomer, interested 15 | experimenting or when one is deploying IMAGINE in a cloud service or multiple 16 | machines. 17 | 18 | The second option is better if one wants to use ones pre-installed tools and 19 | packages, or if one is interested in running on a computing cluster (running 20 | docker images in some typical cluster settings may be difficult or impossible). 21 | 22 | `Let us know `_ 23 | if you face major difficulties. 24 | 25 | .. _DockerInstallation: 26 | 27 | Docker installation 28 | ------------------- 29 | 30 | This is a very convenient and fast way of deploying IMAGINE. You must first 31 | pull the image of `one of IMAGINE's versions from GitHub `_, for example, the latest (*development*) version can be pulled 32 | using: 33 | 34 | .. code-block:: console 35 | 36 | sudo docker pull ghcr.io/imagine-consortium/imagine/imagine:latest 37 | 38 | If you would like to start working (or testing IMAGINE) immediately, a 39 | jupyter-lab session can be launched using: 40 | 41 | .. 
code-block:: console 42 | 43 | sudo docker run -i -t -p 8888:8888 ghcr.io/imagine-consortium/imagine/imagine:latest /bin/bash -c "source ~/jupyterlab.bash" 44 | 45 | After running this, you will see a link to a file and a URL at the end of the terminal output. 46 | You may copy and paste the link with a token into a browser, 47 | which will allow you to access the jupyter-lab session. From there you may, 48 | for instance, navigate to the `imagine/tutorials` directory. 49 | 50 | 51 | .. _StandardInstallation: 52 | 53 | Standard installation 54 | --------------------- 55 | 56 | Download 57 | ^^^^^^^^ 58 | 59 | A copy of the IMAGINE source can be downloaded from its main 60 | `GitHub repository <https://github.com/IMAGINE-Consortium/imagine>`_. 61 | If one does not intend to contribute to the development, one should download 62 | and unpack the 63 | `latest release <https://github.com/IMAGINE-Consortium/imagine/releases/latest>`_: 64 | 65 | .. code-block:: console 66 | 67 | wget https://github.com/IMAGINE-Consortium/imagine/archive/v2.0.0-alpha.3.tar.gz 68 | tar -xvvzf v2.0.0-alpha.3.tar.gz 69 | 70 | 71 | Alternatively, if one is interested in getting involved with the development, 72 | we recommend cloning the git repository 73 | 74 | .. code-block:: console 75 | 76 | git clone git@github.com:IMAGINE-Consortium/imagine.git 77 | 78 | 79 | 80 | Setting up the environment with conda 81 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 82 | 83 | IMAGINE depends on a number of different python packages. The easiest way of 84 | setting up your environment is using the *conda* package manager. This allows 85 | one to set up a dedicated, self-contained Python environment in the user area. 86 | 87 | Conda is the package manager of the `Anaconda <https://www.anaconda.com/>`_ 88 | Python distribution, which by default comes with a large number of packages 89 | frequently used in data science and scientific computing, as well as a GUI 90 | installer and other tools. 91 | 92 | A lighter, recommended, alternative is the 93 | `Miniconda <https://docs.conda.io/en/latest/miniconda.html>`_ distribution, 94 | which allows one to use the conda commands to install only what is actually 95 | needed. 96 | 97 | Once one has installed (mini)conda, one can download and install the IMAGINE 98 | environment in the following way: 99 | 100 | .. code-block:: console 101 | 102 | conda env create --file=imagine_conda_env.yml 103 | conda activate imagine 104 | python -m ipykernel install --user --name imagine --display-name "Python (imagine)" 105 | 106 | The (optional) last line creates a Jupyter kernel linked to the new conda 107 | environment (which is required, for example, for executing the tutorial 108 | Jupyter notebooks). 109 | 110 | Whenever one wants to run an IMAGINE script, one has to first activate the 111 | associated environment with the command `conda activate imagine`. 112 | To leave this environment one can simply run `conda deactivate`. 113 | 114 | 115 | Hammurabi X 116 | ^^^^^^^^^^^ 117 | 118 | A key dependency of IMAGINE is the 119 | `Hammurabi X `_ code, 120 | a `HEALPix `_-based 121 | numeric simulator for Galactic polarized emission 122 | (`arXiv:1907.00207 <https://arxiv.org/abs/1907.00207>`_). 123 | 124 | Before proceeding with the IMAGINE installation, it is necessary to install 125 | Hammurabi X following the instructions on its project 126 | `wiki `_. 127 | Then, one needs to install the `hampyx` python wrapper: 128 | 129 | .. code-block:: console 130 | 131 | conda activate imagine # if using conda 132 | cd PATH_TO_HAMMURABI 133 | pip install -e .
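
One can then check that the wrapper is importable (a quick sanity check, not
an official installation step):

.. code-block:: console

    python -c "import hampyx"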
134 | 135 | 136 | Installing 137 | ^^^^^^^^^^ 138 | 139 | After downloading, setting up the environment and installing Hammurabi X, 140 | IMAGINE can finally be installed through: 141 | 142 | .. code-block:: console 143 | 144 | conda activate imagine # if using conda 145 | cd IMAGINE_PATH 146 | pip install . 147 | 148 | If one does not have administrator/root privileges/permissions, one may instead 149 | want to use 150 | 151 | .. code-block:: console 152 | 153 | pip install --user . 154 | 155 | Also, if you are working on further developing or modifying IMAGINE for your own needs, you may wish to use the `-e` flag, which keeps links to the source directory instead of copying the files: 156 | 157 | .. code-block:: console 158 | 159 | pip install -e . 160 | 161 | 162 | -------------------------------------------------------------------------------- /imagine/likelihoods/likelihood.py: -------------------------------------------------------------------------------- 1 | """ 2 | The Likelihood class defines the likelihood function 3 | to be used in Bayesian analysis 4 | 5 | member functions: 6 | 7 | __init__ 8 | 9 | requires 10 | Measurements object 11 | Covariances object (optional) 12 | Masks object (optional) 13 | 14 | call 15 | 16 | running the log-likelihood calculation requires an 17 | ObservableDict object 18 | """ 19 | 20 | # %% IMPORTS 21 | # Built-in imports 22 | import abc 23 | 24 | # Package imports 25 | import numpy as np 26 | 27 | # IMAGINE imports 28 | from imagine.observables.observable_dict import ( 29 | Measurements, Covariances, Masks) 30 | from imagine.tools import BaseClass 31 | 32 | # All declaration 33 | __all__ = ['Likelihood'] 34 | 35 | 36 | # %% CLASS DEFINITIONS 37 | class Likelihood(BaseClass, metaclass=abc.ABCMeta): 38 | """ 39 | Base class that defines the likelihood function 40 | to be used in Bayesian analysis 41 | 42 | Parameters 43 | ---------- 44 | measurement_dict : imagine.observables.observable_dict.Measurements 45 | A :py:obj:`Measurements <imagine.observables.observable_dict.Measurements>` 46 | dictionary containing observational data. 47 | covariance_dict : imagine.observables.observable_dict.Covariances 48 | A :py:obj:`Covariances <imagine.observables.observable_dict.Covariances>` 49 | dictionary containing observed covariance data. 50 | If set to `None` (the usual case), the :py:obj:`Likelihood` will try 51 | to find the :py:obj:`Covariances <imagine.observables.observable_dict.Covariances>` 52 | in the :py:data:`cov` attribute of the supplied `measurement_dict`. 53 | mask_dict : imagine.observables.observable_dict.Masks 54 | A :py:obj:`Masks <imagine.observables.observable_dict.Masks>` dictionary 55 | which should be applied to the measured and simulated data. 56 | compute_dispersion : bool 57 | If True, calling the Likelihood object will return the likelihood value 58 | and the dispersion estimated by bootstrapping the simulations object 59 | and computing the sample standard deviation. 60 | If False (default), only the likelihood value is returned. 61 | n_bootstrap : int 62 | Number of resamples used in the bootstrapping of the simulations if 63 | compute_dispersion is set to `True`.
64 | """ 65 | def __init__(self, measurement_dict, covariance_dict=None, mask_dict=None, 66 | compute_dispersion=False, n_bootstrap=150): 67 | # Call super constructor 68 | super().__init__() 69 | 70 | self._check_units(measurement_dict, covariance_dict) 71 | self.mask_dict = mask_dict 72 | self.measurement_dict = measurement_dict 73 | if covariance_dict is None: 74 | covariance_dict = measurement_dict.cov 75 | self.covariance_dict = covariance_dict 76 | self.compute_dispersion = compute_dispersion 77 | self.n_bootstrap = n_bootstrap 78 | 79 | def __call__(self, observable_dict, **kwargs): 80 | if self.mask_dict is not None: 81 | observable_dict = self.mask_dict(observable_dict) 82 | 83 | likelihood = self.call(observable_dict, **kwargs) 84 | 85 | if not self.compute_dispersion: 86 | return likelihood 87 | else: 88 | bootstrap_sample = [self._bootstrapped_likelihood(observable_dict, 89 | **kwargs) 90 | for _ in range(self.n_bootstrap)] 91 | dispersion = np.std(bootstrap_sample) 92 | return likelihood, dispersion 93 | 94 | def _bootstrapped_likelihood(self, simulations, **kwargs): 95 | # Gets ensemble size from first entry in the ObservableDict 96 | size, _ = simulations[list(simulations.keys())[0]].shape 97 | # Resamples with replacement 98 | idx = np.random.randint(0, size, size) 99 | sims_new = simulations.sub_sim(idx) 100 | return self.call(sims_new, **kwargs) 101 | 102 | @property 103 | def mask_dict(self): 104 | """ 105 | :py:obj:`Masks ` dictionary associated with 106 | this object 107 | """ 108 | return self._mask_dict 109 | 110 | @mask_dict.setter 111 | def mask_dict(self, mask_dict): 112 | if mask_dict is not None: 113 | assert isinstance(mask_dict, Masks) 114 | self._mask_dict = mask_dict 115 | 116 | @property 117 | def measurement_dict(self): 118 | """ 119 | :py:obj:`Measurements ` dictionary associated with 120 | this object 121 | 122 | NB If a mask is used, only the masked version is stored 123 | """ 124 | return self._measurement_dict 125 | 126 | @measurement_dict.setter 127 | def measurement_dict(self, measurement_dict): 128 | assert isinstance(measurement_dict, Measurements) 129 | self._measurement_dict = measurement_dict 130 | if self._mask_dict is not None: # apply mask 131 | self._measurement_dict = self.mask_dict(self._measurement_dict) 132 | 133 | @property 134 | def covariance_dict(self): 135 | """ 136 | :py:obj:`Covariances ` dictionary associated with 137 | this object 138 | 139 | NB If a mask is used, only the masked version is stored 140 | """ 141 | return self._covariance_dict 142 | 143 | @covariance_dict.setter 144 | def covariance_dict(self, covariance_dict): 145 | if covariance_dict is not None: 146 | assert isinstance(covariance_dict, Covariances) 147 | self._covariance_dict = covariance_dict 148 | if (self._mask_dict is not None) and (self._covariance_dict is not None): 149 | self._covariance_dict = self.mask_dict(self._covariance_dict) 150 | 151 | @abc.abstractmethod 152 | def call(self, observable_dict): 153 | """ 154 | Parameters 155 | ---------- 156 | observable_dict : imagine.observables.observable_dict 157 | variables 158 | """ 159 | raise NotImplementedError 160 | 161 | @staticmethod 162 | def _check_units(measurements, covariances): 163 | """ 164 | Makes sure that measurements and covariances units are compatible 165 | """ 166 | if covariances is None: 167 | return 168 | for k in measurements: 169 | if measurements[k].unit is None: 170 | assert covariances[k].unit is None 171 | else: 172 | assert (measurements[k].unit)**2 == covariances[k].unit 173 | 
-------------------------------------------------------------------------------- /imagine/pipelines/ultranest_pipeline.py: -------------------------------------------------------------------------------- 1 | # %% IMPORTS 2 | # Built-in imports 3 | import logging as log 4 | import os 5 | from os import path 6 | 7 | # Package imports 8 | import ultranest 9 | 10 | # IMAGINE imports 11 | from imagine.pipelines import Pipeline 12 | 13 | # All declaration 14 | __all__ = ['UltranestPipeline'] 15 | 16 | 17 | # %% CLASS DEFINITIONS 18 | class UltranestPipeline(Pipeline): 19 | """ 20 | Bayesian analysis pipeline with 21 | `UltraNest <https://johannesbuchner.github.io/UltraNest/>`_ 22 | 23 | See base class for initialization details. 24 | 25 | The sampler behaviour is controlled using the `sampling_controllers` 26 | property. A description of these can be found below. 27 | 28 | Sampling controllers 29 | -------------------- 30 | resume : bool 31 | If False, the sampling starts from the beginning, 32 | erasing any previous work in the `chains_directory`. Otherwise, 33 | it tries to resume a previous run. 34 | dlogz : float 35 | Target evidence uncertainty. This is the std 36 | between bootstrapped logz integrators. 37 | dKL : float 38 | Target posterior uncertainty. This is the Kullback-Leibler 39 | divergence in nat between bootstrapped integrators. 40 | frac_remain : float 41 | Integrate until this fraction of the integral is left in the 42 | remainder. 43 | Set to a low number (1e-2 ... 1e-5) to make sure peaks are 44 | discovered. 45 | Set to a higher number (0.5) if you know the posterior is simple. 46 | Lepsilon : float 47 | Terminate when live point likelihoods are all the same, 48 | within Lepsilon tolerance. Increase this when your likelihood 49 | function is inaccurate, to avoid unnecessary search. 50 | min_ess : int 51 | Target number of effective posterior samples. 52 | max_iters : int 53 | Maximum number of integration iterations. 54 | max_ncalls : int 55 | Stop after this many likelihood evaluations. 56 | max_num_improvement_loops : int 57 | `run()` tries to assess iteratively where more samples are needed. 58 | This number limits the number of improvement loops. 59 | min_num_live_points : int 60 | Minimum number of live points throughout the run. 61 | cluster_num_live_points : int 62 | Require at least this many live points per detected cluster. 63 | num_test_samples : int 64 | Test transform and likelihood with this number of 65 | random points for errors first. Useful to catch bugs. 66 | draw_multiple : bool 67 | Draw more points if efficiency goes down. 68 | If set to False, few points are sampled at once. 69 | num_bootstraps : int 70 | Number of logZ estimators and MLFriends region 71 | bootstrap rounds. 72 | update_interval_volume_fraction : float 73 | Update the MLFriends region after the sampled volume has shrunk 74 | by this fraction. 75 | 76 | Note 77 | ---- 78 | Instances of this class are callable. 79 | Look at the :py:meth:`UltranestPipeline.call` for details. 80 | """ 81 | 82 | # Class attributes 83 | SUPPORTS_MPI = True 84 | 85 | def call(self, **kwargs): 86 | """ 87 | Runs the IMAGINE pipeline using the 88 | `UltraNest <https://johannesbuchner.github.io/UltraNest/>`_ 89 | :py:class:`ReactiveNestedSampler <ultranest.ReactiveNestedSampler>`. 90 | 91 | Any keyword argument provided is used to update the 92 | `sampling_controllers`.
93 |
94 |         Returns
95 |         -------
96 |         results : dict
97 |             UltraNest sampling results in a dictionary containing the keys:
98 |             logz (the log-evidence), logzerr (the error in the log-evidence)
99 |             and samples (the equally weighted posterior samples)
100 |
101 |         Notes
102 |         -----
103 |         See base class for other attributes/properties and methods
104 |         """
105 |         log.debug('@ ultranest_pipeline::__call__')
106 |
107 |         default_init_params = {
108 |             'resume': True,
109 |             'num_test_samples': 2,
110 |             'num_bootstraps': 30,
111 |             'draw_multiple': True}
112 |
113 |         default_run_params = {
114 |             'dlogz': 0.5,
115 |             'dKL': 0.5,
116 |             'frac_remain': 0.01,
117 |             'Lepsilon': 0.001,
118 |             'min_ess': 500,
119 |             'max_iters': None,
120 |             'max_ncalls': None,
121 |             'max_num_improvement_loops': -1,
122 |             'min_num_live_points': 400,
123 |             'cluster_num_live_points': 40,
124 |             'update_interval_volume_fraction': 0.2}
125 |
126 |         # Keyword arguments can alter the sampling controllers
127 |         self.sampling_controllers = kwargs  # Updates the dict
128 |
129 |         # Prepares initialization and run parameters from
130 |         # defaults and sampling controllers
131 |         init_params = {k: self.sampling_controllers.get(k, default)
132 |                        for k, default in default_init_params.items()}
133 |         run_params = {k: self.sampling_controllers.get(k, default)
134 |                       for k, default in default_run_params.items()}
135 |
136 |         # Updates the sampling controllers to reflect what is being used
137 |         self.sampling_controllers = init_params  # Updates the dict
138 |         self.sampling_controllers = run_params  # Updates the dict
139 |
140 |         # Ultranest files directory
141 |         ultranest_dir = path.join(self.chains_directory, 'ultranest')
142 |         # Cleans up the chains directory if not resuming
143 |         if not init_params['resume']:
144 |             init_params['resume'] = 'overwrite'
145 |             # Removing manually as UltraNest's 'overwrite' option does not
146 |             # seem to be working correctly
147 |             self.clean_chains_directory()
148 |
149 |         # Creates the directory, if needed
150 |         os.makedirs(ultranest_dir, exist_ok=True)
151 |
152 |         # Runs UltraNest
153 |         sampler = ultranest.ReactiveNestedSampler(
154 |             param_names=list(self.active_parameters),
155 |             loglike=self._likelihood_function,
156 |             transform=self.prior_transform,
157 |             log_dir=ultranest_dir,
158 |             vectorized=False,
159 |             wrapped_params=self.wrapped_parameters,
160 |             **init_params)
161 |
162 |         self.results = sampler.run(viz_callback=ultranest.viz.nicelogger,
163 |                                    **run_params)
164 |
165 |         self._samples_array = self.results['samples']
166 |         self._evidence = self.results['logz']
167 |         self._evidence_err = self.results['logzerr']
168 |
169 |         return self.results
170 |
--------------------------------------------------------------------------------
/imagine/tools/covariance_estimator.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains estimation algorithms for the
3 | covariance matrix based on a finite number of samples.
4 |
5 | For the testing suite, please turn to "imagine/tests/tools_tests.py".
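A minimal sketch of the intended serial usage (assuming the
'distributed_arrays' option of imagine.tools.config.rc is disabled, so
each parallel helper reduces to a plain NumPy operation):

    import numpy as np
    from imagine.tools.covariance_estimator import oas_cov

    data = np.random.randn(50, 10)  # (ensemble size, data size)
    cov = oas_cov(data)             # (10, 10) shrinkage estimate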
6 | """ 7 | 8 | # %% IMPORTS 9 | # Built-in imports 10 | import logging as log 11 | 12 | # Package imports 13 | import numpy as np 14 | 15 | # IMAGINE imports 16 | from imagine.tools.parallel_ops import ( 17 | pmean, ptrans, pmult, peye, ptrace, pshape) 18 | from imagine.tools.config import rc 19 | 20 | # All declaration 21 | __all__ = ['empirical_cov', 'empirical_mcov', 'oas_cov', 'oas_mcov', 22 | 'diagonal_cov', 'diagonal_mcov'] 23 | 24 | 25 | # %% FUNCTION DEFINITIONS 26 | def diagonal_cov(data): 27 | """ 28 | Assumes the covariance matrix is simply a diagonal matrix whose values 29 | correspond to the sample variances 30 | 31 | Parameters 32 | ---------- 33 | data : numpy.ndarray 34 | Ensemble of observables, in global shape (ensemble size, data size). 35 | 36 | Returns 37 | ------- 38 | cov : numpy.ndarray 39 | Covariance matrix 40 | """ 41 | # MPI version still to be implemented in the future 42 | if rc['distributed_arrays']: 43 | raise NotImplementedError 44 | 45 | return np.diag(data.var(axis=0)) 46 | 47 | 48 | def diagonal_mcov(data): 49 | """ 50 | Assumes the covariance matrix is simply a diagonal matrix whose values 51 | correspond to the sample variances 52 | 53 | Parameters 54 | ---------- 55 | data : numpy.ndarray 56 | Ensemble of observables, in global shape (ensemble size, data size). 57 | 58 | Returns 59 | ------- 60 | mean : numpy.ndarray 61 | Ensemble mean 62 | cov : numpy.ndarray 63 | Covariance matrix 64 | """ 65 | return pmean(data), diagonal_cov(data) 66 | 67 | 68 | def empirical_cov(data): 69 | r""" 70 | Empirical covariance estimator 71 | 72 | Given some data matrix, :math:`D`, where rows are different samples 73 | and columns different properties, the covariance can be 74 | estimated from 75 | 76 | .. math:: 77 | U_{ij} = D_{ij} - \overline{D}_j\,,\; 78 | \text{with}\; \overline{D}_j=\tfrac{1}{N} \sum_{i=1}^N D_{ij} 79 | 80 | .. math:: 81 | \text{cov} = \tfrac{1}{N} U^T U 82 | 83 | 84 | 85 | Notes 86 | ----- 87 | While conceptually simple, this is usually not the 88 | best option. 89 | 90 | Parameters 91 | ---------- 92 | data : numpy.ndarray 93 | Ensemble of observables, in global shape (ensemble size, data size). 94 | 95 | Returns 96 | ------- 97 | cov : numpy.ndarray 98 | Distributed (not copied) covariance matrix in global shape (data size, data size), 99 | each node takes part of the rows. 100 | """ 101 | log.debug('@ covariance_estimator::empirical_cov') 102 | _, cov = empirical_mcov(data) 103 | return cov 104 | 105 | 106 | def empirical_mcov(data): 107 | r""" 108 | Empirical covariance estimator 109 | 110 | Given some data matrix, :math:`D`, where rows are different samples 111 | and columns different properties, the covariance can be 112 | estimated from 113 | 114 | .. math:: 115 | U_{ij} = D_{ij} - \overline{D}_j\,,\; 116 | \text{with}\; \overline{D}_j=\tfrac{1}{N} \sum_{i=1}^N D_{ij} 117 | 118 | .. math:: 119 | \text{cov} = \tfrac{1}{N} U^T U 120 | 121 | Notes 122 | ----- 123 | While conceptually simple, this is usually not the 124 | best option. 125 | 126 | Parameters 127 | ---------- 128 | data : numpy.ndarray 129 | Ensemble of observables, in global shape (ensemble size, data size). 130 | 131 | Returns 132 | ------- 133 | mean : numpy.ndarray 134 | Copied ensemble mean (on all nodes). 135 | cov : numpy.ndarray 136 | Distributed (not copied) covariance matrix in global shape (data size, data size), 137 | each node takes part of the rows. 
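    Examples
    --------
    A serial sketch with hypothetical data (assuming ``distributed_arrays``
    is off, so the parallel helpers reduce to plain NumPy operations)::

        import numpy as np
        data = np.random.randn(100, 8)    # (ensemble size, data size)
        mean, cov = empirical_mcov(data)  # cov has global shape (8, 8)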
138 |     """
139 |     log.debug('@ covariance_estimator::empirical_mcov')
140 |     assert isinstance(data, np.ndarray)
141 |     assert (len(data.shape) == 2)
142 |     # Get ensemble size (i.e. the number of rows)
143 |     ensemble_size, _ = pshape(data)
144 |     # Calculates covariance
145 |     mean = pmean(data)
146 |     u = data - mean
147 |     cov = pmult(ptrans(u), u) / ensemble_size
148 |     return mean, cov
149 |
150 |
151 | def oas_cov(data):
152 |     r"""
153 |     Estimate covariance with the Oracle Approximating Shrinkage algorithm.
154 |
155 |     Given some :math:`n\times m` data matrix, :math:`D`,
156 |     where rows are different samples and columns different properties,
157 |     the covariance can be estimated in the following way.
158 |
159 |     .. math::
160 |         U_{ij} = D_{ij} - \overline{D}_j\,,\;
161 |         \text{with}\; \overline{D}_j=\tfrac{1}{n} \sum_{i=1}^n D_{ij}
162 |
163 |     Let
164 |
165 |     .. math::
166 |         S = \tfrac{1}{n} U^T U\,,\;
167 |         T = \text{tr}(S)\quad\text{and}\quad V = \text{tr}(S^2)
168 |
169 |     .. math::
170 |         \tilde\rho = \min\left[1,\frac{(1-2/m)V + T^2}{(n+1-2/m)(V-T^2/m)}\right]
171 |
172 |     The covariance is given by
173 |
174 |     .. math::
175 |         \text{cov}_\text{OAS} = (1-\tilde\rho)S + \tfrac{1}{m} \tilde\rho\, T I_m
176 |
177 |
178 |     Parameters
179 |     ----------
180 |     data : numpy.ndarray
181 |         Distributed data in global shape (ensemble_size, data_size).
182 |
183 |     Returns
184 |     -------
185 |     cov : numpy.ndarray
186 |         Covariance matrix in global shape (data_size, data_size).
187 |     """
188 |     log.debug('@ covariance_estimator::oas_cov')
189 |     _, cov = oas_mcov(data)
190 |
191 |     return cov
192 |
193 |
194 | def oas_mcov(data):
195 |     """
196 |     Estimate covariance with the Oracle Approximating Shrinkage algorithm.
197 |
198 |     See `imagine.tools.covariance_estimator.oas_cov` for details. This
199 |     function additionally returns the computed ensemble mean.
200 |
201 |     Parameters
202 |     ----------
203 |     data : numpy.ndarray
204 |         Distributed data in global shape (ensemble_size, data_size).
205 |
206 |     Returns
207 |     -------
208 |     mean : numpy.ndarray
209 |         Copied ensemble mean (on all nodes).
210 |     cov : numpy.ndarray
211 |         Distributed covariance matrix in shape (data_size, data_size).
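    Examples
    --------
    A sketch of the intended usage (hypothetical data, serial mode)::

        import numpy as np
        data = np.random.randn(30, 100)  # few samples, many data dimensions
        mean, cov = oas_mcov(data)       # shrunk towards a scaled identity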
212 |     """
213 |     log.debug('@ covariance_estimator::oas_mcov')
214 |     assert isinstance(data, np.ndarray)
215 |     assert (len(data.shape) == 2)
216 |
217 |     # Finds ensemble size and data size
218 |     data_size = data.shape[1]
219 |     ensemble_size, _ = pshape(data)
220 |
221 |     # Calculates the OAS covariance estimator from the empirical covariance estimator
222 |     mean = pmean(data)
223 |     u = data - mean
224 |     s = pmult(ptrans(u), u) / ensemble_size
225 |     trs = ptrace(s)
226 |     trs2 = ptrace(pmult(s, s))
227 |
228 |     numerator = (1.0 - 2.0/data_size)*trs2 + trs*trs
229 |     denominator = (ensemble_size + 1.0 - 2.0/data_size)*(trs2 - (trs*trs)/data_size)
230 |
231 |     if denominator == 0:
232 |         rho = 1
233 |     else:
234 |         rho = np.min([1, numerator/denominator])
235 |     cov = (1. - rho)*s + peye(data_size)*rho*trs/data_size
236 |
237 |     return mean, cov
238 |
--------------------------------------------------------------------------------
/imagine/likelihoods/ensemble_likelihood.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # Built-in imports
3 | import logging as log
4 |
5 | # Package imports
6 | import numpy as np
7 |
8 | # IMAGINE imports
9 | from imagine.likelihoods import Likelihood
10 | from imagine.observables.observable_dict import Simulations
11 | from imagine.tools.covariance_estimator import oas_mcov
12 | from imagine.tools.parallel_ops import (pslogdet, plu_solve, ptrace, pdiag,
13 |                                         pvar, pmean)
14 |
15 | # All declaration
16 | __all__ = ['EnsembleLikelihood', 'EnsembleLikelihoodDiagonal']
17 |
18 |
19 | # %% CLASS DEFINITIONS
20 | class EnsembleLikelihood(Likelihood):
21 |     r"""
22 |     Computes the likelihood accounting for the effects of stochastic fields
23 |
24 |     This is done by estimating the covariance associated with the stochastic
25 |     fields from an ensemble of simulations.
26 |
27 |     Parameters
28 |     ----------
29 |     measurement_dict : imagine.observables.observable_dict.Measurements
30 |         Measurements
31 |     covariance_dict : imagine.observables.observable_dict.Covariances
32 |         The covariances associated with the measurements. If the keyword
33 |         argument is absent, the covariances will be read from the attribute
34 |         `measurement_dict.cov`.
35 |     mask_dict : imagine.observables.observable_dict.Masks
36 |         Masks which will be applied to the Measurements, Covariances and
37 |         Simulations, before computing the likelihood
38 |     cov_func : func
39 |         A function which takes a (Nens, Ndata) data array (potentially
40 |         MPI distributed) and returns a tuple comprising the mean and an
41 |         estimated covariance matrix.
42 |         If absent, :py:func:`imagine.tools.covariance_estimator.oas_mcov` will
43 |         be used.
44 |     use_trace_approximation : bool
45 |         If True, the log-determinant of the combined covariance matrix is
46 |         approximated using only its diagonal,
47 |         :math:`\ln|A+B| \approx \text{tr}\left[\ln\,\text{diag}\left(A+B\right)\right]`
48 |         (NB this assumes that the observed data covariance is diagonal).
49 |         Otherwise (default), the determinant is calculated directly from the
50 |         covariance matrix from the simulations.
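    Example
    -------
    A sketch of the typical construction and use (``measurements`` and
    ``simulations`` stand for pre-existing Measurements and Simulations
    objects, which are not defined here)::

        likelihood = EnsembleLikelihood(measurements)
        logL = likelihood(simulations)  # log-likelihood of the ensemble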
51 |     """
52 |     def __init__(self, measurement_dict, covariance_dict=None, mask_dict=None,
53 |                  cov_func=None, use_trace_approximation=False, **kwargs):
54 |
55 |         super().__init__(measurement_dict, covariance_dict=covariance_dict,
56 |                          mask_dict=mask_dict, **kwargs)
57 |
58 |         # Requires covariances to be present when using this type of Likelihood
59 |         assert self._covariance_dict is not None
60 |
61 |         if cov_func is None:
62 |             self.cov_func = oas_mcov
63 |         else:
64 |             self.cov_func = cov_func
65 |
66 |         self.use_trace_approximation = use_trace_approximation
67 |
68 |     def call(self, simulations_dict):
69 |         """
70 |         EnsembleLikelihood class call function
71 |
72 |         Parameters
73 |         ----------
74 |         simulations_dict : imagine.observables.observable_dict.Simulations
75 |             Simulations object
76 |
77 |         Returns
78 |         -------
79 |         likelicache : float
80 |             log-likelihood value (copied to all nodes)
81 |         """
82 |         log.debug('@ ensemble_likelihood::__call__')
83 |         assert isinstance(simulations_dict, Simulations)
84 |         assert set(simulations_dict.keys()).issubset(self._measurement_dict.keys())
85 |         assert set(simulations_dict.keys()).issubset(self._covariance_dict.keys())
86 |
87 |         likelicache = 0.
88 |         for name in simulations_dict:
89 |             # Estimated Galactic covariance
90 |             sim_mean, sim_cov = self.cov_func(simulations_dict[name].data)
91 |             # Observed data/covariance
92 |             meas_data, meas_cov = (self._measurement_dict[name].data,
93 |                                    self._covariance_dict[name].data)
94 |
95 |             diff = meas_data - sim_mean
96 |             full_cov = meas_cov + sim_cov
97 |
98 |             if not self.use_trace_approximation:
99 |                 sign, logdet = pslogdet(full_cov*2.*np.pi)
100 |             else:
101 |                 meas_var = self._covariance_dict[name].var
102 |                 diag_sum = meas_var + simulations_dict[name].data.var(axis=0)
103 |                 sign, logdet = 1, (np.log(diag_sum*2.*np.pi)).sum()
104 |
105 |             likelicache += -0.5*(np.vdot(diff, plu_solve(full_cov, diff)) + sign*logdet)
106 |
107 |         return likelicache
108 |
109 |
110 | class EnsembleLikelihoodDiagonal(Likelihood):
111 |     """
112 |     As `EnsembleLikelihood`, but assuming that the covariance matrix is
113 |     diagonal and well described by the sample variance. Likewise, it only
114 |     considers the diagonal of the observational covariance matrix.
115 |
116 |     Parameters
117 |     ----------
118 |     measurement_dict : imagine.observables.observable_dict.Measurements
119 |         Measurements
120 |     covariance_dict : imagine.observables.observable_dict.Covariances
121 |         The covariances associated with the measurements. If the keyword
122 |         argument is absent, the covariances will be read from the attribute
123 |         `measurement_dict.cov`.
124 |     mask_dict : imagine.observables.observable_dict.Masks
125 |         Masks which will be applied to the Measurements, Covariances and
126 |         Simulations, before computing the likelihood
127 |     """
128 |     def __init__(self, measurement_dict, covariance_dict=None, mask_dict=None, **kwargs):
129 |
130 |         super().__init__(measurement_dict, covariance_dict=covariance_dict,
131 |                          mask_dict=mask_dict, **kwargs)
132 |
133 |         # Requires covariances to be present when using this type of Likelihood
134 |         assert self._covariance_dict is not None
135 |
136 |
137 |     def call(self, simulations_dict):
138 |         """
139 |         EnsembleLikelihoodDiagonal class call function
140 |
141 |         Parameters
142 |         ----------
143 |         simulations_dict : imagine.observables.observable_dict.Simulations
144 |             Simulations object
145 |
146 |         Returns
147 |         -------
148 |         likelicache : float
149 |             log-likelihood value (copied to all nodes)
150 |         """
151 |         log.debug('@ ensemble_likelihood::__call__')
152 |         assert isinstance(simulations_dict, Simulations)
153 |         assert set(simulations_dict.keys()).issubset(self._measurement_dict.keys())
154 |         assert set(simulations_dict.keys()).issubset(self._covariance_dict.keys())
155 |
156 |         likelicache = 0.
157 |         for name in simulations_dict.keys():
158 |             # Estimated Galactic variance
159 |             sim_mean = pmean(simulations_dict[name].data)
160 |             sim_var = pvar(simulations_dict[name].data)
161 |             # Observed data/variance
162 |             meas_data = self._measurement_dict[name].data
163 |             meas_var = self._covariance_dict[name].var
164 |
165 |             diff = meas_data - sim_mean
166 |             full_var = meas_var + sim_var
167 |
168 |             sign = np.sign(full_var).prod()
169 |             logdet = np.log(full_var*2.*np.pi).sum()
170 |
171 |             likelicache += -0.5*np.vdot(diff, 1./full_var * diff) - 0.5*sign*logdet
172 |
173 |         return likelicache
--------------------------------------------------------------------------------
/imagine/observables/observable.py:
--------------------------------------------------------------------------------
1 | """
2 | In the Observable class we define three data types, i.e.,
3 | - 'measured'
4 | - 'simulated'
5 | - 'covariance'
6 | where
7 | 'measured' indicates the hosted data is from measurements,
8 | which has a single realization,
9 | 'simulated' indicates the hosted data is from simulations,
10 | which has multiple realizations,
11 | 'covariance' indicates the hosted data is a covariance matrix,
12 | which has a single realization but, by default, should not be
13 | stored/read/written by a single computing node.
14 |
15 | 'measured' data puts identical copies on all computing nodes,
16 | which means each node has a full storage of 'measured' data.
17 |
18 | 'simulated' data puts different realizations on different nodes,
19 | which means each node holds part of the full set of realizations,
20 | but always at least one complete realization.
21 |
22 | 'covariance' data is distributed across all computing nodes,
23 | which means that, to obtain the full set of 'covariance' data,
24 | we have to collect pieces from all the computing nodes.
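A short sketch of constructing the first two flavours (an astropy
Quantity or a plain numpy array is accepted; the shape convention is
(number of realizations, data size)):

    import numpy as np
    import astropy.units as u
    from imagine.observables import Observable

    measured = Observable(np.ones((1, 12)) * u.K, dtype='measured')
    simulated = Observable(np.random.randn(5, 12), dtype='simulated')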
25 | """
26 |
27 | # %% IMPORTS
28 | # Built-in imports
29 | from copy import deepcopy
30 | import logging as log
31 |
32 | # Package imports
33 | import astropy.units as u
34 | import numpy as np
35 |
36 | # IMAGINE imports
37 | from imagine.tools import pmean, pshape, prosecutor, pglobal, pnewdiag, pdiag
38 |
39 | # All declaration
40 | __all__ = ['Observable']
41 |
42 |
43 | # %% CLASS DEFINITIONS
44 | class Observable(object):
45 |     """
46 |     The Observable class is designed for storing and manipulating distributed data.
47 |     For the testing suite, please turn to "imagine/tests/observable_tests.py".
48 |
49 |     Parameters
50 |     ----------
51 |     data : numpy.ndarray
52 |         Distributed/copied data
53 |     dtype : str
54 |         Data type, must be one of: 'measured', 'simulated', 'variance' or 'covariance'
55 |     otype : str
56 |         Observable type, must be either: 'HEALPix', 'tabular' or 'plain'
57 |     """
58 |     def __init__(self, data=None, dtype=None, coords=None, otype=None):
59 |         self.dtype = dtype
60 |         self.otype = otype
61 |
62 |         if isinstance(data, u.Quantity):
63 |             self.data = data.value
64 |             self.unit = data.unit
65 |         elif isinstance(data, np.ndarray):
66 |             self.data = data
67 |             self.unit = None
68 |         else:
69 |             raise ValueError('data must be a numpy array or astropy Quantity')
70 |
71 |         self.coords = coords
72 |         self.rw_flag = False
73 |
74 |     @property
75 |     def var(self):
76 |         """
77 |         The stored variance, if the Observable is a variance or covariance
78 |         """
79 |         if self._dtype == 'variance':
80 |             return self._data
81 |         elif self._dtype == 'covariance':
82 |             return pdiag(self._data)
83 |         else:
84 |             raise TypeError("Needs 'variance' or 'covariance' observables for this")
85 |
86 |     @property
87 |     def data(self):
88 |         """
89 |         Data stored in the local processor
90 |         """
91 |         if self._dtype == 'variance':
92 |             # Even when a variance was stored, data will still
93 |             # return a covariance, produced on-the-fly!
94 |             return pnewdiag(self._data)
95 |
96 |         return self._data
97 |
98 |     @data.setter
99 |     def data(self, data):
100 |         """
101 |         Performs extra input format checks for 'measured' and 'covariance'
102 |         data; no extra check for 'simulated'
103 |         """
104 |         log.debug('@ observable::data')
105 |         if data is None:
106 |             self._data = None
107 |         else:
108 |             assert isinstance(data, np.ndarray)
109 |             if self._dtype != 'variance':
110 |                 assert (len(data.shape) == 2)
111 |             if (self._dtype == 'measured'):  # copy single-row data from memory
112 |                 assert (data.shape[0] == 1)
113 |             self._data = np.copy(data)
114 |             if (self._dtype == 'covariance'):
115 |                 assert np.equal(*self.shape)
116 |
117 |     @property
118 |     def shape(self):
119 |         """
120 |         Shape of the GLOBAL array, i.e. considering all processors
121 |         (`numpy.ndarray`, read-only).
122 |         """
123 |         return pshape(self._data)  # estimate shape from all nodes
124 |
125 |     @property
126 |     def global_data(self):
127 |         """
128 |         Data gathered from ALL processors (`numpy.ndarray`, read-only).
129 |         Note that only the master node hosts the global data,
130 |         while the remaining nodes host None.
131 |         """
132 |         return pglobal(self.data)
133 |
134 |     @property
135 |     def size(self):
136 |         """
137 |         Local data size (`int`, read-only).
138 |         This is the dimension of the input data,
139 |         not the number of realizations.
140 |         """
141 |         return self._data.shape[1]
142 |
143 |     @property
144 |     def ensemble_mean(self):
145 |         log.debug('@ observable::ensemble_mean')
146 |         if (self._dtype == 'measured'):
147 |             assert (self._data.shape[0] == 1)  # single realization
148 |             return self._data  # since each node has a full copy
149 |         elif (self._dtype == 'simulated'):
150 |             return pmean(self._data)  # calculate mean from all nodes
151 |         else:
152 |             raise TypeError('unsupported data type')
153 |
154 |     @property
155 |     def dtype(self):
156 |         """
157 |         Data type; one of: 'measured', 'simulated', 'variance' or 'covariance'
158 |         """
159 |         return self._dtype
160 |
161 |     @dtype.setter
162 |     def dtype(self, dtype):
163 |         if dtype is None:
164 |             raise ValueError('dtype cannot be None')
165 |         else:
166 |             assert (dtype in ('measured', 'simulated',
167 |                               'variance', 'covariance'))
168 |             self._dtype = str(dtype)
169 |
170 |     @property
171 |     def rw_flag(self):
172 |         """
173 |         Rewriting flag; if True, the append method will overwrite the stored data
174 |         """
175 |         return self._rw_flag
176 |
177 |     @rw_flag.setter
178 |     def rw_flag(self, rw_flag):
179 |         assert isinstance(rw_flag, bool)
180 |         self._rw_flag = rw_flag
181 |
182 |     def append(self, new_data):
183 |         """
184 |         Appends new data; this is only allowed for the 'simulated' dtype.
185 |         The new data to be appended should also be distributed,
186 |         which makes the appending operation naturally parallel.
187 |
188 |         The rewrite flag is switched off once rewriting has been performed.
189 |         """
190 |         log.debug('@ observable::append')
191 |         assert isinstance(new_data, (np.ndarray, Observable))
192 |         assert (self._dtype == 'simulated')
193 |
194 |         if isinstance(new_data, u.Quantity):
195 |             assert self.unit == new_data.unit
196 |             new_data = new_data.value
197 |
198 |         if isinstance(new_data, np.ndarray):
199 |             prosecutor(new_data)
200 |             if (self._rw_flag):  # rewriting
201 |                 self._data = np.copy(new_data)
202 |                 self._rw_flag = False
203 |             else:
204 |                 self._data = np.vstack([self._data, new_data])
205 |         elif isinstance(new_data, Observable):
206 |             if (self._rw_flag):
207 |                 self._data = np.copy(new_data.data)
208 |                 self._rw_flag = False
209 |             else:
210 |                 self._data = np.vstack([self._data, new_data.data])
--------------------------------------------------------------------------------
/imagine/tests/test_likelihood.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # Package imports
3 | from mpi4py import MPI
4 | import numpy as np
5 | import pytest
6 |
7 | # IMAGINE imports
8 | from imagine.observables import (
9 |     Observable, Simulations, Measurements, Covariances)
10 | from imagine.likelihoods import (
11 |     SimpleLikelihood, EnsembleLikelihood, EnsembleLikelihoodDiagonal)
12 | from imagine.tools.covariance_estimator import diagonal_mcov
13 |
14 | # Globals
15 | comm = MPI.COMM_WORLD
16 | mpisize = comm.Get_size()
17 | mpirank = comm.Get_rank()
18 |
19 | # Marks tests in this module as quick
20 | pytestmark = pytest.mark.quick
21 |
22 | # %% PYTEST DEFINITIONS
23 | class TestSimpleLikeli(object):
24 |     def test_without_cov(self):
25 |         simdict = Simulations()
26 |         meadict = Measurements()
27 |         # mock measurements
28 |         arr_a = np.random.rand(1, 48)
29 |         comm.Bcast(arr_a, root=0)
30 |         mea = Observable(arr_a, 'measured')
31 |         meadict.append(name=('test', None, 2, None), data=mea, otype='HEALPix')
32 |         # mock sims
33 |         arr_b = np.random.rand(3, 48)
34 |         sim = Observable(arr_b, 'simulated')
35 |         simdict.append(name=('test', None, 2, None), data=sim, otype='HEALPix')
36 |         # no covariance
37 |         lh = SimpleLikelihood(meadict)
38 |         # calc by likelihood
39 |         result = lh(simdict)  # feed variable value, not parameter value
40 |         # calc by hand
41 |         full_b = np.vstack(comm.allgather(arr_b))  # global arr_b
42 |         diff = (np.mean(full_b, axis=0) - arr_a)
43 |         baseline = -0.5*np.vdot(diff, diff)
44 |         # compare
45 |         assert np.allclose(result, baseline)
46 |
47 |     def test_with_cov(self):
48 |         simdict = Simulations()
49 |         meadict = Measurements()
50 |         covdict = Covariances()
51 |         # mock measurements
52 |         arr_a = np.random.rand(1, 4*mpisize)
53 |         comm.Bcast(arr_a, root=0)
54 |         mea = Observable(arr_a, 'measured')
55 |         meadict.append(name=('test', None, 4*mpisize, None),
56 |                        data=mea, otype='plain')
57 |         # mock sims
58 |         arr_b = np.random.rand(5, 4*mpisize)
59 |         sim = Observable(arr_b, 'simulated')
60 |         simdict.append(name=('test', None, 4*mpisize, None),
61 |                        data=sim, otype='plain')
62 |         # mock covariance
63 |         arr_c = np.random.rand(4, 4*mpisize)
64 |         cov = Observable(arr_c, 'covariance')
65 |         covdict.append(name=('test', None, 4*mpisize, None),
66 |                        cov_data=cov)
67 |         # with covariance
68 |         lh = SimpleLikelihood(meadict, covdict)
69 |         # calc by likelihood
70 |         result = lh(simdict)  # feed variable value, not parameter value
71 |         # calc by hand
72 |         full_b = np.vstack(comm.allgather(arr_b))  # global arr_b
73 |         diff = (np.mean(full_b, axis=0) - arr_a)
74 |         full_cov = np.vstack(comm.allgather(arr_c))  # global covariance
75 |         (sign, logdet) = np.linalg.slogdet(full_cov*2.*np.pi)
76 |         baseline = -0.5*(np.vdot(diff, np.linalg.solve(full_cov, diff.T))+sign*logdet)
77 |         assert np.allclose(result, baseline)
78 |
79 |
80 | class TestEnsembleLikeli(object):
81 |     def test(self):
82 |         simdict = Simulations()
83 |         meadict = Measurements()
84 |         covdict = Covariances()
85 |         # mock measurements
86 |         arr_a = np.random.rand(1, 4*mpisize)
87 |         comm.Bcast(arr_a, root=0)
88 |         mea = Observable(arr_a, 'measured')
89 |         meadict.append(name=('test', None, 4*mpisize, None),
90 |                        data=mea, otype='plain')
91 |         # mock covariance
92 |         arr_c = np.random.rand(4, 4*mpisize)
93 |         cov = Observable(arr_c, 'covariance')
94 |         covdict.append(name=('test', None, 4*mpisize, None),
95 |                        cov_data=cov)
96 |         # mock observable with repeated single realisation
97 |         arr_b = np.random.rand(1, 4*mpisize)
98 |         comm.Bcast(arr_b, root=0)
99 |         arr_ens = np.zeros((2, 4*mpisize))
100 |         for i in range(len(arr_ens)):
101 |             arr_ens[i] = arr_b
102 |         sim = Observable(arr_ens, 'simulated')
103 |         simdict.append(name=('test', None, 4*mpisize, None),
104 |                        data=sim, otype='plain')
105 |
106 |         # simplelikelihood
107 |         lh_simple = SimpleLikelihood(meadict, covdict)
108 |         result_simple = lh_simple(simdict)
109 |         # ensemblelikelihood
110 |         lh_ensemble = EnsembleLikelihood(meadict, covdict,
111 |                                          use_trace_approximation=False)
112 |         result_ensemble = lh_ensemble(simdict)
113 |         assert result_ensemble == result_simple
114 |
115 |     def test_with_trace_approximation(self):
116 |         simdict = Simulations()
117 |         meadict = Measurements()
118 |         covdict = Covariances()
119 |         # mock measurements
120 |         arr_a = np.random.rand(1, 4*mpisize)
121 |         comm.Bcast(arr_a, root=0)
122 |         mea = Observable(arr_a, 'measured')
123 |         meadict.append(name=('test', None, 4*mpisize, None),
124 |                        data=mea, otype='plain')
125 |         # mock covariance (NB for the trace approximation to work, the data
126 |         # covariance needs to be diagonal)
127 |         arr_c = np.diag(np.random.rand(4))
128 |
129 |         cov = Observable(arr_c, 'covariance')
130 |         covdict.append(name=('test', None, 4*mpisize, None),
131 |                        cov_data=cov)
132 |         # mock observable with repeated single realisation
133 |         arr_b = np.random.rand(1, 4*mpisize)
134 |         comm.Bcast(arr_b, root=0)
135 |         arr_ens = np.zeros((2, 4*mpisize))
136 |         for i in range(len(arr_ens)):
137 |             arr_ens[i] = arr_b
138 |         sim = Observable(arr_ens, 'simulated')
139 |         simdict.append(name=('test', None, 4*mpisize, None),
140 |                        data=sim, otype='plain')
141 |
142 |         # simplelikelihood
143 |         lh_simple = SimpleLikelihood(meadict, covdict)
144 |         result_simple = lh_simple(simdict)
145 |         # ensemblelikelihood
146 |         lh_ensemble = EnsembleLikelihood(meadict, covdict,
147 |                                          use_trace_approximation=True)
148 |         result_ensemble = lh_ensemble(simdict)
149 |         assert result_ensemble == result_simple
150 |
151 |     def test_diag(self):
152 |         simdict = Simulations()
153 |         meadict = Measurements()
154 |         covdict = Covariances()
155 |         # mock measurements
156 |         arr_a = np.random.rand(1, 40)
157 |         mea = Observable(arr_a, 'measured')
158 |         meadict.append(name=('test', None, 40, None),
159 |                        data=mea, otype='plain')
160 |         # mock (diagonal) covariance
161 |         arr_var = np.random.rand(40)
162 |         cov = Observable(np.diag(arr_var), 'covariance')
163 |         covdict.append(name=('test', None, 40, None),
164 |                        cov_data=cov)
165 |         # mock observable
166 |         arr_ens = np.random.rand(10, 40)
167 |
168 |         sim = Observable(arr_ens, 'simulated')
169 |         simdict.append(name=('test', None, 40, None),
170 |                        data=sim, otype='plain')
171 |         # ensemblelikelihood + diagonal_mcov
172 |         lh_ens = EnsembleLikelihood(meadict, covdict, cov_func=diagonal_mcov)
173 |         result_ens = lh_ens(simdict)
174 |         # EnsembleLikelihoodDiagonal
175 |         lh_diag = EnsembleLikelihoodDiagonal(meadict, covdict)
176 |         result_diag = lh_diag(simdict)
177 |
178 |         assert np.allclose(result_diag, result_ens)
--------------------------------------------------------------------------------
/imagine/pipelines/emcee_pipeline.py:
--------------------------------------------------------------------------------
1 | # %% IMPORTS
2 | # Built-in imports
3 | import os
4 | import logging
5 |
6 |
7 | # Package imports
8 | import numpy as np
9 | import emcee
10 | import schwimmbad
11 | from mpi4py import MPI
12 |
13 | # IMAGINE imports
14 | from imagine.pipelines import Pipeline
15 |
16 | # GLOBALS
17 | comm = MPI.COMM_WORLD
18 | mpisize = comm.Get_size()
19 | mpirank = comm.Get_rank()
20 |
21 | # All declaration
22 | __all__ = ['EmceePipeline']
23 |
24 | class EmceePipeline(Pipeline):
25 |     """
26 |     Analysis pipeline with the MCMC sampler `emcee <https://emcee.readthedocs.io/>`_
27 |
28 |     See base class for initialization details.
29 |
30 |     The chains are considered converged once the total number of iterations
31 |     becomes larger than `convergence_factor` times the autocorrelation time.
32 |
33 |     The sampler behaviour is controlled using the `sampling_controllers`
34 |     property. A description of these can be found below.
35 |
36 |     Sampling controllers
37 |     --------------------
38 |     resume : bool
39 |         If False, the sampling starts from the beginning, overwriting
40 |         any previous work in the `chains_directory`. Otherwise, it
41 |         tries to resume a previous run.
42 |     nwalkers : int
43 |         Number of walkers
44 |     max_nsteps : int
45 |         Maximum number of iterations
46 |     nsteps_check : int
47 |         The sampler will check for convergence every `nsteps_check` steps
48 |     convergence_factor : float
49 |         Number of autocorrelation times the chain must exceed before it
50 |         is considered converged
51 |     burnin_factor : int
52 |         Number of autocorrelation times to be discarded from main results
53 |     thin_factor : float
54 |         Factor used to choose how the chain will be "thinned" after running
55 |     custom_initial_positions : list
56 |         List containing the starting positions to be used for the walkers.
57 |         If absent (default), initial positions are randomly sampled from the
58 |         prior distribution.
59 |     """
60 |
61 |     # Class attributes
62 |     SUPPORTS_MPI = True
63 |
64 |     def call(self, **kwargs):
65 |         """
66 |         Returns
67 |         -------
68 |         results : dict
69 |             A dictionary containing the sampler results
70 |             (usually in its native format)
71 |         """
72 |         logging.debug('@ emcee_pipeline::__call__')
73 |
74 |         default_params = dict(max_nsteps=100000,
75 |                               nwalkers=32,
76 |                               burnin_factor=2,
77 |                               thin_factor=0.5,
78 |                               nsteps_check=100,
79 |                               convergence_factor=100,
80 |                               resume=True)
81 |
82 |         # Keyword arguments can alter the sampling controllers
83 |         self.sampling_controllers = kwargs  # Updates the dict
84 |
85 |         # Prepares initialization and run parameters from
86 |         # defaults and sampling controllers
87 |         params = {k: self.sampling_controllers.get(k, default)
88 |                   for k, default in default_params.items()}
89 |
90 |         # Updates the sampling controllers to reflect what is being used
91 |         self.sampling_controllers = params  # Updates the dict
92 |
93 |         ndim = len(self._active_parameters)
94 |         if mpisize == 1:
95 |             pool = None
96 |             master = True
97 |         else:
98 |             # Uses an MPI pool when more than 1 process is present
99 |             pool = schwimmbad.MPIPool(use_dill=True)
100 |             master = pool.is_master()
101 |
102 |         if not master:
103 |             # "Worker" processes behaviour
104 |             pool.wait()
105 |             self.sampler = None
106 |             self._samples_array = None
107 |         else:
108 |             filename = os.path.join(self.chains_directory, 'chains.hdf5')
109 |
110 |             backend = emcee.backends.HDFBackend(filename)
111 |
112 |             # Prepares the initial positions of the walkers
113 |             if 'custom_initial_positions' in params:
114 |                 pos = params['custom_initial_positions']
115 |                 assert len(pos) == params['nwalkers'], 'Number of initial positions does not match number of walkers'
116 |             else:
117 |                 # Randomly samples the initial positions from the prior
118 |                 pos = [self.prior_transform(np.random.sample(ndim))
119 |                        for _ in range(params['nwalkers'])]
120 |             # Only uses this if not resuming
121 |             if os.path.isfile(filename):
122 |                 if self.sampling_controllers['resume']:
123 |                     pos = None
124 |                 else:
125 |                     backend.reset(params['nwalkers'], ndim)
126 |
127 |             # Sets up the sampler
128 |             self.sampler = emcee.EnsembleSampler(params['nwalkers'], ndim,
129 |                                                  self.log_probability_unnormalized,
130 |                                                  pool=pool, backend=backend)
131 |
132 |             old_tau = np.inf
133 |             nsteps = 0
134 |
135 |             # Iterates trying to reach convergence
136 |             while nsteps < params['max_nsteps']:
137 |                 self.sampler.run_mcmc(pos, progress=True, store=True,
138 |                                       nsteps=self.sampling_controllers['nsteps_check'])
139 |
140 |                 pos = None  # Resumes from where it stopped in next steps
141 |
142 |                 # Checks convergence (following the emcee authors' prescription)
143 |                 self.tau = self.sampler.get_autocorr_time(tol=0)
144 |                 self.converged = np.all(
145 |                     self.tau * params['convergence_factor'] < self.sampler.iteration)
146 |                 self.converged &= np.all(np.abs(old_tau - self.tau) / self.tau < 0.01)
147 |                 if self.converged:
148 |                     break
149 |                 old_tau = self.tau
150 |                 nsteps = self.sampler.iteration
151 |
152 |             burnin = int(params['burnin_factor'] * np.max(self.tau))
153 |             thin = int(params['thin_factor'] * np.min(self.tau))
154 |
155 |             self._samples_array = self.sampler.get_chain(discard=burnin,
156 |                                                          thin=thin,
157 |                                                          flat=True)
158 |         if mpisize > 1:
159 |             pool.close()
160 |             self._samples_array = comm.bcast(self._samples_array, root=0)
161 |
162 |         # This involves no computation of the evidence
163 |         self._evidence = np.nan
164 |         self._evidence_err = np.nan
165 |
166 |         # Removes the internal reference to the sampler
167 |         # (to avoid problems while saving)
168 |         sampler, self.sampler = self.sampler, None
169 |         # Returns the sampler object (to allow further checks by the user)
170 |         return sampler
171 |
172 |
173 |     def get_intermediate_results(self):
174 |         if self.sampler is not None:
175 |             chain = self.sampler.get_chain(flat=True)
176 |
177 |             # Reconstructs the log-likelihood from the log-posterior and the prior
178 |             log_prob = self.sampler.get_log_prob(flat=True)
179 |             prior = np.array([self.prior_pdf(point) for point in chain])
180 |             logLikelihood = log_prob - np.log(prior)
181 |
182 |             # Sets the current chain points, as a numpy array of shape (n, npar)
183 |             self.intermediate_results['rejected_points'] = chain
184 |             # Sets the likelihood values of those points
185 |             self.intermediate_results['logLikelihood'] = logLikelihood
--------------------------------------------------------------------------------
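A hedged usage sketch for `EmceePipeline` (it assumes a pipeline object has already been configured elsewhere with its simulator, field factories and likelihood; the controller values below are purely illustrative):

    sampler = pipeline(nwalkers=64, max_nsteps=20000, resume=False)
    # The call returns emcee's native EnsembleSampler, so standard
    # diagnostics remain available, e.g.:
    # tau = sampler.get_autocorr_time(tol=0)

The burned-in, thinned posterior samples are stored internally in `_samples_array` and broadcast to all MPI ranks, as shown in the source above.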