├── .github ├── release-drafter.yml └── workflows │ ├── changelog.yml │ ├── ci.yml │ ├── deploy.yml │ └── release-drafter.yml ├── .gitignore ├── .mailmap ├── .prospector.yaml ├── .readthedocs.yml ├── .zenodo.json ├── CHANGES.rst ├── CODEOWNERS ├── LICENSE ├── MANIFEST.in ├── README.rst ├── docs ├── AUTHORS.rst ├── Makefile ├── benchmarks │ └── index.rst ├── binning.rst ├── changelog.rst ├── changes │ ├── 276.feature.rst │ ├── README.md │ └── template.rst ├── conf.py ├── contribute.rst ├── cut_optimization.rst ├── cuts.rst ├── examples.rst ├── gammapy.rst ├── index.rst ├── install.rst ├── interpolation.rst ├── introduction.rst ├── io │ └── index.rst ├── irf │ └── index.rst ├── notebooks │ ├── fact_example.ipynb │ └── index.rst ├── sensitivity.rst ├── simulation.rst ├── spectral.rst ├── statistics.rst └── utils.rst ├── download_irfs.py ├── download_private_data.sh ├── environment.yml ├── examples ├── calculate_eventdisplay_irfs.py ├── comparison_with_EventDisplay.ipynb └── plot_spectra.py ├── pyirf ├── __init__.py ├── _dev_version │ └── __init__.py ├── benchmarks │ ├── __init__.py │ ├── angular_resolution.py │ ├── energy_bias_resolution.py │ └── tests │ │ ├── __init__.py │ │ ├── test_angular_resolution.py │ │ └── test_bias_resolution.py ├── binning.py ├── compat.py ├── conftest.py ├── coordinates.py ├── cut_optimization.py ├── cuts.py ├── exceptions.py ├── gammapy.py ├── interpolation │ ├── __init__.py │ ├── base_extrapolators.py │ ├── base_interpolators.py │ ├── component_estimators.py │ ├── griddata_interpolator.py │ ├── moment_morph_interpolator.py │ ├── nearest_neighbor_searcher.py │ ├── nearest_simplex_extrapolator.py │ ├── quantile_interpolator.py │ ├── tests │ │ ├── __init__.py │ │ ├── test_base_extrapolators.py │ │ ├── test_base_interpolators.py │ │ ├── test_component_estimator_base_classes.py │ │ ├── test_component_estimator_specific_classes.py │ │ ├── test_griddata_interpolator.py │ │ ├── test_moment_morph_interpolator.py │ │ ├── 
test_nearest_neighbor_searcher.py │ │ ├── test_nearest_simplex_extrapolator.py │ │ ├── test_quantile_interpolator.py │ │ ├── test_utils.py │ │ └── test_visible_edges_extrapolator.py │ ├── utils.py │ └── visible_edges_extrapolator.py ├── io │ ├── __init__.py │ ├── eventdisplay.py │ ├── gadf.py │ └── tests │ │ ├── __init__.py │ │ └── test_gadf.py ├── irf │ ├── __init__.py │ ├── background.py │ ├── effective_area.py │ ├── energy_dispersion.py │ ├── psf.py │ └── tests │ │ ├── __init__.py │ │ ├── test_background.py │ │ ├── test_effective_area.py │ │ ├── test_energy_dispersion.py │ │ └── test_psf.py ├── resources │ └── dampe_p+he.ecsv ├── sensitivity.py ├── simulations.py ├── spectral.py ├── statistics.py ├── tests │ ├── __init__.py │ ├── test_binning.py │ ├── test_coordinates.py │ ├── test_cuts.py │ ├── test_gammapy_interop.py │ ├── test_optimize_cuts.py │ ├── test_sensitivity.py │ ├── test_simulations.py │ ├── test_spectral.py │ ├── test_statistics.py │ └── test_utils.py ├── utils.py └── version.py └── pyproject.toml /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | template: | 2 | ## What's Changed since $PREVIOUS_TAG 3 | 4 | $CHANGES 5 | 6 | ## Contributors 7 | 8 | $CONTRIBUTORS 9 | -------------------------------------------------------------------------------- /.github/workflows/changelog.yml: -------------------------------------------------------------------------------- 1 | name: Changelog 2 | 3 | on: 4 | pull_request: 5 | # should also be re-run when changing labels 6 | types: [opened, reopened, labeled, unlabeled, synchronize] 7 | 8 | env: 9 | FRAGMENT_NAME: "docs/changes/${{ github.event.number }}.*.rst" 10 | 11 | jobs: 12 | changelog: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v3 16 | with: 17 | fetch-depth: 0 18 | 19 | - name: Check for news fragment 20 | if: ${{ ! 
contains( github.event.pull_request.labels.*.name, 'no-changelog-needed')}} 21 | uses: andstor/file-existence-action@v2 22 | with: 23 | files: ${{ env.FRAGMENT_NAME }} 24 | fail: true 25 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags: 8 | - '**' 9 | pull_request: 10 | 11 | env: 12 | MPLBACKEND: Agg 13 | PYTEST_ADDOPTS: --color=yes 14 | GITHUB_PR_NUMBER: ${{ github.event.number }} 15 | 16 | jobs: 17 | tests: 18 | runs-on: ubuntu-latest 19 | strategy: 20 | matrix: 21 | python-version: ["3.10", "3.11", "3.12", "3.13"] 22 | 23 | steps: 24 | - uses: actions/checkout@v4 25 | with: 26 | fetch-depth: 0 27 | 28 | - uses: actions/cache@v3 29 | id: cache-irfs 30 | with: 31 | path: ./irfs 32 | key: irf-cache 33 | 34 | # make sure we have version info 35 | - run: git fetch --tags 36 | 37 | - name: Set up Python ${{ matrix.python-version }} 38 | uses: actions/setup-python@v5 39 | with: 40 | python-version: ${{ matrix.python-version }} 41 | 42 | - name: Install dependencies 43 | run: | 44 | python --version 45 | pip install -U pip setuptools wheel restructuredtext_lint 46 | pip install -e .[all] 47 | pip freeze 48 | 49 | - name: Check README 50 | run: | 51 | rst-lint README.rst 52 | 53 | - name: Get IRF Files 54 | if: steps.cache-irfs.outputs.cache-hit != 'true' 55 | run: python download_irfs.py 56 | 57 | - name: Tests 58 | run: | 59 | pytest --cov=pyirf --cov-report=xml 60 | 61 | - uses: codecov/codecov-action@v3 62 | 63 | docs: 64 | runs-on: ubuntu-latest 65 | steps: 66 | - uses: actions/checkout@v4 67 | with: 68 | fetch-depth: 0 69 | 70 | - name: Set up Python 71 | uses: actions/setup-python@v5 72 | with: 73 | python-version: "3.12" 74 | 75 | - name: Install doc dependencies 76 | run: | 77 | sudo apt update --yes && sudo apt install --yes git build-essential pandoc 
curl graphviz 78 | pip install -U pip setuptools wheel towncrier 79 | pip install -e .[docs] 80 | git describe --tags 81 | python -c 'import pyirf; print(pyirf.__version__)' 82 | 83 | - name: Produce Changelog 84 | run: | 85 | towncrier build --yes 86 | 87 | - name: Build docs 88 | run: cd docs && make html SPHINXOPTS="-W --keep-going -n --color -j auto" 89 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy to PyPi 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | fetch-depth: 0 15 | 16 | 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: "3.12" 21 | 22 | - name: Install dependencies 23 | run: | 24 | python --version 25 | pip install -U pip build setuptools_scm[toml] 26 | # make sure we have the version, grep will exit with 1 if it finds 0.0.0 27 | python -m setuptools_scm | grep -v '0.0.0' 28 | python -m build 29 | 30 | - name: Publish package 31 | uses: pypa/gh-action-pypi-publish@release/v1 32 | with: 33 | user: __token__ 34 | password: ${{ secrets.pypi_password }} 35 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | update_release_draft: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: release-drafter/release-drafter@v5 13 | env: 14 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # autogenerated by setuptools_scm 2 | pyirf/_version.py 
3 | # Byte-compiled / optimized / DLL files 4 | *.fits.gz 5 | *.hdf5 6 | data 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | pip-wheel-metadata/ 29 | share/python-wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | MANIFEST 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Sphinx 46 | docs/api 47 | docs/_build 48 | 49 | # Results and plots 50 | *.fits.gz 51 | *.png 52 | 53 | # Unit test / coverage reports 54 | htmlcov/ 55 | .tox/ 56 | .nox/ 57 | .coverage 58 | .coverage.* 59 | .cache 60 | nosetests.xml 61 | coverage.xml 62 | *.cover 63 | *.py,cover 64 | .hypothesis/ 65 | .pytest_cache/ 66 | 67 | # Translations 68 | *.mo 69 | *.pot 70 | 71 | # Django stuff: 72 | *.log 73 | local_settings.py 74 | db.sqlite3 75 | db.sqlite3-journal 76 | 77 | # Flask stuff: 78 | instance/ 79 | .webassets-cache 80 | 81 | # Scrapy stuff: 82 | .scrapy 83 | 84 | # Sphinx documentation 85 | docs/_build/ 86 | 87 | # PyBuilder 88 | target/ 89 | 90 | # Jupyter Notebook 91 | .ipynb_checkpoints 92 | 93 | # IPython 94 | profile_default/ 95 | ipython_config.py 96 | 97 | # pyenv 98 | .python-version 99 | 100 | # pipenv 101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 104 | # install all needed dependencies. 105 | #Pipfile.lock 106 | 107 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 108 | __pypackages__/ 109 | 110 | # Celery stuff 111 | celerybeat-schedule 112 | celerybeat.pid 113 | 114 | # SageMath parsed files 115 | *.sage.py 116 | 117 | # Environments 118 | .env 119 | .venv 120 | env/ 121 | venv/ 122 | ENV/ 123 | env.bak/ 124 | venv.bak/ 125 | 126 | # Spyder project settings 127 | .spyderproject 128 | .spyproject 129 | 130 | # Rope project settings 131 | .ropeproject 132 | 133 | # mkdocs documentation 134 | /site 135 | 136 | # mypy 137 | .mypy_cache/ 138 | .dmypy.json 139 | dmypy.json 140 | 141 | # Pyre type checker 142 | .pyre/ 143 | 144 | # IRFs downloaded for tests 145 | irfs/ 146 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | Maximilian Linhoff 2 | 3 | Michele Peresano Michele Peresano 4 | 5 | Thomas Vuillaume vuillaut 6 | Thomas Vuillaume Thomas Vuillaume 7 | 8 | Julian Sitarek <33022433+jsitarek@users.noreply.github.com> 9 | 10 | Rune Michael Dominik 11 | Rune Michael Dominik 12 | -------------------------------------------------------------------------------- /.prospector.yaml: -------------------------------------------------------------------------------- 1 | pep257: 2 | disable: 3 | - D104 4 | - D202 5 | - D213 6 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | configuration: docs/conf.py 5 | 6 | build: 7 | os: ubuntu-24.04 8 | tools: 9 | python: "3.12" 10 | 11 | python: 12 | install: 13 | - method: pip 14 | path: . 
15 | extra_requirements: 16 | - docs 17 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "*pyirf* is a python3-based library for the generation of Instrument Response Functions (IRFs) and sensitivities for the Cherenkov Telescope Array (CTA)", 3 | "license": "MIT", 4 | "keywords": [ 5 | "gamma-ray astronomy", 6 | "Imaging Atmospheric Cherenkov Telescope", 7 | "IACT", 8 | "CTA", 9 | "instrument response", 10 | "irf", 11 | "python" 12 | ], 13 | "upload_type": "software", 14 | "access_right": "open", 15 | "creators": [ 16 | { 17 | "name": "Maximilian Linhoff", 18 | "affiliation": "Department of Physics, TU Dortmund University, Otto-Hahn-Str. 4, 44221 Dortmund, Germany", 19 | "orcid": "0000-0001-7993-8189" 20 | }, 21 | { 22 | "name": "Michele Peresano", 23 | "affiliation": "AIM, CEA, CNRS, Universite Paris-Saclay, Universite Paris Diderot, Sorbonne Paris Cite, F-91191 Gif-sur-Yvette, France", 24 | "orcid": "0000-0002-7537-7334" 25 | }, 26 | { 27 | "name": "Rune Michael Dominik", 28 | "affiliation": "Department of Physics, TU Dortmund University, Otto-Hahn-Str. 4, 44221 Dortmund, Germany", 29 | "orcid": "0000-0003-4168-7200" 30 | }, 31 | { 32 | "name": "Julian Sitarek", 33 | "affiliation": "University of Lodz, Faculty of Physics and Applied Informatics, Department of Astrophysics", 34 | "orcid": "0000-0002-1659-5374" 35 | }, 36 | { 37 | "name": "Thomas Vuillaume", 38 | "affiliation": "Laboratoire d’Annecy de Physique des Particules, Univ. Grenoble Alpes, Univ. Savoie Mont Blanc, CNRS, LAPP, 74000 Annecy, France", 39 | "orcid": "0000-0002-5686-2078" 40 | }, 41 | { 42 | "name": "Michael Punch" 43 | }, 44 | { 45 | "name": "Lukas Nickel", 46 | "affiliation": "Department of Physics, TU Dortmund University, Otto-Hahn-Str. 
4, 44221 Dortmund, Germany" 47 | }, 48 | { 49 | "affiliation": "Department of Physics, TU Dortmund University, Otto-Hahn-Str. 4, 44221 Dortmund, Germany", 50 | "name": "Noah Biederbeck" 51 | }, 52 | { 53 | "name": "Gernot Maier" 54 | }, 55 | { 56 | "name": "Abelardo Moralejo", 57 | "affiliation": "Institut de Fisica d'Altes Energies (IFAE), The Barcelona Institute of Science and Technology, Campus UAB, 08193 Bellaterra (Barcelona), Spain" 58 | }, 59 | { 60 | "name": "Lea Jouvin", 61 | "affiliation": "Institut de Fisica d'Altes Energies (IFAE), The Barcelona Institute of Science and Technology, Campus UAB, 08193 Bellaterra (Barcelona), Spain" 62 | }, 63 | { 64 | "affiliation": "Aix Marseille Univ, CNRS/IN2P3, CPPM, Marseille, France", 65 | "name": "Gaia Verna" 66 | }, 67 | { 68 | "name": "Hugo van Kemenade" 69 | } 70 | ] 71 | } 72 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # EXAMPLE FROM GITHUB DOCS TO FILL OUT BY AUTHORS & CONTRIBUTORS 2 | 3 | # This is a comment. 4 | # Each line is a file pattern followed by one or more owners. 5 | 6 | # These owners will be the default owners for everything in 7 | # the repo. Unless a later match takes precedence, 8 | # @global-owner1 and @global-owner2 will be requested for 9 | # review when someone opens a pull request. 10 | # * @global-owner1 @global-owner2 11 | * @maxnoe @HealthyPear 12 | 13 | pyirf/interpolation/* @RuneDominik 14 | 15 | # Order is important; the last matching pattern takes the most 16 | # precedence. When someone opens a pull request that only 17 | # modifies JS files, only @js-owner and not the global 18 | # owner(s) will be requested for a review. 19 | # *.js @js-owner 20 | 21 | # You can also use email addresses if you prefer. They'll be 22 | # used to look up users just like we do for commit author 23 | # emails. 
24 | # *.go docs@example.com 25 | 26 | # In this example, @doctocat owns any files in the build/logs 27 | # directory at the root of the repository and any of its 28 | # subdirectories. 29 | # /build/logs/ @doctocat 30 | 31 | # The `docs/*` pattern will match files like 32 | # `docs/getting-started.md` but not further nested files like 33 | # `docs/build-app/troubleshooting.md`. 34 | # docs/* docs@example.com 35 | 36 | # In this example, @octocat owns any file in an apps directory 37 | # anywhere in your repository. 38 | # apps/ @octocat 39 | 40 | # In this example, @doctocat owns any file in the `/docs` 41 | # directory in the root of your repository and any of its 42 | # subdirectories. 43 | # /docs/ @doctocat 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Cherenkov Telescope Array Observatory Consortium 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | prune pyirf/_dev_version 2 | include pyirf/resources/* 3 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ========================================== 2 | pyirf |ci| |codacy| |coverage| |doilatest| 3 | ========================================== 4 | 5 | .. |ci| image:: https://github.com/cta-observatory/pyirf/workflows/CI/badge.svg?branch=main 6 | :target: https://github.com/cta-observatory/pyirf/actions?query=workflow%3ACI+branch%3Amain 7 | .. |codacy| image:: https://app.codacy.com/project/badge/Grade/669fef80d3d54070960e66351477e383 8 | :target: https://www.codacy.com/gh/cta-observatory/pyirf/dashboard?utm_source=github.com&utm_medium=referral&utm_content=cta-observatory/pyirf&utm_campaign=Badge_Grade 9 | .. |coverage| image:: https://codecov.io/gh/cta-observatory/pyirf/branch/main/graph/badge.svg 10 | :target: https://codecov.io/gh/cta-observatory/pyirf 11 | .. |doilatest| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4740755.svg 12 | :target: https://doi.org/10.5281/zenodo.4740755 13 | 14 | *pyirf* is a python library for the generation of Instrument Response 15 | Functions (IRFs) and sensitivities for the 16 | `Cherenkov Telescope Array (CTA) `_ . 17 | 18 | Thanks to its simple input/output and modular function-based structure, 19 | it can be used to process also data from other Imaging Atmospheric 20 | Cherenkov Telescopes (IACTs). 
21 | 22 | - **Source code:** https://github.com/cta-observatory/pyirf 23 | - **Documentation:** https://cta-observatory.github.io/pyirf/ 24 | 25 | Citing this software 26 | -------------------- 27 | If you use a released version of this software for a publication, 28 | please cite it by using the corresponding DOI: 29 | 30 | - latest : |doilatest| 31 | - all versions: `Please visit Zenodo `_ and select the correct version 32 | 33 | At this point, our latest publication is the `2023 ICRC proceeding `_, which you can 34 | cite using the following bibtex entry, especially if using functionalities from ``pyirf.interpolation``: 35 | 36 | .. code:: 37 | 38 | @inproceedings{pyirf-icrc-2023, 39 | author = {Dominik, Rune Michael and Linhoff, Maximilian and Sitarek, Julian}, 40 | title = {Interpolation of Instrument Response Functions for the Cherenkov Telescope Array in the Context of pyirf}, 41 | usera = {for the CTA Consortium}, 42 | doi = {10.22323/1.444.0703}, 43 | booktitle = {Proceedings, 38th International Cosmic Ray Conference}, 44 | year=2023, 45 | volume={444}, 46 | number={618}, 47 | location={Nagoya, Japan}, 48 | } 49 | -------------------------------------------------------------------------------- /docs/AUTHORS.rst: -------------------------------------------------------------------------------- 1 | .. _authors: 2 | 3 | Authors 4 | ======= 5 | 6 | To see who contributed to ``pyirf``, please visit the 7 | `GitHub contributors page `__ 8 | or run 9 | 10 | .. code-block:: bash 11 | 12 | git shortlog -sne 13 | 14 | 15 | ``pyirf`` started as part of `protopipe `__ by Julien Lefaucher, 16 | but was largely rewritten in September 2020, making use of code from the 17 | previous version, the `pyfact `__ module and the 18 | `FACT irf `__ package. 
19 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/benchmarks/index.rst: -------------------------------------------------------------------------------- 1 | .. _benchmarks: 2 | 3 | Benchmarks 4 | ========== 5 | 6 | Functions to calculate benchmarks. 7 | 8 | ------------- 9 | 10 | .. automodapi:: pyirf.benchmarks 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/binning.rst: -------------------------------------------------------------------------------- 1 | .. _binning: 2 | 3 | Binning and Histogram Utilities 4 | =============================== 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. automodapi:: pyirf.binning 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changelog: 2 | 3 | ========= 4 | Changelog 5 | ========= 6 | 7 | .. 
include:: ../CHANGES.rst 8 | -------------------------------------------------------------------------------- /docs/changes/276.feature.rst: -------------------------------------------------------------------------------- 1 | Add function to make 3d background for lon/lat coordinates. 2 | -------------------------------------------------------------------------------- /docs/changes/README.md: -------------------------------------------------------------------------------- 1 | # How to use `towncrier` 2 | 3 | A tutorial can be found [here](https://towncrier.readthedocs.io/en/stable/tutorial.html). 4 | 5 | 1. Create a new file for your changes `..rst` in the corresponding folder. The following types are available: 6 | - feature: `New feature` 7 | - bugfix: `Bugfix` 8 | - api: `API Changes` 9 | - optimization: `Refactoring and Optimization` 10 | - maintenance: `Maintenance` 11 | 12 | 2. Write a suitable message for the change: 13 | ``` 14 | Fixed ``crazy_function`` to be consistent with ``not_so_crazy_function`` 15 | ``` 16 | 17 | 3. 
(For maintainers) How to generate a change log: 18 | - Execute the following command in the base directory of the project 19 | ``` 20 | towncrier build --version= 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/changes/template.rst: -------------------------------------------------------------------------------- 1 | {% if render_title %} 2 | {% if versiondata.name %} 3 | {{ versiondata.name | lower }} {{ versiondata.version }} ({{ versiondata.date }}) 4 | {{ top_underline * ((versiondata.name + versiondata.version + versiondata.date)|length + 4)}} 5 | {% else %} 6 | {{ versiondata.version }} ({{ versiondata.date }}) 7 | {{ top_underline * ((versiondata.version + versiondata.date)|length + 3)}} 8 | {% endif %} 9 | {% endif %} 10 | 11 | {% for category, val in definitions.items() %} 12 | 13 | {% set underline = underlines[0] %} 14 | {{ definitions[category]['name'] }} 15 | {{ underline * definitions[category]['name']|length }} 16 | {% set underline = underlines[1] %} 17 | 18 | {% for section, _ in sections.items() %} 19 | {% if section and category in sections[section] %} 20 | {{section}} 21 | {{ underline * section|length }} 22 | 23 | {% endif %} 24 | {% if sections[section] and category in sections[section] %} 25 | {% if definitions[category]['showcontent'] %} 26 | {% for text, values in sections[section][category].items() %} 27 | - {{ text }} [{{ values|join(', ') }}] 28 | 29 | {% endfor %} 30 | {% else %} 31 | - {{ sections[section][category]['']|join(', ') }} 32 | 33 | {% endif %} 34 | {% if sections[section][category]|length == 0 %} 35 | No significant changes. 36 | 37 | {% else %} 38 | {% endif %} 39 | {% else %} 40 | {# No significant changes. 
#} 41 | {% endif %} 42 | {% endfor %} 43 | {% endfor %} 44 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | 16 | sys.path.insert(0, os.path.abspath("..")) 17 | from pyirf import __version__ 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = "pyirf" 22 | copyright = "2020, Maximilian Nöthe, Michele Peresano, Thomas Vuillaume" 23 | author = "Maximilian Nöthe, Michele Peresano, Thomas Vuillaume" 24 | 25 | # The full version, including alpha/beta/rc tags 26 | version = __version__ 27 | 28 | # -- General configuration --------------------------------------------------- 29 | 30 | # The suffix(es) of source filenames. 31 | # You can specify multiple suffix as a list of string: 32 | # 33 | source_suffix = [".rst", ".md"] 34 | # source_suffix = '.rst' 35 | 36 | # The master toctree document. 37 | master_doc = "index" 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 
42 | extensions = [ 43 | "numpydoc", 44 | "nbsphinx", 45 | "sphinx.ext.autodoc", 46 | "sphinx.ext.doctest", 47 | "sphinx.ext.intersphinx", 48 | "sphinx.ext.todo", 49 | "sphinx.ext.coverage", 50 | "sphinx.ext.mathjax", 51 | "sphinx.ext.ifconfig", 52 | "sphinx.ext.viewcode", 53 | "sphinx.ext.autosummary", 54 | "sphinx.ext.napoleon", 55 | "sphinx_automodapi.automodapi", 56 | "sphinx_automodapi.smart_resolver", 57 | "IPython.sphinxext.ipython_console_highlighting", 58 | ] 59 | 60 | # nbsphinx 61 | # nbsphinx_execute = "never" 62 | nbsphinx_execute_arguments = [ 63 | "--InlineBackend.figure_formats={'png', }", 64 | "--InlineBackend.rc={'figure.dpi': 300}", 65 | ] 66 | 67 | numpydoc_show_class_members = False 68 | autosummary_generate = True 69 | 70 | # Add any paths that contain templates here, relative to this directory. 71 | templates_path = ["_templates"] 72 | 73 | # List of patterns, relative to source directory, that match files and 74 | # directories to ignore when looking for source files. 75 | # This pattern also affects html_static_path and html_extra_path. 76 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "changes"] 77 | 78 | 79 | # -- Options for HTML output ------------------------------------------------- 80 | 81 | # The theme to use for HTML and HTML Help pages. See the documentation for 82 | # a list of builtin themes. 83 | # 84 | html_theme = "sphinx_rtd_theme" 85 | 86 | html_theme_options = { 87 | 'canonical_url': 'https://pyirf.readthedocs.io/', 88 | } 89 | 90 | intersphinx_mapping = { 91 | "python": ("https://docs.python.org/3.9/", None), 92 | "numpy": ("https://numpy.org/doc/stable/", None), 93 | "scipy": ("https://docs.scipy.org/doc/scipy/", None), 94 | "astropy": ("https://docs.astropy.org/en/latest/", None), 95 | "gammapy": ("https://docs.gammapy.org/1.1/", None), 96 | } 97 | 98 | # Add any paths that contain custom static files (such as style sheets) here, 99 | # relative to this directory. 
They are copied after the builtin static files, 100 | # so a file named "default.css" will overwrite the builtin "default.css". 101 | # html_static_path = ['_static'] 102 | -------------------------------------------------------------------------------- /docs/contribute.rst: -------------------------------------------------------------------------------- 1 | .. _contribute: 2 | 3 | How to contribute 4 | ================= 5 | 6 | .. contents:: Current projects 7 | :local: 8 | :depth: 2 9 | 10 | Issue Tracker 11 | ------------- 12 | 13 | We use the `GitHub issue tracker `__ 14 | for individual issues and the `GitHub Projects page `_ can give you a quick overview. 15 | 16 | If you found a bug or you are missing a feature, please check the existing 17 | issues and then open a new one or contribute to the existing issue. 18 | 19 | Development procedure 20 | --------------------- 21 | 22 | 23 | We use the standard `GitHub workflow `__. 24 | 25 | If you are not part of the ``cta-observatory`` organization, 26 | you need to fork the repository to contribute. 27 | See the `GitHub tutorial on forks `__ if you are unsure how to do this. 28 | 29 | #. When you find something that is wrong or missing 30 | 31 | - Go to the issue tracker and check if an issue already exists for your bug or feature 32 | - In general it is always better to anticipate a PR with a new issue and link the two 33 | 34 | #. To work on a bug fix or new feature, create a new branch, add commits and open your pull request 35 | 36 | - If you think your pull request is good to go and ready to be reviewed, 37 | you can directly open it as normal pull request. 38 | 39 | - You can also open it as a “Draft Pull Request”, if you are not yet finished 40 | but want to get early feedback on your ideas. 41 | 42 | - Especially when working on a bug, it makes sense to first add a new 43 | test that fails due to the bug and in a later commit add the fix showing 44 | that the test is then passing. 
45 | This helps understanding the bug and will prevent it from reappearing later. 46 | 47 | - Create a changelog entry in ``docs/changes``, please note the ``README.md`` there. 48 | Minor changes (on the magnitude of fixing a broken link or renaming a variable) can receive the ``no-changelog-needed`` label. 49 | This should, however, be a rare exception. 50 | 51 | #. Wait for review comments and then implement or discuss requested changes. 52 | 53 | 54 | We use `Github Actions `__ to 55 | run the unit tests and documentation building automatically for every pull request. 56 | Passing unit tests and coverage of the changed code are required for all pull requests. 57 | 58 | 59 | Running the tests and looking at coverage 60 | ----------------------------------------- 61 | 62 | For more immediate feedback, you should run the tests locally before pushing, 63 | as builds on travis take quite long. 64 | 65 | To run the tests locally, make sure you have the `tests` extras installed and then 66 | run 67 | 68 | .. code:: bash 69 | 70 | $ pytest -v 71 | 72 | 73 | To also inspect the coverage, run 74 | 75 | .. code:: bash 76 | 77 | $ pytest --cov=pyirf --cov-report=html -v 78 | 79 | This will create a coverage report in html form in the ``htmlcov`` directory, 80 | which you can serve locally using 81 | 82 | .. code:: bash 83 | 84 | $ python -m http.server -d htmlcov 85 | 86 | After this, you can view the report in your browser by visiting the url printed 87 | to the terminal. 88 | 89 | 90 | Building the documentation 91 | -------------------------- 92 | 93 | This documentation uses sphinx and restructured text. 94 | For an Introduction, see the `Sphinx documentation `_. 95 | 96 | To build the docs locally, enter the ``docs`` directory and call: 97 | 98 | .. code:: bash 99 | 100 | make html 101 | 102 | Some changes require a full remake of the documentation, for that call 103 | 104 | .. 
code:: bash 105 | 106 | make clean html 107 | 108 | If you created or deleted file or submodule, you also need to remove the 109 | ``api`` directory, it will be regenerated automatically. 110 | 111 | Make sure the docs are built without warnings from sphinx, as these 112 | will be treated as errors in the build in the CI system as they most often 113 | result in broken styling. 114 | 115 | To look at the docs, use 116 | 117 | .. code:: bash 118 | 119 | $ python -m http.server _build/html 120 | 121 | and visit the printed URL in your browser. 122 | 123 | Making your contribution visible 124 | -------------------------------- 125 | 126 | Together with the changes that will come with you PR, you should check that the 127 | following maintenance files are up-to-date: 128 | 129 | - ``.mailmap`` 130 | - ``CODEOWNERS`` 131 | - ``.zenodo.json`` 132 | 133 | Further details 134 | --------------- 135 | 136 | Please also have a look at the 137 | 138 | - ``ctapipe`` `development guidelines `__ 139 | - The `Open Gamma-Ray Astronomy data formats `__ 140 | which also describe the IRF formats and their definitions. 141 | - ``ctools`` `documentation page on IRFs `__ 142 | - `CTA IRF working group wiki (internal) `__ 143 | 144 | - `CTA IRF Description Document for Prod3b (internal) `__ 145 | -------------------------------------------------------------------------------- /docs/cut_optimization.rst: -------------------------------------------------------------------------------- 1 | .. _cut_optimization: 2 | 3 | Cut Optimization 4 | ================ 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. automodapi:: pyirf.cut_optimization 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/cuts.rst: -------------------------------------------------------------------------------- 1 | .. 
_cuts: 2 | 3 | Calculating and Applying Cuts 4 | ============================= 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. automodapi:: pyirf.cuts 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Examples 4 | ======== 5 | 6 | Calculating Sensitivity and IRFs for EventDisplay DL2 data 7 | ---------------------------------------------------------- 8 | 9 | The ``examples/calculate_eventdisplay_irfs.py`` file is 10 | using ``pyirf`` to optimize cuts, calculate sensitivity and IRFs 11 | and then store these to FITS files for DL2 event lists from EventDisplay. 12 | 13 | The ROOT files were provided by Gernot Maier and converted to FITS format 14 | using `the EventDisplay DL2 converter script `_. 15 | The resulting FITS files are the input to the example and can be downloaded using: 16 | 17 | .. code:: bash 18 | 19 | ./download_private_data.sh 20 | 21 | This requires ``curl`` and ``unzip`` to be installed. 22 | The download is password protected, please ask one of the maintainers for the 23 | password. 24 | 25 | A detailed explanation of the contents of such DL2 files can be found 26 | `here (internal) `_. 27 | 28 | The example can then be run from the root of the repository after installing pyirf 29 | by running: 30 | 31 | .. code:: bash 32 | 33 | python examples/calculate_eventdisplay_irfs.py 34 | 35 | 36 | A jupyter notebook plotting the results and comparing them to the EventDisplay output 37 | is available in ``examples/comparison_with_EventDisplay.ipynb`` 38 | 39 | 40 | Visualization of the included Flux Models 41 | ----------------------------------------- 42 | 43 | The ``examples/plot_spectra.py`` visualizes the Flux models included 44 | in ``pyirf`` for Crab Nebula, cosmic ray and electron flux. 
45 | -------------------------------------------------------------------------------- /docs/gammapy.rst: -------------------------------------------------------------------------------- 1 | .. _gammapy: 2 | 3 | Gammapy Interoperability 4 | ======================== 5 | 6 | This module provides functions to convert the ``pyirf`` quantities 7 | for IRFs and the binning to the corresponding ``gammapy`` classes. 8 | 9 | Reference/API 10 | ------------- 11 | 12 | .. automodapi:: pyirf.gammapy 13 | :no-inheritance-diagram: 14 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. meta:: 2 | :github_url: https://github.com/cta-observatory/pyirf 3 | 4 | Welcome to pyirf's documentation! 5 | ================================= 6 | 7 | `pyirf` is a prototype for the generation of Instrument Response Functions (IRFs) 8 | for the `Cherenkov Telescope Array `__ 9 | (CTA). 10 | The package is being developed and tested by members of the CTA consortium and 11 | is a spin-off of the analog sub-process of the 12 | `pipeline protopype `_. 13 | 14 | Its main features are currently to 15 | 16 | * find the best cutoff in gammaness/score, to discriminate between signal 17 | and background, as well as the angular cut to obtain the best sensitivity 18 | for a given amount of observation time and a given template for the 19 | source of interest (:ref:`cut_optimization`) 20 | * compute the instrument response functions, effective area, 21 | point spread function and energy resolution (:ref:`irf`) 22 | * estimate the sensitivity of the array (:ref:`sensitivity`), 23 | 24 | with plans to extend its capabilities to reach the requirements of the 25 | future observatory. 26 | 27 | .. Should we add the following or is it too soon? ---> 28 | .. Event though the initial efforts are focused on CTA, it is potentially possible 29 | .. 
to extend the capabilities of `pyirf` to other IACTs instruments as well. 30 | 31 | The source code is hosted on a `GitHub repository `__, to 32 | which this documentation is linked. 33 | 34 | .. warning:: 35 | This is not yet stable code, so expect large and rapid changes. 36 | 37 | Citing this software 38 | -------------------- 39 | 40 | .. |doilatest| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4740755.svg 41 | :target: https://doi.org/10.5281/zenodo.4740755 42 | .. |doi_v0.5.0| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4748994.svg 43 | :target: https://doi.org/10.5281/zenodo.4748994 44 | .. |doi_v0.4.0| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4304466.svg 45 | :target: https://doi.org/10.5281/zenodo.4304466 46 | 47 | If you use a released version of this software for a publication, 48 | please cite it by using the corresponding DOI. 49 | 50 | - latest : |doilatest| 51 | - v0.5.0 : |doi_v0.5.0| 52 | - v0.4.0 : |doi_v0.4.0| 53 | 54 | .. toctree:: 55 | :maxdepth: 1 56 | :caption: Overview 57 | :name: _pyirf_intro 58 | 59 | install 60 | introduction 61 | examples 62 | notebooks/index 63 | contribute 64 | changelog 65 | AUTHORS 66 | 67 | 68 | .. toctree:: 69 | :maxdepth: 1 70 | :caption: API Documentation 71 | :name: _pyirf_api_docs 72 | 73 | irf/index 74 | sensitivity 75 | benchmarks/index 76 | cuts 77 | cut_optimization 78 | simulation 79 | spectral 80 | statistics 81 | binning 82 | io/index 83 | interpolation 84 | gammapy 85 | utils 86 | 87 | 88 | Indices and tables 89 | ================== 90 | 91 | * :ref:`genindex` 92 | * :ref:`modindex` 93 | * :ref:`search` 94 | -------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | .. _install: 2 | 3 | Installation 4 | ============ 5 | 6 | ``pyirf`` requires Python ≥3.7 and pip, plus the packages defined in 7 | the ``setup.py``. 
8 | 9 | Core dependencies are 10 | 11 | * ``numpy`` 12 | * ``astropy`` 13 | * ``scipy`` 14 | 15 | We provide an environment file for Anaconda or Miniconda users. 16 | 17 | Installing a released version 18 | ----------------------------- 19 | 20 | To install a released version, just install the ``pyirf`` package using 21 | 22 | .. code-block:: bash 23 | 24 | $ pip install pyirf 25 | 26 | or add it to the dependencies of your project. 27 | 28 | Installing for development 29 | -------------------------- 30 | 31 | If you want to work on pyirf itself, clone the repository and install the local 32 | copy of pyirf in development mode. 33 | 34 | The dependencies required to perform unit-testing and to build the documentation 35 | are defined in ``extras`` under ``tests`` and ``docs`` respectively. 36 | 37 | These requirements can also be enabled by installing the ``all`` extra: 38 | 39 | .. code-block:: bash 40 | 41 | $ pip install -e '.[all]' # or [docs,tests] to install them separately 42 | 43 | 44 | You should isolate your pyirf development environment from the rest of your system. 45 | Either by using a virtual environment or by using ``conda`` environments. 46 | ``pyirf`` provides a conda ``environment.yml``, that includes all dependencies: 47 | 48 | .. code-block:: bash 49 | 50 | $ conda env create -f environment.yml 51 | $ conda activate pyirf 52 | $ pip install -e '.[all]' 53 | 54 | In order to have passing unit-tests you have to download some CTA IRFs 55 | from `zenodo `. Simply run 56 | 57 | .. code-block:: bash 58 | 59 | $ python download_irfs.py 60 | 61 | which will download and unpack three IRF files to ``irfs/``. 62 | 63 | Run the tests to make sure everything is OK: 64 | 65 | .. code-block:: bash 66 | 67 | $ pytest 68 | -------------------------------------------------------------------------------- /docs/introduction.rst: -------------------------------------------------------------------------------- 1 | .. 
_introduction: 2 | 3 | Introduction to ``pyirf`` 4 | ========================= 5 | 6 | 7 | ``pyirf`` aims to provide functions to calculate the Instrument Response Functions (IRFs) 8 | and sensitivity for Imaging Air Cherenkov Telescopes. 9 | 10 | To support a wide range of use cases, ``pyirf`` opts for a library approach of 11 | composable building blocks with well-defined inputs and outputs. 12 | 13 | For more information on IRFs, have a look at the `Specification of the Data Formats for Gamma-Ray Astronomy`_ 14 | or the `ctools documentation on IRFs `_. 15 | 16 | 17 | Currently, ``pyirf`` allows calculation of the usual factorization of the IRFs into: 18 | 19 | * Effective area 20 | * Energy migration 21 | * Point spread function 22 | 23 | Additionally, functions for calculating point-source flux sensitivity are provided. 24 | Flux sensitivity is defined as the smallest flux an IACT can detect with a certain significance, 25 | usually 5 σ according to the Li&Ma likelihood ratio test, in a specified amount of time. 26 | 27 | ``pyirf`` also provides functions to calculate event weights, that are needed 28 | to translate a set of simulations to a physical flux for calculating sensitivity 29 | and expected event counts. 30 | 31 | Event selection with energy dependent cuts is also supported, 32 | but at the moment, only rudimentary functions to find optimal cuts are provided. 33 | 34 | 35 | Input formats 36 | ------------- 37 | 38 | ``pyirf`` does not rely on specific input file formats. 39 | All functions take ``numpy`` arrays, astropy quantities or astropy tables for the 40 | required data and also return the results as these objects. 41 | 42 | ``~pyirf.io`` provides functions to export the internal IRF representation 43 | to FITS files following the `Specification of the Data Formats for Gamma-Ray Astronomy`_ 44 | 45 | 46 | DL2 event lists 47 | ^^^^^^^^^^^^^^^ 48 | 49 | Most functions for calculating IRFs need DL2 event lists as input. 
50 | We use ``~astropy.table.QTable`` instances for this. 51 | ``QTable`` are very similar to the standard ``~astropy.table.Table``, 52 | but offer better interoperability with ``astropy.units.Quantity``. 53 | 54 | We expect certain columns to be present in the tables with the appropriate units. 55 | To learn which functions need which columns to be present, have a look at the :ref:`_pyirf_api_docs` 56 | 57 | Most functions only need a small subgroup of these columns. 58 | 59 | .. table:: Column definitions for DL2 event lists 60 | 61 | +------------------------+--------+----------------------------------------------------+ 62 | | Column | Unit | Explanation | 63 | +========================+========+====================================================+ 64 | | true_energy | TeV | True energy of the simulated shower | 65 | +------------------------+--------+----------------------------------------------------+ 66 | | weight | | Event weight | 67 | +------------------------+--------+----------------------------------------------------+ 68 | | true_source_fov_offset | deg | Distance of the true origin to the FOV center | 69 | +------------------------+--------+----------------------------------------------------+ 70 | | reco_source_fov_offset | deg | Distance of the reco origin to the FOV center | 71 | +------------------------+--------+----------------------------------------------------+ 72 | | true_alt | deg | True altitude of the shower origin | 73 | +------------------------+--------+----------------------------------------------------+ 74 | | true_az | deg | True azimuth of the shower origin | 75 | +------------------------+--------+----------------------------------------------------+ 76 | | pointing_alt | deg | Altitude of the field of view center | 77 | +------------------------+--------+----------------------------------------------------+ 78 | | pointing_az | deg | Azimuth of the field of view center | 79 | 
+------------------------+--------+----------------------------------------------------+ 80 | | reco_energy | TeV | Reconstructed energy of the simulated shower | 81 | +------------------------+--------+----------------------------------------------------+ 82 | | reco_alt | deg | Reconstructed altitude of shower origin | 83 | +------------------------+--------+----------------------------------------------------+ 84 | | reco_az | deg | Reconstructed azimuth of shower origin | 85 | +------------------------+--------+----------------------------------------------------+ 86 | | gh_score | | Gamma/Hadron classification output | 87 | +------------------------+--------+----------------------------------------------------+ 88 | | multiplicity | | Number of telescopes used in the reconstruction | 89 | +------------------------+--------+----------------------------------------------------+ 90 | 91 | 92 | .. _Specification of the Data Formats for Gamma-Ray Astronomy: https://gamma-astro-data-formats.readthedocs.io 93 | -------------------------------------------------------------------------------- /docs/io/index.rst: -------------------------------------------------------------------------------- 1 | .. _io: 2 | 3 | Input / Output 4 | ============== 5 | 6 | Introduction 7 | ------------ 8 | 9 | This module contains functions to read input data and write IRFs in GADF format. 10 | 11 | Currently there is only support for reading EventDisplay DL2 FITS files, 12 | which were converted from the ROOT files by using `EventDisplay DL2 conversion scripts `_. 13 | 14 | 15 | Reference/API 16 | ------------- 17 | 18 | .. automodapi:: pyirf.io 19 | :no-inheritance-diagram: 20 | -------------------------------------------------------------------------------- /docs/irf/index.rst: -------------------------------------------------------------------------------- 1 | .. 
_irf: 2 | 3 | Instrument Response Functions 4 | ============================= 5 | 6 | 7 | Effective Area 8 | -------------- 9 | 10 | The collection area, which is proportional to the gamma-ray efficiency 11 | of detection, is computed as a function of the true energy. The events which 12 | are considered are the ones passing the threshold of the best cutoff plus 13 | the angular cuts. 14 | 15 | Energy Dispersion Matrix 16 | ------------------------ 17 | 18 | The energy dispersion matrix, ratio of the reconstructed energy over the true energy 19 | as a function of the true energy, is computed with the events passing the 20 | threshold of the best cutoff plus the angular cuts. 21 | 22 | The corresponding energy migration matrix can be build from the dispersion matrix. 23 | 24 | 25 | Point Spread Function 26 | --------------------- 27 | 28 | The PSF describes the probability of measuring a gamma ray 29 | of a given true energy and true position at a reconstructed position. 30 | 31 | Background rate 32 | --------------- 33 | 34 | The background rate is calculated as the number of background-like events per 35 | second, reconstructed energy and solid angle. 36 | The current version is computed in radially symmetric bins in the Field Of View. 37 | 38 | Reference/API 39 | ------------- 40 | 41 | .. automodapi:: pyirf.irf 42 | :no-inheritance-diagram: 43 | -------------------------------------------------------------------------------- /docs/notebooks/index.rst: -------------------------------------------------------------------------------- 1 | .. _notebooks: 2 | 3 | ================= 4 | Example Notebooks 5 | ================= 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | fact_example 11 | -------------------------------------------------------------------------------- /docs/sensitivity.rst: -------------------------------------------------------------------------------- 1 | .. 
_sensitivity: 2 | 3 | Sensitivity 4 | =========== 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. automodapi:: pyirf.sensitivity 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/simulation.rst: -------------------------------------------------------------------------------- 1 | .. _simulation: 2 | 3 | Simulation Information 4 | ====================== 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. automodapi:: pyirf.simulations 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/spectral.rst: -------------------------------------------------------------------------------- 1 | .. _spectral: 2 | 3 | Event Weighting and Spectrum Definitions 4 | ======================================== 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | 11 | .. automodapi:: pyirf.spectral 12 | :no-inheritance-diagram: 13 | :include-all-objects: 14 | -------------------------------------------------------------------------------- /docs/statistics.rst: -------------------------------------------------------------------------------- 1 | .. _statistics: 2 | 3 | Statistics 4 | ========== 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. automodapi:: pyirf.statistics 11 | :no-inheritance-diagram: 12 | -------------------------------------------------------------------------------- /docs/utils.rst: -------------------------------------------------------------------------------- 1 | .. _utils: 2 | 3 | Utility functions 4 | ================= 5 | 6 | 7 | Reference/API 8 | ------------- 9 | 10 | .. 
def download_irfs():
    """Download the prod5 CTA-North LST IRF files from zenodo into ``irfs/``.

    Fetches the prod5 zip archive once, then extracts the 50 h,
    average-azimuth, 4-LST IRF file for each zenith angle (20, 40, 60 deg).

    Raises
    ------
    requests.HTTPError
        If the zenodo download does not return a successful status code.
    """
    r = requests.get(
        "https://zenodo.org/record/5499840/files/cta-prod5-zenodo-fitsonly-v0.1.zip?download=1"
    )
    r.raise_for_status()

    # observation time encoded in the IRF file names (50 h in seconds)
    obstime = 50 * 3600

    tarball = (
        "fits/CTA-Performance-prod5-v0.1-North-LSTSubArray-{zenith:d}deg.FITS.tar.gz"
    )
    irf_file = "Prod5-North-{zenith:d}deg-AverageAz-4LSTs.{obstime}s-v0.1.fits.gz"

    output_dir = Path(__file__).parent / "irfs"
    output_dir.mkdir(exist_ok=True)

    # open the downloaded archive once instead of re-parsing the
    # in-memory buffer for every zenith angle
    with ZipFile(BytesIO(r.content)) as zipfile:
        for zenith in (20, 40, 60):
            # each zenith angle has its own tarball inside the zip
            with tarfile.open(
                fileobj=zipfile.open(tarball.format(zenith=zenith), mode="r")
            ) as tar:
                tar.extract(
                    irf_file.format(zenith=zenith, obstime=obstime), path=output_dir
                )
-------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: pyirf 2 | 3 | channels: 4 | - conda-forge 5 | dependencies: 6 | - python=3.11 7 | - numpy 8 | - ipython 9 | - jupyter 10 | - matplotlib 11 | - scipy<1.12 12 | - astropy=5 13 | - setuptools 14 | - tqdm 15 | # tests 16 | - pytest 17 | - pytest-cov 18 | # docs 19 | - towncrier 20 | - numpydoc 21 | - pytables 22 | - nbsphinx 23 | - sphinx 24 | - sphinx_rtd_theme 25 | - sphinx-automodapi 26 | - uproot=4 27 | - awkward=1 28 | - gammapy=1 29 | - pip 30 | - pip: 31 | - ogadf_schema 32 | -------------------------------------------------------------------------------- /examples/plot_spectra.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import astropy.units as u 4 | from scipy.stats import norm 5 | 6 | from pyirf.spectral import ( 7 | DAMPE_P_He_SPECTRUM, 8 | IRFDOC_ELECTRON_SPECTRUM, 9 | IRFDOC_PROTON_SPECTRUM, 10 | PDG_ALL_PARTICLE, 11 | CRAB_HEGRA, 12 | CRAB_MAGIC_JHEAP2015, 13 | POINT_SOURCE_FLUX_UNIT, 14 | DIFFUSE_FLUX_UNIT, 15 | ) 16 | 17 | 18 | cr_spectra = { 19 | "PDG All Particle Spectrum": PDG_ALL_PARTICLE, 20 | "ATIC Proton Fit (from IRF Document)": IRFDOC_PROTON_SPECTRUM, 21 | "DAMPE p + He Table Interpolation": DAMPE_P_He_SPECTRUM 22 | } 23 | 24 | 25 | if __name__ == "__main__": 26 | 27 | energy = np.geomspace(0.001, 300, 1000) * u.TeV 28 | 29 | plt.figure(constrained_layout=True) 30 | plt.title("Crab Nebula Flux") 31 | plt.plot( 32 | energy.to_value(u.TeV), 33 | CRAB_HEGRA(energy).to_value(POINT_SOURCE_FLUX_UNIT), 34 | label="HEGRA", 35 | ) 36 | plt.plot( 37 | energy.to_value(u.TeV), 38 | CRAB_MAGIC_JHEAP2015(energy).to_value(POINT_SOURCE_FLUX_UNIT), 39 | label="MAGIC JHEAP 2015", 40 | ) 41 | 42 | plt.legend() 43 | plt.xscale("log") 44 | plt.yscale("log") 45 | plt.xlabel("E / 
TeV") 46 | plt.ylabel(f'Flux / ({POINT_SOURCE_FLUX_UNIT.to_string("latex")})') 47 | 48 | plt.figure(constrained_layout=True) 49 | plt.title("Cosmic Ray Flux") 50 | 51 | for label, spectrum in cr_spectra.items(): 52 | unit = energy.unit ** 2 * DIFFUSE_FLUX_UNIT 53 | plt.plot( 54 | energy.to_value(u.TeV), 55 | (spectrum(energy) * energy ** 2).to_value(unit), 56 | label=label, 57 | ) 58 | 59 | plt.legend() 60 | plt.xscale("log") 61 | plt.yscale("log") 62 | plt.xlabel(r"$E \,\,/\,\, \mathrm{TeV}$") 63 | plt.ylabel(rf'$E^{2} \cdot \Phi \,\,/\,\,$ ({unit.to_string("latex")})') 64 | 65 | energy = np.geomspace(0.006, 10, 1000) * u.TeV 66 | plt.figure(constrained_layout=True) 67 | plt.title("Electron Flux") 68 | 69 | unit = u.TeV ** 2 / u.m ** 2 / u.s / u.sr 70 | plt.plot( 71 | energy.to_value(u.TeV), 72 | (energy ** 3 * IRFDOC_ELECTRON_SPECTRUM(energy)).to_value(unit), 73 | label="IFAE 2013 (from IRF Document)", 74 | ) 75 | 76 | plt.legend() 77 | plt.xscale("log") 78 | plt.xlim(5e-3, 10) 79 | plt.ylim(1e-5, 0.25e-3) 80 | plt.xlabel(r"$E \,\,/\,\, \mathrm{TeV}$") 81 | plt.ylabel(rf'$E^3 \cdot \Phi \,\,/\,\,$ ({unit.to_string("latex")})') 82 | plt.grid() 83 | 84 | plt.show() 85 | -------------------------------------------------------------------------------- /pyirf/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | 3 | 4 | __all__ = [ 5 | "__version__", 6 | ] 7 | -------------------------------------------------------------------------------- /pyirf/_dev_version/__init__.py: -------------------------------------------------------------------------------- 1 | # Try to use setuptools_scm to get the current version; this is only used 2 | # in development installations from the git repository. 
def angular_resolution(
    events,
    energy_bins,
    energy_type="true",
    quantile=ONE_SIGMA_QUANTILE,
):
    """
    Calculate the angular resolution.

    The angular resolution is defined here as a containment quantile of the
    angular distance (``theta``) distribution; by default the 1-sigma
    containment (~68 %) is used.

    Parameters
    ----------
    events : astropy.table.QTable
        Table of reconstructed events, must contain ``theta`` and the
        selected energy column.
    energy_bins: numpy.ndarray(dtype=float, ndim=1)
        Bin edges in energy.
    energy_type: str
        Either "true" or "reco" energy, selects the energy column used
        for binning. Default is "true".
    quantile : float or list(float)
        Containment quantile(s) to compute. A scalar is treated as a
        one-element list; one output column is created per quantile.

    Returns
    -------
    result : astropy.table.QTable
        One row per energy bin with the requested containment quantiles
        of the angular distance distribution. Bins without events stay NaN.
    """
    energy_key = f"{energy_type}_energy"

    # private copy so the groupby below cannot affect the caller's table
    subset = QTable(events[[energy_key, "theta"]])

    bin_idx, valid = calculate_bin_indices(subset[energy_key], energy_bins)

    result = QTable()
    result[f"{energy_key}_low"] = energy_bins[:-1]
    result[f"{energy_key}_high"] = energy_bins[1:]
    result[f"{energy_key}_center"] = 0.5 * (energy_bins[:-1] + energy_bins[1:])
    result["n_events"] = 0

    # normalize scalar quantile input to a list
    quantiles = quantile if isinstance(quantile, Sequence) else [quantile]

    colnames = [f"angular_resolution_{q * 100:.0f}" for q in quantiles]
    for name in colnames:
        result[name] = u.Quantity(np.nan, subset["theta"].unit)

    # empty input (no selected events): return the all-NaN template
    if len(events) == 0:
        return result

    # group the valid events by energy bin and fill the quantiles per bin
    grouped = subset[valid].group_by(bin_idx[valid])
    for index, group in zip(grouped.groups.keys, grouped.groups):
        result["n_events"][index] = len(group)
        values = np.nanquantile(group["theta"], quantiles)
        for name, value in zip(colnames, values):
            result[name][index] = value

    return result
astropy.units as u 5 | 6 | from ..binning import calculate_bin_indices, UNDERFLOW_INDEX, OVERFLOW_INDEX 7 | from ..compat import COPY_IF_NEEDED 8 | 9 | 10 | NORM_LOWER_SIGMA, NORM_UPPER_SIGMA = norm(0, 1).cdf([-1, 1]) 11 | ONE_SIGMA_COVERAGE = NORM_UPPER_SIGMA - NORM_LOWER_SIGMA 12 | MEDIAN = 0.5 13 | 14 | 15 | def energy_resolution_absolute_68(rel_error): 16 | """Calculate the energy resolution as the central 68% interval. 17 | 18 | Utility function for pyirf.benchmarks.energy_bias_resolution 19 | 20 | Parameters 21 | ---------- 22 | rel_error : numpy.ndarray(dtype=float, ndim=1) 23 | Array of float on which the quantile is calculated. 24 | 25 | Returns 26 | ------- 27 | resolution: numpy.ndarray(dtype=float, ndim=1) 28 | Array containing the 68% intervals 29 | """ 30 | return np.nanquantile(np.abs(rel_error), ONE_SIGMA_COVERAGE) 31 | 32 | 33 | def inter_quantile_distance(rel_error): 34 | """Calculate the energy resolution as the half of the 68% containment. 35 | 36 | Percentile equivalent of the standard deviation. 37 | Utility function for pyirf.benchmarks.energy_bias_resolution 38 | 39 | Parameters 40 | ---------- 41 | rel_error : numpy.ndarray(dtype=float, ndim=1) 42 | Array of float on which the quantile is calculated. 43 | 44 | Returns 45 | ------- 46 | resolution: numpy.ndarray(dtype=float, ndim=1) 47 | Array containing the resolution values. 48 | """ 49 | upper_sigma = np.nanquantile(rel_error, NORM_UPPER_SIGMA) 50 | lower_sigma = np.nanquantile(rel_error, NORM_LOWER_SIGMA) 51 | resolution = 0.5 * (upper_sigma - lower_sigma) 52 | return resolution 53 | 54 | 55 | def energy_bias_resolution( 56 | events, 57 | energy_bins, 58 | energy_type="true", 59 | bias_function=np.nanmedian, 60 | resolution_function=inter_quantile_distance, 61 | ): 62 | """ 63 | Calculate bias and energy resolution. 64 | 65 | Parameters 66 | ---------- 67 | events: astropy.table.QTable 68 | Astropy Table object containing the reconstructed events information. 
def energy_bias_resolution_from_energy_dispersion(
    energy_dispersion,
    migration_bins,
):
    """
    Calculate bias and energy resolution from an energy dispersion matrix.

    Parameters
    ----------
    energy_dispersion: numpy.ndarray
        Energy dispersion matrix of shape
        (n_energy_bins, n_migra_bins, n_source_offset_bins)
    migration_bins: numpy.ndarray
        Bin edges for the relative energy migration (``reco_energy / true_energy``)

    Returns
    -------
    bias: numpy.ndarray
        Median energy migration minus 1 for each
        (energy, fov offset) bin, shape (n_energy_bins, n_source_offset_bins).
        NaN where the dispersion matrix has no entries.
    resolution: numpy.ndarray
        Half of the central 1-sigma (~68 %) migration interval for each
        (energy, fov offset) bin, same shape and NaN convention as ``bias``.
    """

    bin_width = np.diff(migration_bins)
    # cumulative migration distribution per (energy, fov) bin,
    # evaluated at the upper edge of each migration bin
    cdf = np.cumsum(energy_dispersion * bin_width[np.newaxis, :, np.newaxis], axis=1)

    n_energy_bins, _, n_fov_bins = energy_dispersion.shape

    bias = np.full((n_energy_bins, n_fov_bins), np.nan)
    resolution = np.full((n_energy_bins, n_fov_bins), np.nan)

    for energy_bin in range(n_energy_bins):
        for fov_bin in range(n_fov_bins):
            # completely empty bin: leave NaN
            if np.count_nonzero(cdf[energy_bin, :, fov_bin]) == 0:
                continue

            # invert the cdf at the -1 sigma / median / +1 sigma quantiles
            low, median, high = np.interp(
                [NORM_LOWER_SIGMA, MEDIAN, NORM_UPPER_SIGMA],
                cdf[energy_bin, :, fov_bin],
                migration_bins[1:],  # cdf is defined at upper bin edge
            )
            bias[energy_bin, fov_bin] = median - 1
            resolution[energy_bin, fov_bin] = 0.5 * (high - low)

    return bias, resolution
"true_energy": [] * u.TeV, 14 | "theta": [] * u.deg, 15 | } 16 | ) 17 | 18 | table = angular_resolution(events, [1, 10, 100] * u.TeV) 19 | 20 | assert np.all(np.isnan(table["angular_resolution_68"])) 21 | 22 | 23 | @pytest.mark.parametrize("unit", (u.deg, u.rad)) 24 | def test_angular_resolution(unit): 25 | from pyirf.benchmarks import angular_resolution 26 | 27 | np.random.seed(1337) 28 | 29 | TRUE_RES_1 = 0.2 30 | TRUE_RES_2 = 0.05 31 | N = 10000 32 | 33 | true_resolution = np.append(np.full(N, TRUE_RES_1), np.full(N, TRUE_RES_2)) 34 | 35 | rng = np.random.default_rng(0) 36 | 37 | events = QTable( 38 | { 39 | "true_energy": np.concatenate( 40 | [ 41 | [0.5], # below bin 1 to test with underflow 42 | np.full(N - 1, 5.0), 43 | np.full(N - 1, 50.0), 44 | [500], # above bin 2 to test with overflow 45 | ] 46 | ) 47 | * u.TeV, 48 | "theta": np.abs(rng.normal(0, true_resolution)) * u.deg, 49 | } 50 | ) 51 | 52 | events["theta"] = events["theta"].to(unit) 53 | 54 | # add nans to test if nans are ignored 55 | events["true_energy"].value[N // 2] = np.nan 56 | events["true_energy"].value[(2 * N) // 2] = np.nan 57 | 58 | bins = [1, 10, 100] * u.TeV 59 | table = angular_resolution(events, bins) 60 | ang_res = table["angular_resolution_68"].to(u.deg) 61 | assert len(ang_res) == 2 62 | assert u.isclose(ang_res[0], TRUE_RES_1 * u.deg, rtol=0.05) 63 | assert u.isclose(ang_res[1], TRUE_RES_2 * u.deg, rtol=0.05) 64 | 65 | # one value in each bin is nan, which is ignored 66 | np.testing.assert_array_equal(table["n_events"], [9998, 9998]) 67 | 68 | # 2 sigma coverage interval 69 | quantile = norm(0, 1).cdf(2) - norm(0, 1).cdf(-2) 70 | table = angular_resolution(events, bins, quantile=quantile) 71 | ang_res = table["angular_resolution_95"].to(u.deg) 72 | assert len(ang_res) == 2 73 | assert u.isclose(ang_res[0], 2 * TRUE_RES_1 * u.deg, rtol=0.05) 74 | assert u.isclose(ang_res[1], 2 * TRUE_RES_2 * u.deg, rtol=0.05) 75 | 76 | # 25%, 50%, 90% coverage interval 77 | table = 
from astropy.table import QTable
import astropy.units as u
import numpy as np
from scipy.stats import norm
from itertools import product


def test_empty_bias_resolution():
    from pyirf.benchmarks import energy_bias_resolution

    events = QTable({
        'true_energy': [] * u.TeV,
        'reco_energy': [] * u.TeV,
    })

    table = energy_bias_resolution(
        events,
        [1, 10, 100] * u.TeV
    )

    assert np.all(np.isnan(table["bias"]))
    assert np.all(np.isnan(table["resolution"]))


def test_absolute_68():
    from pyirf.benchmarks.energy_bias_resolution import energy_resolution_absolute_68
    rng = np.random.default_rng(0)
    values = rng.normal(0, 1, 1000)
    assert np.isclose(energy_resolution_absolute_68(values), 1, rtol=0.01)


def test_energy_bias_resolution():
    from pyirf.benchmarks import energy_bias_resolution

    np.random.seed(1337)

    TRUE_RES_1 = 0.2
    TRUE_RES_2 = 0.05
    TRUE_BIAS_1 = 0.1
    TRUE_BIAS_2 = -0.05

    true_bias = np.append(np.full(1000, TRUE_BIAS_1), np.full(1000, TRUE_BIAS_2))
    true_resolution = np.append(np.full(1000, TRUE_RES_1), np.full(1000, TRUE_RES_2))

    true_energy = np.concatenate([
        [0.5],  # below bin 1 to test with underflow
        np.full(999, 5.0),
        np.full(999, 50.0),
        [500],  # above bin 2 to test with overflow
    ]) * u.TeV
    reco_energy = true_energy * (1 + np.random.normal(true_bias, true_resolution))

    events = QTable({
        'true_energy': true_energy,
        'reco_energy': reco_energy,
    })

    bias_resolution = energy_bias_resolution(
        events,
        [1, 10, 100] * u.TeV
    )

    bias = bias_resolution['bias'].quantity
    resolution = bias_resolution['resolution'].quantity

    assert len(bias) == len(resolution) == 2

    assert u.isclose(bias[0], TRUE_BIAS_1, rtol=0.05)
    assert u.isclose(bias[1], TRUE_BIAS_2, rtol=0.05)
    assert u.isclose(resolution[0], TRUE_RES_1, rtol=0.05)
    assert u.isclose(resolution[1], TRUE_RES_2, rtol=0.05)


# renamed from ``test_energy_bias_resolution``: the previous duplicate name
# shadowed the event-based test above, so pytest silently never ran it.
def test_energy_bias_resolution_from_energy_dispersion():
    from pyirf.benchmarks import energy_bias_resolution_from_energy_dispersion
    from pyirf.binning import bin_center

    # create a toy energy dispersion
    n_migra_bins = 500
    true_bias = np.array([
        [0.5, 0.6],
        [0, 0.1],
        [-0.1, -0.2],
    ])
    true_resolution = np.array([
        [0.4, 0.5],
        [0.2, 0.3],
        [0.1, 0.15],
    ])

    n_energy_bins, n_fov_bins = true_bias.shape
    energy_bins = np.geomspace(10, 1000, n_energy_bins + 1)
    energy_center = bin_center(energy_bins)
    migra_bins = np.geomspace(0.2, 5, n_migra_bins + 1)

    cdf = np.empty((n_energy_bins, n_migra_bins + 1, n_fov_bins))
    for energy_bin, fov_bin in product(range(n_energy_bins), range(n_fov_bins)):

        energy = energy_center[energy_bin]
        mu = (1 + true_bias[energy_bin, fov_bin]) * energy
        sigma = true_resolution[energy_bin, fov_bin] * energy
        reco_energy = migra_bins * energy

        cdf[energy_bin, :, fov_bin] = norm.cdf(reco_energy, mu, sigma)

    edisp = cdf[:, 1:, :] - cdf[:, :-1, :]
    bin_width = np.diff(migra_bins)
    edisp /= bin_width[np.newaxis, :, np.newaxis]

    bias, resolution = energy_bias_resolution_from_energy_dispersion(
        edisp,
        migra_bins,
    )

    assert np.allclose(bias, true_bias, atol=0.01)
    assert np.allclose(resolution, true_resolution, atol=0.01)

    with_empty = np.zeros((n_energy_bins + 1, n_migra_bins, n_fov_bins))
    with_empty[1:, :, :] = edisp

    bias, resolution = energy_bias_resolution_from_energy_dispersion(
        with_empty,
        migra_bins,
    )

    assert np.all(np.isnan(bias[0]))
    assert np.all(np.isnan(resolution[0]))
    assert np.allclose(bias[1:], true_bias, atol=0.01)
    assert np.allclose(resolution[1:], true_resolution, atol=0.01)
import astropy.units as u
from astropy.coordinates import SkyCoord, SkyOffsetFrame, angular_separation, position_angle

__all__ = [
    'gadf_fov_coords_lon_lat',
    'gadf_fov_coords_theta_phi',
]


def gadf_fov_coords_lon_lat(lon, lat, pointing_lon, pointing_lat):
    """Transform sky coordinates into GADF field-of-view lon/lat coordinates.

    Follows the conventions of the Gamma Astro Data Format, see
    https://gamma-astro-data-formats.readthedocs.io/en/latest/general/coordinates.html

    Parameters
    ----------
    lon, lat : `~astropy.units.Quantity`
        Sky coordinate to be transformed.
    pointing_lon, pointing_lat : `~astropy.units.Quantity`
        Coordinate specifying the pointing position.
        (i.e. the center of the field of view.)

    Returns
    -------
    lon_t, lat_t : `~astropy.units.Quantity`
        Transformed field-of-view coordinate.
    """
    # Offset frame centered on the pointing position (the FoV center)
    pointing = SkyCoord(pointing_lon, pointing_lat)
    fov_frame = SkyOffsetFrame(origin=pointing)

    # Transform the target coordinate into that frame
    fov_coord = SkyCoord(lon, lat).transform_to(fov_frame)

    # The FoV longitude axis is reversed with respect to astropy's
    # offset frame, hence the sign flip on lon
    return -fov_coord.lon, fov_coord.lat


def gadf_fov_coords_theta_phi(lon, lat, pointing_lon, pointing_lat):
    """Transform sky coordinates into GADF field-of-view theta/phi coordinates.

    Follows the conventions of the Gamma Astro Data Format, see
    https://gamma-astro-data-formats.readthedocs.io/en/latest/general/coordinates.html

    Parameters
    ----------
    lon, lat : `~astropy.units.Quantity`
        Sky coordinate to be transformed.
    pointing_lon, pointing_lat : `~astropy.units.Quantity`
        Coordinate specifying the pointing position.
        (i.e. the center of the field of view.)

    Returns
    -------
    theta, phi : `~astropy.units.Quantity`
        Transformed field-of-view coordinate.
    """
    theta = angular_separation(pointing_lon, pointing_lat, lon, lat)

    # astropy's position angle increases towards east of north,
    # while GADF FOV PHI is defined with the opposite sense,
    # so negate and wrap into [0, 360) deg
    phi = position_angle(pointing_lon, pointing_lat, lon, lat)

    return theta.to(u.deg), (-phi).wrap_at(360 * u.deg).to(u.deg)
64 | """ 65 | 66 | theta = angular_separation(pointing_lon, pointing_lat, lon, lat) 67 | 68 | # astropy defines the position angle as increasing towards east of north 69 | phi = position_angle(pointing_lon, pointing_lat, lon, lat) 70 | 71 | # GADF defines FOV PHI opposite to the position angle way so the sign is switched 72 | return theta.to(u.deg), (-phi).wrap_at(360 * u.deg).to(u.deg) -------------------------------------------------------------------------------- /pyirf/cuts.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from astropy.table import Table, QTable 3 | from scipy.ndimage import gaussian_filter1d 4 | import astropy.units as u 5 | 6 | from .binning import calculate_bin_indices, bin_center 7 | from .compat import COPY_IF_NEEDED 8 | 9 | __all__ = [ 10 | "calculate_percentile_cut", 11 | "evaluate_binned_cut", 12 | "evaluate_binned_cut_by_index", 13 | "compare_irf_cuts", 14 | ] 15 | 16 | 17 | def calculate_percentile_cut( 18 | values, 19 | bin_values, 20 | bins, 21 | fill_value, 22 | percentile=68, 23 | min_value=None, 24 | max_value=None, 25 | smoothing=None, 26 | min_events=10, 27 | ): 28 | """ 29 | Calculate cuts as the percentile of a given quantity in bins of another 30 | quantity. 31 | 32 | Parameters 33 | ---------- 34 | values: ``~numpy.ndarray`` or ``~astropy.units.Quantity`` 35 | The values for which the cut should be calculated 36 | bin_values: ``~numpy.ndarray`` or ``~astropy.units.Quantity`` 37 | The values used to sort the ``values`` into bins 38 | edges: ``~numpy.ndarray`` or ``~astropy.units.Quantity`` 39 | Bin edges 40 | fill_value: float or quantity 41 | Value for bins with less than ``min_events``, 42 | must have same unit as values 43 | percentile: float 44 | The percentile to calculate in each bin as a percentage, 45 | i.e. 0 <= percentile <= 100. 
46 | min_value: float or quantity or None 47 | If given, cuts smaller than this value are replaced with ``min_value`` 48 | max_value: float or quantity or None 49 | If given, cuts larger than this value are replaced with ``max_value`` 50 | smoothing: float or None 51 | If given, apply a gaussian filter of width ``sigma`` in terms 52 | of bins. 53 | min_events: int 54 | Bins with less events than this number are replaced with ``fill_value`` 55 | """ 56 | # create a table to make use of groupby operations 57 | # we use a normal table here to avoid astropy/astropy#13840 58 | table = Table({"values": values}, copy=COPY_IF_NEEDED) 59 | unit = table["values"].unit 60 | 61 | # make sure units match 62 | if unit is not None: 63 | fill_value = u.Quantity(fill_value).to(unit) 64 | 65 | if min_value is not None: 66 | min_value = u.Quantity(min_value).to_value(unit) 67 | 68 | if max_value is not None: 69 | max_value = u.Quantity(max_value).to_value(unit) 70 | 71 | bin_index, valid = calculate_bin_indices(bin_values, bins) 72 | by_bin = table[valid].group_by(bin_index[valid]) 73 | 74 | n_bins = len(bins) - 1 75 | cut_table = QTable() 76 | cut_table["low"] = bins[:-1] 77 | cut_table["high"] = bins[1:] 78 | cut_table["center"] = bin_center(bins) 79 | cut_table["n_events"] = 0 80 | 81 | unit = None 82 | if hasattr(fill_value, 'unit'): 83 | unit = fill_value.unit 84 | fill_value = fill_value.value 85 | 86 | percentile = np.asanyarray(percentile) 87 | if percentile.shape == (): 88 | cut_table["cut"] = np.asanyarray(fill_value, values.dtype) 89 | else: 90 | cut_table["cut"] = np.full((n_bins, len(percentile)), fill_value, dtype=values.dtype) 91 | 92 | if unit is not None: 93 | cut_table["cut"].unit = unit 94 | 95 | for bin_idx, group in zip(by_bin.groups.keys, by_bin.groups): 96 | # replace bins with too few events with fill_value 97 | n_events = len(group) 98 | cut_table["n_events"][bin_idx] = n_events 99 | 100 | if n_events < min_events: 101 | cut_table["cut"].value[bin_idx] = 
fill_value 102 | else: 103 | value = np.nanpercentile(group["values"], percentile) 104 | if min_value is not None or max_value is not None: 105 | value = np.clip(value, min_value, max_value) 106 | 107 | cut_table["cut"].value[bin_idx] = value 108 | 109 | if smoothing is not None: 110 | cut_table["cut"].value[:] = gaussian_filter1d( 111 | cut_table["cut"].value, 112 | smoothing, 113 | mode="nearest", 114 | ) 115 | 116 | return cut_table 117 | 118 | 119 | def evaluate_binned_cut_by_index(values, bin_index, valid, cut_table, op): 120 | """ 121 | Evaluate a binned cut as defined in cut_table with pre-computed bin index. 122 | 123 | This is an optimization over evaluating `evaluate_binned_cut` 124 | multiple times with the same values to prevent re-computation of the index. 125 | 126 | 127 | Parameters 128 | ---------- 129 | values: ``~numpy.ndarray`` or ``~astropy.units.Quantity`` 130 | The values on which the cut should be evaluated 131 | bin_index: ``~numpy.ndarray`` 132 | The index into ``cut_table`` corresponding to the entries in ``values``. 133 | cut_table: ``~astropy.table.Table`` 134 | A table describing the binned cuts, e.g. as created by 135 | ``~pyirf.cuts.calculate_percentile_cut``. 136 | Required columns: 137 | - `low`: lower edges of the bins 138 | - `high`: upper edges of the bins, 139 | - `cut`: cut value 140 | op: callable(a, b) -> bool 141 | A function taking two arguments, comparing element-wise and 142 | returning an array of booleans. 143 | Must support vectorized application. 144 | """ 145 | if not isinstance(cut_table, QTable): 146 | raise ValueError('cut_table needs to be an astropy.table.QTable') 147 | 148 | result = np.zeros(len(bin_index), dtype=bool) 149 | result[valid] = op(values[valid], cut_table["cut"][bin_index[valid]]) 150 | return result 151 | 152 | 153 | def evaluate_binned_cut(values, bin_values, cut_table, op): 154 | """ 155 | Evaluate a binned cut as defined in cut_table on given events. 
156 | 157 | Events with bin_values outside the bin edges defined in cut table 158 | will be set to False. 159 | 160 | Parameters 161 | ---------- 162 | values: ``~numpy.ndarray`` or ``~astropy.units.Quantity`` 163 | The values on which the cut should be evaluated 164 | bin_values: ``~numpy.ndarray`` or ``~astropy.units.Quantity`` 165 | The values used to sort the ``values`` into bins 166 | cut_table: ``~astropy.table.Table`` 167 | A table describing the binned cuts, e.g. as created by 168 | ``~pyirf.cuts.calculate_percentile_cut``. 169 | Required columns: 170 | - `low`: lower edges of the bins 171 | - `high`: upper edges of the bins, 172 | - `cut`: cut value 173 | op: callable(a, b) -> bool 174 | A function taking two arguments, comparing element-wise and 175 | returning an array of booleans. 176 | Must support vectorized application. 177 | 178 | 179 | Returns 180 | ------- 181 | result: np.ndarray[bool] 182 | A mask for each entry in ``values`` indicating if the event 183 | passes the bin specific cut given in cut table. 
184 | """ 185 | if not isinstance(cut_table, QTable): 186 | raise ValueError("cut_table needs to be an astropy.table.QTable") 187 | 188 | bins = np.append(cut_table["low"], cut_table["high"][-1]) 189 | bin_index, valid = calculate_bin_indices(bin_values, bins) 190 | return evaluate_binned_cut_by_index(values, bin_index, valid, cut_table, op) 191 | 192 | 193 | def compare_irf_cuts(cuts): 194 | """ 195 | checks if the same cuts have been applied in all of them 196 | 197 | Parameters 198 | ---------- 199 | cuts: list of QTables 200 | list of cuts each entry in the list correspond to one set of IRFs 201 | Returns 202 | ------- 203 | match: Boolean 204 | if the cuts are the same in all the files 205 | """ 206 | for i in range(len(cuts) - 1): 207 | if (cuts[i] != cuts[i + 1]).any(): 208 | return False 209 | return True 210 | -------------------------------------------------------------------------------- /pyirf/exceptions.py: -------------------------------------------------------------------------------- 1 | class IRFException(Exception): 2 | pass 3 | 4 | 5 | class MissingColumns(IRFException): 6 | """ 7 | Exception to be raised when a table is missing a required column. 8 | """ 9 | 10 | def __init__(self, columns): 11 | super().__init__(f"Table is missing required columns {columns}") 12 | 13 | 14 | class WrongColumnUnit(IRFException): 15 | """ 16 | Exception to be raised when a column of a table has the wrong unit. 
17 | """ 18 | 19 | def __init__(self, column, unit, expected): 20 | super().__init__( 21 | f'Unit {unit} of column "{column}"' 22 | f' has incompatible unit "{unit}", expected {expected}' 23 | f" required column {column}" 24 | ) 25 | -------------------------------------------------------------------------------- /pyirf/gammapy.py: -------------------------------------------------------------------------------- 1 | try: 2 | import gammapy 3 | except ImportError: 4 | raise ImportError('You need gammapy installed to use this module of pyirf') from None 5 | 6 | from gammapy.irf import EffectiveAreaTable2D, PSF3D, EnergyDispersion2D 7 | from gammapy.maps import MapAxis 8 | import astropy.units as u 9 | 10 | 11 | 12 | def _create_offset_axis(fov_offset_bins): 13 | return MapAxis.from_edges(fov_offset_bins, name="offset") 14 | 15 | def _create_energy_axis_true(true_energy_bins): 16 | return MapAxis.from_edges(true_energy_bins, name="energy_true") 17 | 18 | 19 | @u.quantity_input( 20 | effective_area=u.m ** 2, true_energy_bins=u.TeV, fov_offset_bins=u.deg 21 | ) 22 | def create_effective_area_table_2d( 23 | effective_area, 24 | true_energy_bins, 25 | fov_offset_bins, 26 | ): 27 | ''' 28 | Create a :py:class:`gammapy.irf.EffectiveAreaTable2D` from pyirf outputs. 29 | 30 | Parameters 31 | ---------- 32 | effective_area: astropy.units.Quantity[area] 33 | Effective area array, must have shape (n_energy_bins, n_fov_offset_bins) 34 | true_energy_bins: astropy.units.Quantity[energy] 35 | Bin edges in true energy 36 | fov_offset_bins: astropy.units.Quantity[angle] 37 | Bin edges in the field of view offset. 38 | For Point-Like IRFs, only giving a single bin is appropriate. 
39 | 40 | Returns 41 | ------- 42 | gammapy.irf.EffectiveAreaTable2D 43 | aeff2d: gammapy.irf.EffectiveAreaTable2D 44 | ''' 45 | offset_axis = _create_offset_axis(fov_offset_bins) 46 | energy_axis_true = _create_energy_axis_true(true_energy_bins) 47 | 48 | return EffectiveAreaTable2D( 49 | axes = [energy_axis_true, 50 | offset_axis], 51 | data=effective_area, 52 | ) 53 | 54 | 55 | 56 | @u.quantity_input( 57 | psf=u.sr ** -1, 58 | true_energy_bins=u.TeV, 59 | source_offset_bins=u.deg, 60 | fov_offset_bins=u.deg, 61 | ) 62 | def create_psf_3d( 63 | psf, 64 | true_energy_bins, 65 | source_offset_bins, 66 | fov_offset_bins, 67 | ): 68 | """ 69 | Create a :py:class:`gammapy.irf.PSF3D` from pyirf outputs. 70 | 71 | Parameters 72 | ---------- 73 | psf: astropy.units.Quantity[(solid angle)^-1] 74 | Point spread function array, must have shape 75 | (n_energy_bins, n_fov_offset_bins, n_source_offset_bins) 76 | true_energy_bins: astropy.units.Quantity[energy] 77 | Bin edges in true energy 78 | source_offset_bins: astropy.units.Quantity[angle] 79 | Bin edges in the source offset. 80 | fov_offset_bins: astropy.units.Quantity[angle] 81 | Bin edges in the field of view offset. 82 | For Point-Like IRFs, only giving a single bin is appropriate. 83 | 84 | Returns 85 | ------- 86 | psf: gammapy.irf.PSF3D 87 | """ 88 | offset_axis = _create_offset_axis(fov_offset_bins) 89 | energy_axis_true = _create_energy_axis_true(true_energy_bins) 90 | rad_axis = MapAxis.from_edges(source_offset_bins, name='rad') 91 | 92 | return PSF3D( 93 | axes = [energy_axis_true, 94 | offset_axis, 95 | rad_axis], 96 | data = psf 97 | ) 98 | 99 | 100 | @u.quantity_input( 101 | true_energy_bins=u.TeV, fov_offset_bins=u.deg, 102 | ) 103 | def create_energy_dispersion_2d( 104 | energy_dispersion, 105 | true_energy_bins, 106 | migration_bins, 107 | fov_offset_bins, 108 | ): 109 | """ 110 | Create a :py:class:`gammapy.irf.EnergyDispersion2D` from pyirf outputs. 
111 | 112 | Parameters 113 | ---------- 114 | energy_dispersion: numpy.ndarray 115 | Energy dispersion array, must have shape 116 | (n_energy_bins, n_migra_bins, n_source_offset_bins) 117 | true_energy_bins: astropy.units.Quantity[energy] 118 | Bin edges in true energy 119 | migration_bins: numpy.ndarray 120 | Bin edges for the relative energy migration (``reco_energy / true_energy``) 121 | fov_offset_bins: astropy.units.Quantity[angle] 122 | Bin edges in the field of view offset. 123 | For Point-Like IRFs, only giving a single bin is appropriate. 124 | 125 | Returns 126 | ------- 127 | edisp: gammapy.irf.EnergyDispersion2D 128 | """ 129 | offset_axis = _create_offset_axis(fov_offset_bins) 130 | energy_axis_true = _create_energy_axis_true(true_energy_bins) 131 | migra_axis = MapAxis.from_edges(migration_bins, name="migra") 132 | 133 | return EnergyDispersion2D( 134 | axes = [energy_axis_true, 135 | migra_axis, 136 | offset_axis], 137 | data = energy_dispersion, 138 | ) 139 | -------------------------------------------------------------------------------- /pyirf/interpolation/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Collection of interpolation and extrapolation methods 3 | """ 4 | 5 | from .base_extrapolators import ( 6 | BaseExtrapolator, 7 | DiscretePDFExtrapolator, 8 | ParametrizedExtrapolator, 9 | ) 10 | from .base_interpolators import ( 11 | BaseInterpolator, 12 | DiscretePDFInterpolator, 13 | ParametrizedInterpolator, 14 | PDFNormalization, 15 | ) 16 | from .component_estimators import ( 17 | BaseComponentEstimator, 18 | DiscretePDFComponentEstimator, 19 | EffectiveAreaEstimator, 20 | EnergyDispersionEstimator, 21 | ParametrizedComponentEstimator, 22 | PSFTableEstimator, 23 | RadMaxEstimator, 24 | ) 25 | from .griddata_interpolator import GridDataInterpolator 26 | from .moment_morph_interpolator import MomentMorphInterpolator 27 | from .nearest_neighbor_searcher import ( 28 | 
"""Base classes for extrapolators"""
from abc import ABCMeta, abstractmethod

import numpy as np
from pyirf.binning import bin_center
from pyirf.interpolation.base_interpolators import PDFNormalization

__all__ = ["BaseExtrapolator", "ParametrizedExtrapolator", "DiscretePDFExtrapolator"]


class BaseExtrapolator(metaclass=ABCMeta):
    """
    Base class for all extrapolators, only knowing grid-points,
    providing a common __call__-interface.
    """

    def __init__(self, grid_points):
        """BaseExtrapolator

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims):
            Grid points at which templates exist

        """
        self.grid_points = grid_points
        # 1D grids are treated as single-dimension grids of shape (n_points, 1)
        if self.grid_points.ndim == 1:
            self.grid_points = self.grid_points.reshape(*self.grid_points.shape, 1)
        self.N = self.grid_points.shape[0]
        self.grid_dim = self.grid_points.shape[1]

    @abstractmethod
    def extrapolate(self, target_point):
        """Overridable function for the actual extrapolation code"""

    def __call__(self, target_point):
        """Providing a common __call__ interface

        Parameters
        ----------
        target_point: np.ndarray, shape=(1, n_dims)
            Target for extrapolation

        Returns
        -------
        Extrapolated result.
        """
        return self.extrapolate(target_point=target_point)


class ParametrizedExtrapolator(BaseExtrapolator):
    """
    Base class for all extrapolators used with IRF components that can be
    treated independently, e.g. parametrized ones like 3Gauss
    but also AEff. Derived from pyirf.interpolation.BaseExtrapolator
    """

    def __init__(self, grid_points, params):
        """ParametrizedExtrapolator

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims)
            Grid points at which templates exist
        params: np.ndarray, shape=(n_points, ..., n_params)
            Corresponding parameter values at each point in grid_points.
            First dimension has to correspond to number of grid_points

        Note
        ----
        Also calls pyirf.interpolation.BaseExtrapolator.__init__
        """
        super().__init__(grid_points)

        self.params = params

        # a flat parameter array is interpreted as one parameter per grid point
        if self.params.ndim == 1:
            self.params = self.params[..., np.newaxis]


class DiscretePDFExtrapolator(BaseExtrapolator):
    """
    Base class for all extrapolators used with binned IRF components like EDisp.
    Derived from pyirf.interpolation.BaseExtrapolator
    """

    def __init__(
        self, grid_points, bin_edges, binned_pdf, normalization=PDFNormalization.AREA
    ):
        """DiscretePDFExtrapolator

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims)
            Grid points at which templates exist
        bin_edges: np.ndarray, shape=(n_bins+1)
            Edges of the data binning
        binned_pdf: np.ndarray, shape=(n_points, ..., n_bins)
            Content of each bin in bin_edges for
            each point in grid_points. First dimension has to correspond to number
            of grid_points, last dimension has to correspond to number of bins for
            the quantity that should be extrapolated (e.g. the Migra axis for EDisp)
        normalization: PDFNormalization
            How the PDF is normalized

        Note
        ----
        Also calls pyirf.interpolation.BaseExtrapolator.__init__
        """
        super().__init__(grid_points)

        self.normalization = normalization
        self.bin_edges = bin_edges
        self.bin_mids = bin_center(self.bin_edges)
        self.binned_pdf = binned_pdf
class BaseInterpolator(metaclass=ABCMeta):
    """
    Base class for all interpolators, only knowing grid-points,
    providing a common __call__-interface.
    """

    def __init__(self, grid_points):
        """BaseInterpolator

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims):
            Grid points at which interpolation templates exist
        """
        # Promote a flat 1D grid to the canonical (n_points, 1) layout so
        # downstream code can always rely on a two-dimensional grid.
        if grid_points.ndim == 1:
            grid_points = grid_points.reshape(*grid_points.shape, 1)

        self.grid_points = grid_points
        self.n_points = self.grid_points.shape[0]
        self.grid_dim = self.grid_points.shape[1]

    @abstractmethod
    def interpolate(self, target_point):
        """Overridable function for the actual interpolation code"""

    def __call__(self, target_point):
        """Providing a common __call__ interface

        Parameters
        ----------
        target_point: np.ndarray, shape=(1, n_dims)
            Target for inter-/extrapolation

        Returns
        -------
        Interpolated result.
        """
        return self.interpolate(target_point=target_point)
class ParametrizedInterpolator(BaseInterpolator):
    """
    Base class for all interpolators used with IRF components that can be
    independently interpolated, e.g. parametrized ones like 3Gauss
    but also AEff. Derived from pyirf.interpolation.BaseInterpolator
    """

    def __init__(self, grid_points, params):
        """ParametrizedInterpolator

        Parameters
        ----------
        grid_points, np.ndarray, shape=(n_points, n_dims)
            Grid points at which interpolation templates exist
        params: np.ndarray, shape=(n_points, ..., n_params)
            Corresponding parameter values at each point in grid_points.
            The first dimension has to correspond to the number of grid points

        Note
        ----
        Also calls pyirf.interpolation.BaseInterpolators.__init__
        """
        super().__init__(grid_points)

        # A single parameter per grid point may arrive as a flat array;
        # broadcast it to the canonical (n_points, 1) layout.
        self.params = params if params.ndim != 1 else params[:, np.newaxis]
class DiscretePDFInterpolator(BaseInterpolator):
    """
    Base class for all interpolators used with binned IRF components like EDisp.
    Derived from pyirf.interpolation.BaseInterpolator
    """

    def __init__(
        self, grid_points, bin_edges, binned_pdf, normalization=PDFNormalization.AREA
    ):
        """DiscretePDFInterpolator

        Parameters
        ----------
        grid_points : np.ndarray, shape=(n_points, n_dims)
            Grid points at which interpolation templates exist
        bin_edges : np.ndarray, shape=(n_bins+1)
            Edges of the data binning
        binned_pdf : np.ndarray, shape=(n_points, ..., n_bins)
            Content of each bin in bin_edges for each point in grid_points.
            The first dimension has to correspond to the number of grid
            points, the last dimension to the number of bins of the quantity
            that should be interpolated (e.g. the Migra axis for EDisp)
        normalization : PDFNormalization
            How the PDF is normalized

        Note
        ----
        Also calls pyirf.interpolation.BaseInterpolators.__init__
        """
        super().__init__(grid_points)

        # Keep binning, precomputed bin centers and the template PDFs around
        # for the concrete interpolator implementations.
        self.normalization = normalization
        self.binned_pdf = binned_pdf
        self.bin_edges = bin_edges
        self.bin_mids = bin_center(bin_edges)
class GridDataInterpolator(ParametrizedInterpolator):
    """Wrapper around scipy.interpolate.griddata."""

    def __init__(self, grid_points, params, **griddata_kwargs):
        """Parametrized Interpolator using scipy.interpolate.griddata

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims)
            Grid points at which interpolation templates exist
        params: np.ndarray, shape=(n_points, ..., n_params)
            Structured array of corresponding parameter values at each
            point in grid_points.
            The first dimension has to correspond to the number of grid points
        griddata_kwargs: dict
            Keyword-arguments passed through to scipy.interpolate.griddata
            [1], e.g. the interpolation method. If empty, scipy's defaults
            are used.

        References
        ----------
        .. [1] Scipy Documentation, scipy.interpolate.griddata
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
        """
        super().__init__(grid_points, params)

        self.griddata_kwargs = griddata_kwargs

    def interpolate(self, target_point):
        """
        Wrapper around scipy.interpolate.griddata [1]

        Parameters
        ----------
        target_point: np.ndarray, shape=(1, n_dims)
            Target point for interpolation

        Returns
        -------
        interpolant: np.ndarray, shape=(1, ..., n_params)
            Interpolated parameter values

        References
        ----------
        .. [1] Scipy Documentation, scipy.interpolate.griddata
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
        """
        # griddata yields one entry per target point; squeeze and reshape
        # back to the documented (1, ..., n_params) output layout.
        raw = griddata(
            self.grid_points, self.params, target_point, **self.griddata_kwargs
        )

        return raw.squeeze().reshape(1, *self.params.shape[1:])
class BaseNearestNeighborSearcher(BaseInterpolator):
    """
    Dummy NearestNeighbor approach usable instead of
    actual Interpolation/Extrapolation
    """

    def __init__(self, grid_points, values, norm_ord=2):
        """
        BaseNearestNeighborSearcher

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims)
            Grid points at which templates exist
        values: np.ndarray, shape=(n_points, ...)
            Corresponding IRF values at grid_points
        norm_ord: non-zero int
            Order of the norm which is used to compute the distances,
            passed to numpy.linalg.norm [1]. Defaults to 2,
            which uses the euclidean norm.

        Raises
        ------
        ValueError:
            If norm_ord is not a finite, positive integer
            (this was previously mis-documented as TypeError)

        Notes
        -----
        Also calls pyirf.interpolation.BaseInterpolators.__init__
        """
        super().__init__(grid_points)

        self.values = values

        # norm_ord must be a finite, positive integer-valued number.
        # Non-numeric inputs (e.g. the string orders numpy.linalg.norm
        # accepts for matrices, like "nuc") raise TypeError on comparison
        # and are mapped onto the same ValueError.
        try:
            valid = (
                norm_ord > 0
                and np.isfinite(norm_ord)
                and norm_ord == int(norm_ord)
            )
        except TypeError:
            valid = False

        if not valid:
            raise ValueError(
                f"Only positiv integers allowed for norm_ord, got {norm_ord}."
            )

        self.norm_ord = norm_ord

    def interpolate(self, target_point):
        """
        Takes a grid of IRF values for a bunch of different parameters and returns
        the values at the nearest grid point as seen from the target point.

        Parameters
        ----------
        target_point: numpy.ndarray, shape=(1, n_dims)
            Value for which the nearest neighbor should be found (target point)

        Returns
        -------
        content_new: numpy.ndarray, shape=(1, ...)
            values at nearest neighbor

        Notes
        -----
        In case of multiple nearest neighbors, the values corresponding
        to the first one are returned.
        """
        # Allow flat (n_dims,) targets by promoting them to (1, n_dims)
        if target_point.ndim == 1:
            target_point = target_point.reshape(1, *target_point.shape)

        distances = np.linalg.norm(
            self.grid_points - target_point, ord=self.norm_ord, axis=1
        )

        # np.argmin returns the first minimum, realizing the
        # "first nearest neighbor wins" tie-breaking documented above.
        index = np.argmin(distances)

        return self.values[index, :]
class DiscretePDFNearestNeighborSearcher(BaseNearestNeighborSearcher):
    """
    Dummy NearestNeighbor approach usable instead of
    actual interpolation/extrapolation.
    Compatible with discretized PDF IRF component API.
    """

    def __init__(self, grid_points, bin_edges, binned_pdf, norm_ord=2):
        """
        NearestNeighborSearcher compatible with discretized PDF IRF components API

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims)
            Grid points at which templates exist
        bin_edges: np.ndarray, shape=(n_bins+1)
            Edges of the data binning. Ignored for nearest neighbor searching.
        binned_pdf: np.ndarray, shape=(n_points, ..., n_bins)
            Content of each bin in bin_edges for each point in grid_points.
            The first dimension has to correspond to the number of grid
            points, the last dimension to the number of bins of the quantity
            that should be interpolated (e.g. the Migra axis for EDisp)
        norm_ord: non-zero int
            Order of the norm which is used to compute the distances,
            passed to numpy.linalg.norm. Defaults to 2,
            which uses the euclidean norm.

        Notes
        -----
        Also calls pyirf.interpolation.BaseNearestNeighborSearcher.__init__
        """
        # The binned PDFs simply take the role of the stored values;
        # bin_edges is deliberately not used by the nearest neighbor search.
        super().__init__(grid_points=grid_points, values=binned_pdf, norm_ord=norm_ord)


# Register as virtual subclasses so issubclass/isinstance checks against the
# discrete-PDF interpolator/extrapolator APIs accept this searcher as well.
DiscretePDFInterpolator.register(DiscretePDFNearestNeighborSearcher)
DiscretePDFExtrapolator.register(DiscretePDFNearestNeighborSearcher)
class ParametrizedNearestNeighborSearcher(BaseNearestNeighborSearcher):
    """
    Dummy NearestNeighbor approach usable instead of
    actual interpolation/extrapolation
    Compatible with parametrized IRF component API.
    """

    def __init__(self, grid_points, params, norm_ord=2):
        """
        NearestNeighborSearcher compatible with parametrized IRF components API

        Parameters
        ----------
        grid_points: np.ndarray, shape=(n_points, n_dims)
            Grid points at which templates exist
        params: np.ndarray, shape=(n_points, ..., n_params)
            Corresponding parameter values at each point in grid_points.
            The first dimension has to correspond to the number of grid points
        norm_ord: non-zero int
            Order of the norm which is used to compute the distances,
            passed to numpy.linalg.norm. Defaults to 2,
            which uses the euclidean norm.

        Notes
        -----
        Also calls pyirf.interpolation.BaseNearestNeighborSearcher.__init__
        """
        # The parameter array simply takes the role of the stored values.
        super().__init__(grid_points=grid_points, values=params, norm_ord=norm_ord)


# Register as virtual subclasses so issubclass/isinstance checks against the
# parametrized interpolator/extrapolator APIs accept this searcher as well.
ParametrizedInterpolator.register(ParametrizedNearestNeighborSearcher)
ParametrizedExtrapolator.register(ParametrizedNearestNeighborSearcher)


def test_BaseExtrapolator_instantiation():
    """Test BaseExtrapolator initialization"""
    from pyirf.interpolation.base_extrapolators import BaseExtrapolator

    grid_1d = np.array([1, 2, 3])
    grid_2d = np.array([[1, 1], [2, 1], [1.5, 1.5]])

    # The abstract base class itself must not be instantiable
    with pytest.raises(TypeError):
        BaseExtrapolator(grid_1d)

    class DummyBaseExtrapolator(BaseExtrapolator):
        def extrapolate(self, target_point):
            return 42

    assert DummyBaseExtrapolator(grid_1d)(np.array([[0]])) == 42
    assert DummyBaseExtrapolator(grid_2d)(np.array([[0.25, 0.25]])) == 42
def test_ParametrizedExtrapolator_instantiation():
    """Test ParametrizedExtrapolator initialization"""
    from pyirf.interpolation.base_extrapolators import ParametrizedExtrapolator

    grid_1d = np.array([1, 2, 3])
    grid_2d = np.array([[1, 1], [2, 1], [1.5, 1.5]])
    target_1d = np.array([[0]])
    target_2d = np.array([[0.25, 0.25]])
    params = np.array([[1], [2], [3]])

    # The abstract base class itself must not be instantiable
    with pytest.raises(TypeError):
        ParametrizedExtrapolator(grid_1d, params)

    class DummyParametrizedExtrapolator(ParametrizedExtrapolator):
        def extrapolate(self, target_point):
            return 42

    assert DummyParametrizedExtrapolator(grid_1d, params)(target_1d) == 42
    assert DummyParametrizedExtrapolator(grid_2d, params)(target_2d) == 42

    # A flat parameter array (one param per point) has to be broadcast
    # internally to shape (n_points, 1)
    assert DummyParametrizedExtrapolator(grid_1d, params.squeeze())(target_1d) == 42


def test_DiscretePDFExtrapolator_instantiation():
    """Test DiscretePDFExtrapolator initialization and sanity checks"""
    from pyirf.interpolation.base_extrapolators import DiscretePDFExtrapolator

    grid_1d = np.array([1, 2, 3])
    grid_2d = np.array([[1, 1], [2, 1], [1.5, 1.5]])
    target_1d = np.array([[0]])
    target_2d = np.array([[0.25, 0.25]])

    bin_edges = np.linspace(-1, 1, 11)
    binned_pdf = np.ones(shape=(len(grid_1d), len(bin_edges) - 1))

    # The abstract base class itself must not be instantiable
    with pytest.raises(TypeError):
        DiscretePDFExtrapolator(grid_1d, bin_edges, binned_pdf)

    class DummyBinnedExtrapolator(DiscretePDFExtrapolator):
        def extrapolate(self, target_point):
            return 42

    assert DummyBinnedExtrapolator(grid_1d, bin_edges, binned_pdf)(target_1d) == 42
    assert DummyBinnedExtrapolator(grid_2d, bin_edges, binned_pdf)(target_2d) == 42
def test_virtual_subclasses():
    """Nearest neighbor searchers must count as virtual subclasses of the extrapolators"""
    from pyirf.interpolation import (
        DiscretePDFExtrapolator,
        DiscretePDFNearestNeighborSearcher,
        ParametrizedExtrapolator,
        ParametrizedNearestNeighborSearcher,
    )

    # Matching APIs are registered ...
    for searcher, extrapolator in (
        (DiscretePDFNearestNeighborSearcher, DiscretePDFExtrapolator),
        (ParametrizedNearestNeighborSearcher, ParametrizedExtrapolator),
    ):
        assert issubclass(searcher, extrapolator)

    # ... but the cross combinations are not
    for searcher, extrapolator in (
        (ParametrizedNearestNeighborSearcher, DiscretePDFExtrapolator),
        (DiscretePDFNearestNeighborSearcher, ParametrizedExtrapolator),
    ):
        assert not issubclass(searcher, extrapolator)


def test_BaseInterpolator_instantiation():
    """Test BaseInterpolator initialization"""
    from pyirf.interpolation.base_interpolators import BaseInterpolator

    grid_1d = np.array([1, 2, 3])
    grid_2d = np.array([[1, 1], [2, 1], [1.5, 1.5]])

    # The abstract base class itself must not be instantiable
    with pytest.raises(TypeError):
        BaseInterpolator(grid_1d)

    class DummyBaseInterpolator(BaseInterpolator):
        def interpolate(self, target_point, **kwargs):
            return 42

    assert DummyBaseInterpolator(grid_1d)(np.array([[1]])) == 42
    assert DummyBaseInterpolator(grid_2d)(np.array([[1.25, 1.25]])) == 42
def test_ParametrizedInterpolator_instantiation():
    """Test ParametrizedInterpolator initialization"""
    from pyirf.interpolation.base_interpolators import ParametrizedInterpolator

    grid_1d = np.array([1, 2, 3])
    grid_2d = np.array([[1, 1], [2, 1], [1.5, 1.5]])
    params = np.array([[1], [2], [3]])

    # The abstract base class itself must not be instantiable
    with pytest.raises(TypeError):
        ParametrizedInterpolator(grid_1d, params)

    class DummyParametrizedInterpolator(ParametrizedInterpolator):
        def interpolate(self, target_point, **kwargs):
            return 42

    assert DummyParametrizedInterpolator(grid_1d, params)(np.array([[1]])) == 42
    assert (
        DummyParametrizedInterpolator(grid_2d, params)(np.array([[1.25, 1.25]])) == 42
    )


def test_DiscretePDFInterpolator_instantiation():
    """Test DiscretePDFInterpolator initialization and sanity checks"""
    from pyirf.interpolation.base_interpolators import DiscretePDFInterpolator

    grid_1d = np.array([1, 2, 3])
    grid_2d = np.array([[1, 1], [2, 1], [1.5, 1.5]])

    bin_edges = np.linspace(-1, 1, 11)
    binned_pdf = np.ones(shape=(len(grid_1d), len(bin_edges) - 1))

    # The abstract base class itself must not be instantiable
    with pytest.raises(TypeError):
        DiscretePDFInterpolator(grid_1d, bin_edges, binned_pdf)

    class DummyBinnedInterpolator(DiscretePDFInterpolator):
        def interpolate(self, target_point, **kwargs):
            return 42

    assert DummyBinnedInterpolator(grid_1d, bin_edges, binned_pdf)(np.array([[1]])) == 42
    assert (
        DummyBinnedInterpolator(grid_2d, bin_edges, binned_pdf)(np.array([[1.25, 1.25]]))
        == 42
    )
def test_virtual_subclasses():
    """Nearest neighbor searchers must count as virtual subclasses of the interpolators"""
    from pyirf.interpolation import (
        DiscretePDFInterpolator,
        DiscretePDFNearestNeighborSearcher,
        ParametrizedInterpolator,
        ParametrizedNearestNeighborSearcher,
    )

    # Matching APIs are registered ...
    for searcher, interpolator in (
        (DiscretePDFNearestNeighborSearcher, DiscretePDFInterpolator),
        (ParametrizedNearestNeighborSearcher, ParametrizedInterpolator),
    ):
        assert issubclass(searcher, interpolator)

    # ... but the cross combinations are not
    for searcher, interpolator in (
        (ParametrizedNearestNeighborSearcher, DiscretePDFInterpolator),
        (DiscretePDFNearestNeighborSearcher, ParametrizedInterpolator),
    ):
        assert not issubclass(searcher, interpolator)


def test_GridDataInterpolator_1DGrid():
    """Test GridDataInterpolator on a 1D grid"""
    from pyirf.interpolation import GridDataInterpolator

    grid_points = np.array([[0], [1]])
    target_point = np.array([[0.5]])

    template_a = np.array([[[0, 1], [1, 1]], [[0, 2], [2, 3]], [[0, 3], [3, 5]]])
    # The second template is exactly twice the first one
    template_b = 2 * template_a

    dummy_data = np.array([template_a, template_b])

    interpolator = GridDataInterpolator(
        grid_points=grid_points, params=dummy_data, method="linear"
    )
    interpolant = interpolator(target_point)

    # Halfway between the templates the linear interpolant is 1.5 * template_a
    assert np.allclose(interpolant, 1.5 * template_a)
    assert interpolant.shape == (1, *dummy_data.shape[1:])
def test_GridDataInterpolator_2DGrid():
    """Test GridDataInterpolator on a 2D grid"""
    from pyirf.interpolation import GridDataInterpolator

    grid_points = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    target_point = np.array([[0.5, 0.5]])

    dummy_data1 = np.array([[[0, 1], [1, 1]], [[0, 2], [2, 3]], [[0, 3], [3, 5]]])
    dummy_data2 = np.array([[[0, 2], [1, 2]], [[0, 4], [2, 6]], [[0, 6], [3, 10]]])
    dummy_data3 = np.array([[[0, 1], [2, 1]], [[0, 2], [4, 3]], [[0, 3], [6, 5]]])
    dummy_data4 = np.array([[[0, 2], [2, 2]], [[0, 4], [4, 6]], [[0, 6], [6, 10]]])

    dummy_data = np.array([dummy_data1, dummy_data2, dummy_data3, dummy_data4])

    interpolator = GridDataInterpolator(
        grid_points=grid_points, params=dummy_data, method="linear"
    )
    interpolant = interpolator(target_point)

    # At the center of the unit square the linear interpolant evaluates
    # to 1.5 * dummy_data1 for this particular set of templates
    assert np.allclose(interpolant, 1.5 * dummy_data1)
    assert interpolant.shape == (1, *dummy_data.shape[1:])


@pytest.fixture
def grid_1d():
    return np.array([[1], [2], [3], [4]])


@pytest.fixture
def grid_2d():
    return np.array([[1, 1], [1, 2], [2, 1], [2, 2]])


@pytest.fixture
def binned_pdf(grid_1d):
    return np.array(
        [
            [[np.full(10, x), np.full(10, x / 2)], [np.full(10, 2 * x), np.zeros(10)]]
            for x in grid_1d
        ]
    )


def test_BaseNearestNeighborSearcher_1DGrid(grid_1d, binned_pdf):
    from pyirf.interpolation import BaseNearestNeighborSearcher

    searcher = BaseNearestNeighborSearcher(grid_1d, binned_pdf, norm_ord=2)

    # Below the grid range the first grid point is nearest
    assert np.array_equal(searcher(np.array([0])), binned_pdf[0, :])

    # 1.9 is closest to the second grid point at 2
    assert np.array_equal(searcher(np.array([[1.9]])), binned_pdf[1, :])


def test_BaseNearestNeighborSearcher_2DGrid(grid_2d, binned_pdf):
    from pyirf.interpolation import BaseNearestNeighborSearcher

    searcher = BaseNearestNeighborSearcher(grid_2d, binned_pdf, norm_ord=2)

    assert np.array_equal(searcher(np.array([[0, 1]])), binned_pdf[0, :])

    # Flat 1D targets are promoted internally to shape (1, n_dims)
    assert np.array_equal(searcher(np.array([3, 3])), binned_pdf[-1, :])
def test_BaseNearestNeighborSearcher_manhatten_norm(grid_2d, binned_pdf):
    from pyirf.interpolation import BaseNearestNeighborSearcher

    # Same lookups as the euclidean case, but with the L1 norm
    searcher = BaseNearestNeighborSearcher(grid_2d, binned_pdf, norm_ord=1)

    assert np.array_equal(searcher(np.array([[0, 1]])), binned_pdf[0, :])
    assert np.array_equal(searcher(np.array([[3, 3]])), binned_pdf[-1, :])


def test_BaseNearestNeighborSearcher_wrong_norm(grid_1d, binned_pdf):
    from pyirf.interpolation import BaseNearestNeighborSearcher

    # Negative, fractional, infinite and string orders are all rejected
    for invalid_order in (-2, 1.5, np.inf, "nuc"):
        with pytest.raises(
            ValueError, match="Only positiv integers allowed for norm_ord"
        ):
            BaseNearestNeighborSearcher(grid_1d, binned_pdf, norm_ord=invalid_order)


def test_DiscretePDFNearestNeighborSearcher(grid_2d, binned_pdf):
    from pyirf.interpolation import DiscretePDFNearestNeighborSearcher

    bin_edges = np.linspace(0, 1, binned_pdf.shape[-1] + 1)

    searcher = DiscretePDFNearestNeighborSearcher(
        grid_points=grid_2d, bin_edges=bin_edges, binned_pdf=binned_pdf, norm_ord=1
    )

    assert np.array_equal(searcher(np.array([[0, 1]])), binned_pdf[0, :])
    assert np.array_equal(searcher(np.array([[3, 3]])), binned_pdf[-1, :])
def test_ParametrizedNearestNeighborSearcher(grid_2d, binned_pdf):
    from pyirf.interpolation import ParametrizedNearestNeighborSearcher

    searcher = ParametrizedNearestNeighborSearcher(
        grid_points=grid_2d, params=binned_pdf, norm_ord=1
    )

    # The nearest grid corner wins in both directions
    assert np.array_equal(searcher(np.array([[0, 1]])), binned_pdf[0, :])
    assert np.array_equal(searcher(np.array([[3, 3]])), binned_pdf[-1, :])
@pytest.fixture
def data():
    """Common dataset of binned Gaussians for the interpolation tests.
    Binned PDFs sum to 1."""
    bin_edges = np.linspace(-5, 30, 101)
    distributions = [norm(5, 1), norm(10, 2), norm(15, 3)]

    # Bin the analytic distributions and convert bin contents to densities
    binned_pdfs = np.array([np.diff(dist.cdf(bin_edges)) for dist in distributions])
    binned_pdfs /= np.diff(bin_edges)[np.newaxis, :]

    return {
        "bin_edges": bin_edges,
        "means": np.array([5, 10, 15]),
        "stds": np.array([1, 2, 3]),
        "distributions": distributions,
        "binned_pdfs": binned_pdfs,
        "grid_points": np.array([1, 2, 3]),
    }


def test_cdf_values(data):
    from pyirf.interpolation.quantile_interpolator import cdf_values

    # An empty histogram yields an all-zero cdf
    empty = np.zeros_like(data["binned_pdfs"])[0]
    np.testing.assert_array_equal(
        cdf_values(empty, data["bin_edges"], PDFNormalization.AREA), 0
    )

    cdf_est = cdf_values(
        data["binned_pdfs"][0], data["bin_edges"], PDFNormalization.AREA
    )

    # Monotonically non-decreasing and capped at exactly 1
    assert np.all(np.diff(cdf_est) >= 0)
    assert np.max(cdf_est) == 1

    # Matches the analytic cdf of the underlying Gaussian
    true_cdf = data["distributions"][0].cdf(data["bin_edges"][1:])
    np.testing.assert_allclose(cdf_est, true_cdf, atol=1e-12)


def test_ppf_values(data):
    from pyirf.interpolation.quantile_interpolator import cdf_values, ppf_values

    # Skip the 0% and 100% quantiles, which are analytically +- inf
    quantiles = np.linspace(0, 1, 10)[1:-2]

    ppf_true = data["distributions"][0].ppf(quantiles)
    bin_mids = bin_center(data["bin_edges"])

    cdf_est = cdf_values(
        data["binned_pdfs"][0], data["bin_edges"], PDFNormalization.AREA
    )
    ppf_est = ppf_values(bin_mids, cdf_est, quantiles)

    # Truth and estimate must agree to within one bin width
    np.testing.assert_allclose(ppf_true, ppf_est, atol=np.diff(data["bin_edges"])[0])
def test_pdf_from_ppf(data):
    from pyirf.interpolation.quantile_interpolator import (
        cdf_values,
        pdf_from_ppf,
        ppf_values,
    )

    quantiles = np.linspace(0, 1, 1000)
    bin_mids = bin_center(data["bin_edges"])

    cdf_est = cdf_values(
        data["binned_pdfs"][0], data["bin_edges"], PDFNormalization.AREA
    )
    ppf_est = ppf_values(bin_mids, cdf_est, quantiles)

    # Round-tripping ppf -> pdf must recover the input pdf within +-1%
    pdf_est = pdf_from_ppf(data["bin_edges"], ppf_est, quantiles)
    np.testing.assert_allclose(pdf_est, data["binned_pdfs"][0], atol=1e-2)


def test_norm_pdf(data):
    from pyirf.interpolation.quantile_interpolator import norm_pdf

    # A scaled pdf is normalized back to an area integral of 1
    renormed = norm_pdf(
        2 * data["binned_pdfs"][0],
        data["bin_edges"],
        PDFNormalization.AREA,
    )
    np.testing.assert_allclose(np.sum(renormed * np.diff(data["bin_edges"])), 1)
    np.testing.assert_allclose(renormed, data["binned_pdfs"][0])

    # An all-zero pdf stays all-zero
    zeros = norm_pdf(
        np.zeros(len(data["bin_edges"]) - 1),
        data["bin_edges"],
        PDFNormalization.AREA,
    )
    np.testing.assert_allclose(zeros, 0)


def test_interpolate_binned_pdf(data):
    from pyirf.interpolation import QuantileInterpolator

    # Interpolate between the outer grid points and compare against
    # the distribution at the middle grid point
    interpolator = QuantileInterpolator(
        grid_points=data["grid_points"][[0, 2]],
        bin_edges=data["bin_edges"],
        binned_pdf=data["binned_pdfs"][[0, 2], :],
        quantile_resolution=1e-3,
    )

    interp = interpolator(target_point=np.array([data["grid_points"][1]])).squeeze()

    bin_mids = bin_center(data["bin_edges"])
    bin_width = np.diff(data["bin_edges"])[0]

    # Estimate mean and standard deviation from the interpolant
    mean_est = np.average(bin_mids, weights=interp)
    std_est = np.sqrt(np.average((bin_mids - mean_est) ** 2, weights=interp))

    # Both moments must match the truth within one bin of uncertainty
    assert np.isclose(mean_est, data["means"][1], atol=bin_width)
    assert np.isclose(std_est, data["stds"][1], atol=bin_width)
@pytest.fixture
def non_rect_grid():
    # Deliberately non-rectangular, so the triangulation is unambiguous
    return Delaunay(np.array([[0, 0], [10, 20], [30, 20], [20, 0], [40, 0]]))


def test_plumb_point_distance():
    """Test line-segment to point distance computation"""
    from pyirf.interpolation.utils import plumb_point_dist

    # Vertical segment from (0, 0) to (0, 1)
    segment = np.array([[0, 0], [0, 1]])

    # Foot of the perpendicular lies between the end points
    assert plumb_point_dist(segment, np.array([-1, 0.5])) == 1
    assert plumb_point_dist(segment, np.array([-0.7, 0.25])) == 0.7

    # Foot of the perpendicular coincides with an end point
    assert plumb_point_dist(segment, np.array([0, 2.1])) == 1.1
    assert plumb_point_dist(segment, np.array([0, -1])) == 1

    # Foot outside the segment: distance to the nearest end point
    assert plumb_point_dist(segment, np.array([-1, -1])) == np.sqrt(2)  # (0, 0)
    assert plumb_point_dist(segment, np.array([3, 3])) == np.sqrt(13)  # (0, 1)

    # Horizontal segment from (0, 0) to (1, 0)
    segment = np.array([[0, 0], [1, 0]])

    assert plumb_point_dist(segment, np.array([0.5, 0.5])) == 0.5
    assert plumb_point_dist(segment, np.array([2, 0])) == 1  # beyond (1, 0)
    assert plumb_point_dist(segment, np.array([1, 1])) == 1  # above end point
    assert plumb_point_dist(segment, np.array([-1, -1])) == np.sqrt(2)  # (0, 0)

    # Arbitrary diagonal segment; isclose needed here, as a small numerical
    # deviation of +/- eps occurs in this case
    segment = np.array([[1, 1], [-1, -1]])
    assert np.isclose(plumb_point_dist(segment, np.array([-1, 1])), np.sqrt(2))
    assert plumb_point_dist(segment, np.array([-2, -3])) == np.sqrt(5)  # (-1, -1)
40 | assert plumb_point_dist(line, np.array([2, 0])) == 1 41 | 42 | # Plumb point on end point 43 | assert plumb_point_dist(line, np.array([1, 1])) == 1 44 | 45 | # Nearest point is (0, 0) 46 | assert plumb_point_dist(line, np.array([-1, -1])) == np.sqrt(2) 47 | 48 | # Test arbitrary line 49 | line = np.array([[1, 1], [-1, -1]]) 50 | # isclose needed here, as there is a small numerical deviation 51 | # of +/- eps in this case. Plumb point between end-points 52 | assert np.isclose(plumb_point_dist(line, np.array([-1, 1])), np.sqrt(2)) 53 | 54 | # Nearest point is (-1, -1) 55 | assert plumb_point_dist(line, np.array([-2, -3])) == np.sqrt(5) 56 | 57 | 58 | def test_point_facet_angle(): 59 | """Test angle computation in triangle, function should return cos(angle)""" 60 | from pyirf.interpolation.utils import point_facet_angle 61 | 62 | line = np.array([[0, 0], [0, 1]]) 63 | 64 | assert np.isclose( 65 | point_facet_angle(line, np.array([1, 0])), np.cos(45 * np.pi / 180) 66 | ) 67 | assert np.isclose( 68 | point_facet_angle(line, np.array([-1, 0])), np.cos(45 * np.pi / 180) 69 | ) 70 | # these points build a same-side triangle 71 | assert np.isclose( 72 | point_facet_angle(line, np.array([np.sqrt(3) / 2, 0.5])), 73 | np.cos(60 * np.pi / 180), 74 | ) 75 | 76 | 77 | def test_find_nearest_facet_rect_grid(): 78 | """Test nearest facet finding on rectanguar grid""" 79 | from pyirf.interpolation.utils import find_nearest_facet 80 | 81 | rect_grid = Delaunay( 82 | np.array([[0, 0], [0, 20], [20, 20], [20, 0], [40, 0], [40, 20]]) 83 | ) 84 | qhull_points = rect_grid.points[rect_grid.convex_hull] 85 | 86 | nearest_facet_ind = find_nearest_facet(qhull_points, np.array([10, -5])) 87 | assert np.logical_or( 88 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[0, 0], [20, 0]])), 89 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[20, 0], [0, 0]])), 90 | ) 91 | 92 | nearest_facet_ind = find_nearest_facet(qhull_points, np.array([45, 15])) 93 | assert 
np.logical_or( 94 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[40, 0], [40, 20]])), 95 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[40, 20], [40, 0]])), 96 | ) 97 | 98 | nearest_facet_ind = find_nearest_facet(qhull_points, np.array([-10, -1])) 99 | assert np.logical_or( 100 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[0, 0], [0, 20]])), 101 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[0, 20], [0, 0]])), 102 | ) 103 | 104 | 105 | def test_find_nearest_facet_non_rect_grid(non_rect_grid): 106 | """Test nearest facet finding on a non rectanguar grid to catch some more cases""" 107 | from pyirf.interpolation.utils import find_nearest_facet 108 | 109 | qhull_points = non_rect_grid.points[non_rect_grid.convex_hull] 110 | 111 | nearest_facet_ind = find_nearest_facet(qhull_points, np.array([5, 20])) 112 | assert np.logical_or( 113 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[0, 0], [10, 20]])), 114 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[10, 20], [0, 0]])), 115 | ) 116 | 117 | nearest_facet_ind = find_nearest_facet(qhull_points, np.array([35, 30])) 118 | assert np.logical_or( 119 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[10, 20], [30, 20]])), 120 | np.array_equal(qhull_points[nearest_facet_ind], np.array([[30, 20], [10, 20]])), 121 | ) 122 | 123 | 124 | def test_find_simplex_to_facet(non_rect_grid): 125 | """ 126 | Test facet-to-simplex finding on non rectangular grid, as the triangulation 127 | is clear in this case as it is build from left to right and not ambiguous. 128 | For the rectangular grid used above two triangulations exist. 
129 | """ 130 | from pyirf.interpolation.utils import find_simplex_to_facet 131 | 132 | simplices_points = non_rect_grid.points[non_rect_grid.simplices] 133 | 134 | assert find_simplex_to_facet(simplices_points, np.array([[0, 0], [0, 20]])) == 0 135 | assert find_simplex_to_facet(simplices_points, np.array([[10, 20], [30, 20]])) == 1 136 | assert find_simplex_to_facet(simplices_points, np.array([[30, 20], [40, 0]])) == 2 137 | 138 | 139 | def test_find_nearest_simplex(non_rect_grid): 140 | """ 141 | Test whole nearest simplex finding on non rectangular grid, as the triangulation 142 | is clear in this case as it is build from left to right and not ambiguous. 143 | For the rectangular grid used above two triangulations exist. 144 | """ 145 | from pyirf.interpolation.utils import find_nearest_simplex 146 | 147 | assert find_nearest_simplex(non_rect_grid, np.array([-10, -10])) == 0 148 | assert find_nearest_simplex(non_rect_grid, np.array([10, 30])) == 1 149 | assert find_nearest_simplex(non_rect_grid, np.array([20.00000000001, -10])) == 2 150 | 151 | 152 | def test_get_bin_width(): 153 | from pyirf.interpolation.utils import get_bin_width 154 | from pyirf.interpolation import PDFNormalization 155 | 156 | bins = np.array([0, 1, 3]) 157 | np.testing.assert_allclose(get_bin_width(bins, PDFNormalization.AREA), [1, 2]) 158 | 159 | bins = np.array([0, np.pi / 3, np.pi / 2]) 160 | width = get_bin_width(bins, PDFNormalization.CONE_SOLID_ANGLE) 161 | np.testing.assert_allclose(width, [np.pi, np.pi]) 162 | -------------------------------------------------------------------------------- /pyirf/interpolation/utils.py: -------------------------------------------------------------------------------- 1 | import astropy.units as u 2 | import numpy as np 3 | 4 | from ..utils import cone_solid_angle 5 | from .base_interpolators import PDFNormalization 6 | 7 | 8 | def get_bin_width(bin_edges, normalization): 9 | if normalization is PDFNormalization.AREA: 10 | return np.diff(bin_edges) 
def plumb_point_dist(line, target):
    """
    Compute minimal distance between target and line under the constraint, that it has
    to lay between the points building the line and not on the extension of it.

    Parameters
    ----------
    line: np.ndarray, shape=(2, n_dims)
        Array of two points spanning a line segment. Might be in two or three dims n_dims.
    target: np.ndarray, shape=(n_dims)
        Target point, of which the minimal distance to line segement is needed

    Returns
    -------
    d_min: float
        Minimal distance to line segement between points in line
    """
    a, b = line[0], line[1]
    p = target

    ab = b - a
    ba = a - b

    # Foot (plumb) points of p projected onto the lines OA + r1*AB (foot_a)
    # and OB + r2*BA (foot_b), for details see
    # https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Vector_formulation
    foot_a = a + np.dot(p - a, ab) * ab / np.dot(ab, ab)
    foot_b = b + np.dot(p - b, ba) * ba / np.dot(ba, ba)

    # Parameter value r1/r2 at which each foot point lies on its line;
    # use the y-component when the segment is vertical in x.
    if ab[0] == 0:
        r1 = (foot_a[1] - a[1]) / ab[1]
    else:
        r1 = (foot_a[0] - a[0]) / ab[0]

    if ba[0] == 0:
        r2 = (foot_b[1] - b[1]) / ba[1]
    else:
        r2 = (foot_b[0] - b[0]) / ba[0]

    # |r1| + |r2| == 1 means the plumb point lies between a and b, in which
    # case the perpendicular distance is the searched one. When the plumb
    # point coincides with a or b, fall back to the end-point method
    # consistently, as the two computations can differ by +/- eps.
    between_endpoints = np.isclose(np.abs(r1) + np.abs(r2), 1)
    on_endpoint = np.isclose(r1, 0) or np.isclose(r2, 0)

    if between_endpoints and not on_endpoint:
        # Perpendicular point-to-line distance, for details see
        # https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Another_vector_formulation
        return np.linalg.norm(np.cross(p - a, ab)) / np.linalg.norm(ab)

    # Otherwise the nearest point of the segment is one of its end-points,
    # so the searched distance is the distance to the closer end-point.
    return np.min(np.array([np.linalg.norm(a - p), np.linalg.norm(b - p)]))


def point_facet_angle(line, target):
    """
    Compute cos(angle) between target and a line segment"

    Parameters
    ----------
    line: np.ndarray, shape=(2, n_dims)
        Array of two points spanning a line segment. Might be in two or three dims n_dims.
    target: np.ndarray, shape=(n_dims)
        Target point, of which the angle is needed.

    Returns
    -------
    cos_angle: float
        Cosine of angle at target in the triangle ABT with line-points A and B and target T.
    """
    to_b = line[1] - target
    to_a = line[0] - target

    # For details see https://en.wikipedia.org/wiki/Angle#Dot_product_and_generalisations
    return np.dot(to_b, to_a) / (np.linalg.norm(to_a) * np.linalg.norm(to_b))
108 | 109 | Returns 110 | ------- 111 | nearest_facet_ind: int 112 | Index of the nearest facet in qhull_points. 113 | """ 114 | plumbs = np.array( 115 | [ 116 | (plumb_point_dist(line, target), point_facet_angle(line, target)) 117 | for line in qhull_points 118 | ], 119 | dtype=[("plumb_dist", "= 1 116 | Degree of smoothness wanted in the extrapolation region. See [1] for 117 | additional information. Defaults to 1. 118 | 119 | Raises 120 | ------ 121 | TypeError: 122 | If m is not a number 123 | ValueError: 124 | If m is not a non-zero integer 125 | 126 | Note 127 | ---- 128 | Also calls pyirf.interpolation.ParametrizedNearestSimplexExtrapolator.__init__. 129 | 130 | References 131 | ---------- 132 | .. [1] P. Alfred (1984). Triangular Extrapolation. Technical summary rept., 133 | Univ. of Wisconsin-Madison. https://apps.dtic.mil/sti/pdfs/ADA144660.pdf 134 | 135 | """ 136 | 137 | def __init__(self, grid_points, params, m=1): 138 | super().__init__(grid_points, params) 139 | 140 | # Test wether m is a number 141 | try: 142 | m > 0 143 | except TypeError: 144 | raise TypeError(f"Only positive integers allowed for m, got {m}.") 145 | 146 | # Test wether m is a finite, positive integer 147 | if (m <= 0) or ~np.isfinite(m) or (m != int(m)): 148 | raise ValueError(f"Only positive integers allowed for m, got {m}.") 149 | 150 | self.m = m 151 | 152 | def extrapolate(self, target_point): 153 | if self.grid_dim == 1: 154 | return super().extrapolate(target_point) 155 | elif self.grid_dim == 2: 156 | visible_facet_points = find_visible_facets(self.grid_points, target_point) 157 | 158 | if visible_facet_points.shape[0] == 1: 159 | return super().extrapolate(target_point) 160 | else: 161 | simplices_points = self.triangulation.points[ 162 | self.triangulation.simplices 163 | ] 164 | 165 | visible_simplices_indices = np.array( 166 | [ 167 | find_simplex_to_facet(simplices_points, facet) 168 | for facet in visible_facet_points 169 | ] 170 | ) 171 | 172 | extrapolation_weigths = 
from .eventdisplay import read_eventdisplay_fits
from .gadf import (
    create_aeff2d_hdu,
    create_energy_dispersion_hdu,
    create_psf_table_hdu,
    create_rad_max_hdu,
    create_background_2d_hdu,
    create_background_3d_hdu,
)


#: Public API of the ``pyirf.io`` subpackage.
# Fix: "create_psf_table_hdu" was listed twice; each name appears once now.
__all__ = [
    "read_eventdisplay_fits",
    "create_psf_table_hdu",
    "create_aeff2d_hdu",
    "create_energy_dispersion_hdu",
    "create_rad_max_hdu",
    "create_background_2d_hdu",
    "create_background_3d_hdu",
]
def read_eventdisplay_fits(infile, use_histogram=True):
    """
    Read a DL2 FITS file as produced by the EventDisplay DL2 converter
    from ROOT files:
    https://github.com/Eventdisplay/Converters/blob/master/DL2/generate_DL2_file.py

    Parameters
    ----------
    infile : str or pathlib.Path
        Path to the input fits file
    use_histogram : bool
        If True, use number of simulated events from histogram provided in fits file,
        if False, estimate this number from the unique run_id, pointing direction
        combinations and the number of events per run in the run header.
        This will fail e.g. for protons with cuts already applied, since many
        runs will have 0 events surviving cuts.

    Returns
    -------
    events: astropy.QTable
        Astropy Table object containing the reconstructed events information.
    simulated_events: ``~pyirf.simulations.SimulatedEventsInfo``
    """
    log.debug(f"Reading {infile}")

    # EVENTS holds the DL2 events, SIMULATED EVENTS the histogram of all
    # simulated showers, RUNHEADER the per-run simulation configuration.
    events = QTable.read(infile, hdu="EVENTS")
    sim_events = QTable.read(infile, hdu="SIMULATED EVENTS")
    run_header = QTable.read(infile, hdu="RUNHEADER")[0]

    # Translate EventDisplay column names to the pyirf convention
    for new_name, old_name in COLUMN_MAP.items():
        events.rename_column(old_name, new_name)

    n_runs = len(unique(events[['obs_id', 'pointing_az', 'pointing_alt']]))
    log.info(f"Estimated number of runs from obs ids and pointing position: {n_runs}")

    # Two estimates of the total number of simulated showers: from the run
    # header times the estimated run count, and from the provided histogram.
    n_showers_guessed = n_runs * run_header["num_use"] * run_header["num_showers"]
    n_showers_hist = int(sim_events["EVENTS"].sum())
    n_showers = n_showers_hist if use_histogram else n_showers_guessed

    log.debug("Number of events histogram: %d", n_showers_hist)
    log.debug("Number of events from n_runs and run header: %d", n_showers_guessed)
    log.debug("Using number of events from %s", "histogram" if use_histogram else "guess")

    energy_range = run_header["E_range"]
    viewcone = run_header["viewcone"]
    sim_info = SimulatedEventsInfo(
        n_showers=n_showers,
        energy_min=u.Quantity(energy_range[0], u.TeV),
        energy_max=u.Quantity(energy_range[1], u.TeV),
        max_impact=u.Quantity(run_header["core_range"][1], u.m),
        spectral_index=run_header["spectral_index"],
        viewcone_min=u.Quantity(viewcone[0], u.deg),
        viewcone_max=u.Quantity(viewcone[1], u.deg),
    )

    return events, sim_info
#: Unit of the background rate IRF
BACKGROUND_UNIT = u.Unit("s-1 TeV-1 sr-1")


def background_2d(events, reco_energy_bins, fov_offset_bins, t_obs):
    """
    Calculate background rates in radially symmetric bins in the field of view.

    GADF documentation here:
    https://gamma-astro-data-formats.readthedocs.io/en/latest/irfs/full_enclosure/bkg/index.html#bkg-2d

    Parameters
    ----------
    events: astropy.table.QTable
        DL2 events table of the selected background events.
        Needed columns for this function: `reco_source_fov_offset`, `reco_energy`, `weight`
    reco_energy_bins: astropy.units.Quantity[energy]
        The bins in reconstructed energy to be used for the IRF
    fov_offset_bins: astropy.units.Quantity[angle]
        The bins in the field of view offset to be used for the IRF
    t_obs: astropy.units.Quantity[time]
        Observation time. This must match with how the individual event
        weights are calculated.

    Returns
    -------
    bg_rate: astropy.units.Quantity
        The background rate as particles per energy, time and solid angle
        in the specified bins.

        Shape: (len(reco_energy_bins) - 1, len(fov_offset_bins) - 1)
    """

    hist, _, _ = np.histogram2d(
        events["reco_energy"].to_value(u.TeV),
        events["reco_source_fov_offset"].to_value(u.deg),
        bins=[
            reco_energy_bins.to_value(u.TeV),
            fov_offset_bins.to_value(u.deg),
        ],
        weights=events["weight"],
    )

    # divide all energy bins by their width
    # hist has shape (n_energy, n_fov_offset) so we need to transpose and then back
    bin_width_energy = np.diff(reco_energy_bins)
    per_energy = (hist.T / bin_width_energy).T

    # divide by solid angle in each fov bin and the observation time
    bin_solid_angle = np.diff(cone_solid_angle(fov_offset_bins))
    bg_rate = per_energy / t_obs / bin_solid_angle

    return bg_rate.to(BACKGROUND_UNIT)


def background_3d_lonlat(events, reco_energy_bins, fov_lon_bins, fov_lat_bins, t_obs):
    """
    Calculate background rates in square bins in the field of view.

    GADF documentation here:
    https://gamma-astro-data-formats.readthedocs.io/en/latest/irfs/full_enclosure/bkg/index.html#bkg-3d

    Parameters
    ----------
    events: astropy.table.QTable
        DL2 events table of the selected background events.
        Needed columns for this function: `reco_source_fov_lon`,
        `reco_source_fov_lat`, `reco_energy`, `weight`.
    reco_energy_bins: astropy.units.Quantity[energy]
        The bins in reconstructed energy to be used for the IRF
    fov_lon_bins: astropy.units.Quantity[angle]
        The bins in the field of view longitudinal direction to be used for the IRF.
        Will become DETX bins.
    fov_lat_bins: astropy.units.Quantity[angle]
        The bins in the field of view latitudinal direction to be used for the IRF.
        Will become DETY bins.
    t_obs: astropy.units.Quantity[time]
        Observation time. This must match with how the individual event
        weights are calculated.

    Returns
    -------
    bg_rate: astropy.units.Quantity
        The background rate as particles per energy, time and solid angle
        in the specified bins.

        Shape: (len(reco_energy_bins) - 1, len(fov_lon_bins) - 1, len(fov_lat_bins) - 1)
    """

    hist, _ = np.histogramdd(
        [
            events["reco_energy"].to_value(u.TeV),
            events["reco_source_fov_lon"].to_value(u.deg),
            events["reco_source_fov_lat"].to_value(u.deg),
        ],
        bins=[
            reco_energy_bins.to_value(u.TeV),
            fov_lon_bins.to_value(u.deg),
            fov_lat_bins.to_value(u.deg),
        ],
        weights=events["weight"],
    )

    # divide all energy bins by their width
    bin_width_energy = np.diff(reco_energy_bins)
    per_energy = hist / bin_width_energy[:, np.newaxis, np.newaxis]

    # divide by solid angle in each fov bin and the observation time
    bin_solid_angle = rectangle_solid_angle(
        fov_lon_bins[:-1],
        fov_lon_bins[1:],
        fov_lat_bins[:-1],
        fov_lat_bins[1:],
    )
    bg_rate = per_energy / t_obs / bin_solid_angle
    return bg_rate.to(BACKGROUND_UNIT)
@u.quantity_input(area=u.m ** 2)
def effective_area(n_selected, n_simulated, area):
    """
    Calculate effective area for histograms of selected and total simulated events

    Parameters
    ----------
    n_selected: int or numpy.ndarray[int]
        The number of surviving (e.g. triggered, analysed, after cuts)
    n_simulated: int or numpy.ndarray[int]
        The total number of events simulated
    area: astropy.units.Quantity[area]
        Area in which particle's core position was simulated

    Returns
    -------
    effective_area: astropy.units.Quantity[area]
        The selected fraction of events scaled by the simulated area,
        same shape as ``n_selected``.
    """
    return (n_selected / n_simulated) * area


def effective_area_per_energy(selected_events, simulation_info, true_energy_bins):
    """
    Calculate effective area in bins of true energy.

    Parameters
    ----------
    selected_events: astropy.table.QTable
        DL2 events table, required columns for this function: `true_energy`.
    simulation_info: pyirf.simulations.SimulatedEventsInfo
        The overall statistics of the simulated events
    true_energy_bins: astropy.units.Quantity[energy]
        The bin edges in which to calculate effective area.

    Returns
    -------
    effective_area: astropy.units.Quantity[area]
        Effective area per true energy bin,
        shape (len(true_energy_bins) - 1, ).
    """
    # Simulated cores are scattered uniformly in a circle of radius max_impact
    area = np.pi * simulation_info.max_impact ** 2

    hist_selected = create_histogram_table(
        selected_events, true_energy_bins, "true_energy"
    )
    hist_simulated = simulation_info.calculate_n_showers_per_energy(true_energy_bins)

    return effective_area(hist_selected["n"], hist_simulated, area)


def effective_area_per_energy_and_fov(
    selected_events, simulation_info, true_energy_bins, fov_offset_bins
):
    """
    Calculate effective area in bins of true energy and field of view offset.

    Parameters
    ----------
    selected_events: astropy.table.QTable
        DL2 events table, required columns for this function:
        - `true_energy`
        - `true_source_fov_offset`
    simulation_info: pyirf.simulations.SimulatedEventsInfo
        The overall statistics of the simulated events
    true_energy_bins: astropy.units.Quantity[energy]
        The true energy bin edges in which to calculate effective area.
    fov_offset_bins: astropy.units.Quantity[angle]
        The field of view radial bin edges in which to calculate effective area.

    Returns
    -------
    effective_area: astropy.units.Quantity[area]
        Effective area,
        shape (len(true_energy_bins) - 1, len(fov_offset_bins) - 1).
    """
    area = np.pi * simulation_info.max_impact ** 2

    hist_simulated = simulation_info.calculate_n_showers_per_energy_and_fov(
        true_energy_bins, fov_offset_bins
    )

    hist_selected, _, _ = np.histogram2d(
        selected_events["true_energy"].to_value(u.TeV),
        selected_events["true_source_fov_offset"].to_value(u.deg),
        bins=[
            true_energy_bins.to_value(u.TeV),
            fov_offset_bins.to_value(u.deg),
        ],
    )

    return effective_area(hist_selected, hist_simulated, area)


def effective_area_3d_polar(
    selected_events,
    simulation_info,
    true_energy_bins,
    fov_offset_bins,
    fov_position_angle_bins,
):
    """
    Calculate effective area in bins of true energy, field of view offset, and field of view position angle.

    Parameters
    ----------
    selected_events: astropy.table.QTable
        DL2 events table, required columns for this function:
        - `true_energy`
        - `true_source_fov_offset`
        - `true_source_fov_position_angle`
    simulation_info: pyirf.simulations.SimulatedEventsInfo
        The overall statistics of the simulated events
    true_energy_bins: astropy.units.Quantity[energy]
        The true energy bin edges in which to calculate effective area.
    fov_offset_bins: astropy.units.Quantity[angle]
        The field of view radial bin edges in which to calculate effective area.
    fov_position_angle_bins: astropy.units.Quantity[radian]
        The field of view azimuthal bin edges in which to calculate effective area.

    Returns
    -------
    effective_area: astropy.units.Quantity[area]
        Effective area, shape
        (len(true_energy_bins) - 1, len(fov_offset_bins) - 1, len(fov_position_angle_bins) - 1).
    """
    area = np.pi * simulation_info.max_impact**2

    hist_simulated = simulation_info.calculate_n_showers_3d_polar(
        true_energy_bins, fov_offset_bins, fov_position_angle_bins
    )

    hist_selected, _ = np.histogramdd(
        np.column_stack(
            [
                selected_events["true_energy"].to_value(u.TeV),
                selected_events["true_source_fov_offset"].to_value(u.deg),
                selected_events["true_source_fov_position_angle"].to_value(u.rad),
            ]
        ),
        bins=(
            true_energy_bins.to_value(u.TeV),
            fov_offset_bins.to_value(u.deg),
            fov_position_angle_bins.to_value(u.rad),
        ),
    )

    return effective_area(hist_selected, hist_simulated, area)


def effective_area_3d_lonlat(
    selected_events,
    simulation_info,
    true_energy_bins,
    fov_longitude_bins,
    fov_latitude_bins,
    subpixels=20,
):
    """
    Calculate effective area in bins of true energy, field of view longitude, and field of view latitude.

    Parameters
    ----------
    selected_events: astropy.table.QTable
        DL2 events table, required columns for this function:
        - `true_energy`
        - `true_source_fov_lon`
        - `true_source_fov_lat`
    simulation_info: pyirf.simulations.SimulatedEventsInfo
        The overall statistics of the simulated events
    true_energy_bins: astropy.units.Quantity[energy]
        The true energy bin edges in which to calculate effective area.
    fov_longitude_bins: astropy.units.Quantity[angle]
        The field of view longitude bin edges in which to calculate effective area.
    fov_latitude_bins: astropy.units.Quantity[angle]
        The field of view latitude bin edges in which to calculate effective area.
    subpixels: int
        Forwarded to ``simulation_info.calculate_n_showers_3d_lonlat`` when
        estimating the number of simulated showers per (lon, lat) bin.
        Defaults to 20.

    Returns
    -------
    effective_area: astropy.units.Quantity[area]
        Effective area, shape
        (len(true_energy_bins) - 1, len(fov_longitude_bins) - 1, len(fov_latitude_bins) - 1).
    """
    area = np.pi * simulation_info.max_impact**2

    hist_simulated = simulation_info.calculate_n_showers_3d_lonlat(
        true_energy_bins, fov_longitude_bins, fov_latitude_bins, subpixels=subpixels
    )

    selected_columns = np.column_stack(
        [
            selected_events["true_energy"].to_value(u.TeV),
            selected_events["true_source_fov_lon"].to_value(u.deg),
            selected_events["true_source_fov_lat"].to_value(u.deg),
        ]
    )
    bins = (
        true_energy_bins.to_value(u.TeV),
        fov_longitude_bins.to_value(u.deg),
        fov_latitude_bins.to_value(u.deg),
    )

    hist_selected, _ = np.histogramdd(selected_columns, bins=bins)

    return effective_area(hist_selected, hist_simulated, area)
fov_offset_bins, 36 | migration_bins, 37 | ): 38 | """ 39 | Calculate energy dispersion for the given DL2 event list. 40 | Energy dispersion is defined as the probability of finding an event 41 | at a given relative deviation ``(reco_energy / true_energy)`` for a given 42 | true energy. 43 | 44 | Parameters 45 | ---------- 46 | selected_events: astropy.table.QTable 47 | Table of the DL2 events. 48 | Required columns: ``reco_energy``, ``true_energy``, ``true_source_fov_offset``. 49 | true_energy_bins: astropy.units.Quantity[energy] 50 | Bin edges in true energy 51 | migration_bins: astropy.units.Quantity[energy] 52 | Bin edges in relative deviation, recommended range: [0.2, 5] 53 | fov_offset_bins: astropy.units.Quantity[angle] 54 | Bin edges in the field of view offset. 55 | For Point-Like IRFs, only giving a single bin is appropriate. 56 | 57 | Returns 58 | ------- 59 | energy_dispersion: numpy.ndarray 60 | Energy dispersion matrix 61 | with shape (n_true_energy_bins, n_migration_bins, n_fov_ofset_bins) 62 | """ 63 | mu = (selected_events["reco_energy"] / selected_events["true_energy"]).to_value( 64 | u.one 65 | ) 66 | 67 | energy_dispersion, _ = np.histogramdd( 68 | np.column_stack( 69 | [ 70 | selected_events["true_energy"].to_value(u.TeV), 71 | mu, 72 | selected_events["true_source_fov_offset"].to_value(u.deg), 73 | ] 74 | ), 75 | bins=[ 76 | true_energy_bins.to_value(u.TeV), 77 | migration_bins, 78 | fov_offset_bins.to_value(u.deg), 79 | ], 80 | ) 81 | 82 | energy_dispersion = _normalize_hist(energy_dispersion, migration_bins) 83 | 84 | return energy_dispersion 85 | 86 | 87 | @u.quantity_input(true_energy_bins=u.TeV, reco_energy_bins=u.TeV, fov_offset_bins=u.deg) 88 | def energy_migration_matrix( 89 | events, true_energy_bins, reco_energy_bins, fov_offset_bins 90 | ): 91 | """Compute the energy migration matrix directly from the events. 92 | 93 | Parameters 94 | ---------- 95 | events : `~astropy.table.QTable` 96 | Table of the DL2 events. 
97 | Required columns: ``reco_energy``, ``true_energy``, ``true_source_fov_offset``. 98 | true_energy_bins : `~astropy.units.Quantity` 99 | Bin edges in true energy. 100 | reco_energy_bins : `~astropy.units.Quantity` 101 | Bin edges in reconstructed energy. 102 | 103 | Returns 104 | ------- 105 | matrix : array-like 106 | Migration matrix as probabilities along the reconstructed energy axis. 107 | energy axis with shape 108 | (n_true_energy_bins, n_reco_energy_bins, n_fov_offset_bins) 109 | containing energies in TeV. 110 | """ 111 | 112 | hist, _ = np.histogramdd( 113 | np.column_stack( 114 | [ 115 | events["true_energy"].to_value(u.TeV), 116 | events["reco_energy"].to_value(u.TeV), 117 | events["true_source_fov_offset"].to_value(u.deg), 118 | ] 119 | ), 120 | bins=[ 121 | true_energy_bins.to_value(u.TeV), 122 | reco_energy_bins.to_value(u.TeV), 123 | fov_offset_bins.to_value(u.deg), 124 | ], 125 | ) 126 | 127 | with np.errstate(invalid="ignore"): 128 | hist /= hist.sum(axis=1)[:, np.newaxis, :] 129 | # the nans come from the fact that the sum along the reconstructed energy axis 130 | # might sometimes be 0 when there are no events in that given true energy bin 131 | # and fov offset bin 132 | hist[np.isnan(hist)] = 0 133 | 134 | return hist 135 | 136 | 137 | def energy_dispersion_to_migration( 138 | dispersion_matrix, 139 | disp_true_energy_edges, 140 | disp_migration_edges, 141 | new_true_energy_edges, 142 | new_reco_energy_edges, 143 | ): 144 | """ 145 | Construct a energy migration matrix from an energy dispersion matrix. 146 | 147 | Depending on the new energy ranges, the sum over the first axis 148 | can be smaller than 1. 149 | The new true energy bins need to be a subset of the old range, 150 | extrapolation is not supported. 151 | New reconstruction bins outside of the old migration range are filled with 152 | zeros. 
def energy_dispersion_to_migration(
    dispersion_matrix,
    disp_true_energy_edges,
    disp_migration_edges,
    new_true_energy_edges,
    new_reco_energy_edges,
):
    """
    Construct a energy migration matrix from an energy dispersion matrix.

    Depending on the new energy ranges, the sum over the first axis
    can be smaller than 1.
    The new true energy bins need to be a subset of the old range,
    extrapolation is not supported.
    New reconstruction bins outside of the old migration range are filled with
    zeros.

    Parameters
    ----------
    dispersion_matrix: numpy.ndarray
        Energy dispersion_matrix
    disp_true_energy_edges: astropy.units.Quantity[energy]
        True energy edges matching the first dimension of the dispersion matrix
    disp_migration_edges: numpy.ndarray
        Migration edges matching the second dimension of the dispersion matrix
    new_true_energy_edges: astropy.units.Quantity[energy]
        True energy edges matching the first dimension of the output
    new_reco_energy_edges: astropy.units.Quantity[energy]
        Reco energy edges matching the second dimension of the output

    Returns
    -------
    migration_matrix: numpy.ndarray
        Three-dimensional energy migration matrix. The third dimension
        equals the fov offset dimension of the energy dispersion matrix.
    """
    # output shape: (new true energy, new reco energy, fov offset);
    # the fov offset axis is carried over from the input unchanged
    migration_matrix = np.zeros(
        (
            len(new_true_energy_edges) - 1,
            len(new_reco_energy_edges) - 1,
            dispersion_matrix.shape[2],
        )
    )

    # the dispersion matrix is a density in migration; multiplying by the
    # migration bin width turns it into per-bin probabilities
    migra_width = np.diff(disp_migration_edges)
    probability = dispersion_matrix * migra_width[np.newaxis, :, np.newaxis]

    # resample the per-bin probabilities onto the new true energy binning
    true_energy_interpolation = resample_histogram1d(
        probability,
        disp_true_energy_edges,
        new_true_energy_edges,
        axis=0,
    )

    # renormalize each true energy slice; empty slices get norm 1 so the
    # division below leaves them at zero instead of producing nans
    norm = np.sum(true_energy_interpolation, axis=1, keepdims=True)
    norm[norm == 0] = 1
    true_energy_interpolation /= norm

    for idx, e_true in enumerate(
        (new_true_energy_edges[1:] + new_true_energy_edges[:-1]) / 2
    ):
        # get migration for the new true energy bin
        e_true_dispersion = true_energy_interpolation[idx]

        with warnings.catch_warnings():
            # silence inf/inf division warning
            warnings.filterwarnings(
                "ignore", "invalid value encountered in true_divide"
            )
            # reco energy edges expressed in units of this bin's true
            # energy are migration edges for the resampling below
            interpolation_edges = new_reco_energy_edges / e_true

        y = resample_histogram1d(
            e_true_dispersion,
            disp_migration_edges,
            interpolation_edges,
            axis=0,
        )

        migration_matrix[idx, :, :] = y

    return migration_matrix
def psf_table(events, true_energy_bins, source_offset_bins, fov_offset_bins):
    """
    Calculate the table based PSF (radially symmetrical bins around the true source)

    Parameters
    ----------
    events: astropy.table.QTable
        Table of the DL2 events.
        Required columns: ``true_energy``, ``true_source_fov_offset``, ``theta``.
    true_energy_bins: astropy.units.Quantity[energy]
        Bin edges in true energy
    source_offset_bins: astropy.units.Quantity[angle]
        Bin edges in the angular distance ``theta`` to the true source
    fov_offset_bins: astropy.units.Quantity[angle]
        Bin edges in the field of view offset

    Returns
    -------
    psf: astropy.units.Quantity
        Probability density per solid angle with shape
        (n_true_energy_bins, n_fov_offset_bins, n_source_offset_bins)
    """

    array = np.column_stack(
        [
            events["true_energy"].to_value(u.TeV),
            events["true_source_fov_offset"].to_value(u.deg),
            events["theta"].to_value(u.deg),
        ]
    )

    hist, _ = np.histogramdd(
        array,
        [
            true_energy_bins.to_value(u.TeV),
            fov_offset_bins.to_value(u.deg),
            source_offset_bins.to_value(u.deg),
        ],
    )

    psf = _normalize_psf(hist, source_offset_bins)
    return psf


def _normalize_psf(hist, source_offset_bins):
    """Normalize the psf histogram to a probability density over solid angle"""
    # solid angle of each ring between consecutive source offset edges
    solid_angle = np.diff(cone_solid_angle(source_offset_bins))

    # ignore numpy zero division warning
    with np.errstate(invalid="ignore"):

        # normalize over the theta axis
        n_events = hist.sum(axis=2)
        # normalize and replace nans with 0
        psf = np.nan_to_num(hist / n_events[:, :, np.newaxis])

    return psf / solid_angle
def test_background():
    """background_2d rates must integrate back to the input event counts."""
    from pyirf.irf import background_2d
    from pyirf.utils import cone_solid_angle

    np.random.seed(0)

    N1 = 1000
    N2 = 100
    N = N1 + N2

    # toy event data set with just two energies,
    # all events at the fov center
    events = QTable(
        {
            "reco_energy": np.append(np.full(N1, 1), np.full(N2, 2)) * u.TeV,
            "reco_source_fov_offset": np.zeros(N) * u.deg,
            "weight": np.ones(N),
        }
    )

    energy_bins = [0, 1.5, 3] * u.TeV
    fov_bins = [0, 1] * u.deg

    # compute the background rate for an observation time of 1 s
    bg = background_2d(events, energy_bins, fov_bins, t_obs=1 * u.s)

    # 2 energy bins, 1 fov bin
    assert bg.shape == (2, 1)
    assert bg.unit == u.Unit("TeV-1 s-1 sr-1")

    # check that the rate integrates back to the event counts per second
    bin_solid_angle = np.diff(cone_solid_angle(fov_bins))
    e_width = np.diff(energy_bins)
    assert np.allclose(
        np.sum((bg.T * e_width).T * bin_solid_angle, axis=1), [1000, 100] / u.s
    )


def test_background_3d_lonlat():
    """background_3d_lonlat counts must round-trip through the rate conversion."""
    from pyirf.irf import background_3d_lonlat
    from pyirf.utils import rectangle_solid_angle
    from pyirf.irf.background import BACKGROUND_UNIT

    reco_energy_bins = [0.1, 1.1, 11.1, 111.1] * u.TeV
    fov_lon_bins = [-1.0, 0, 1.0] * u.deg
    fov_lat_bins = [-1.0, 0, 1.0] * u.deg

    N_low = 4000
    N_high = 40
    N_tot = N_low + N_high

    # Fill values
    E_low, E_hig = 0.5, 5
    Lon_low, Lon_hig = (-0.5, 0.5) * u.deg
    Lat_low, Lat_hig = (-0.5, 0.5) * u.deg

    t_obs = 100 * u.s
    bin_width_energy = np.diff(reco_energy_bins)
    bin_solid_angle = rectangle_solid_angle(
        fov_lon_bins[:-1], fov_lon_bins[1:], fov_lat_bins[:-1], fov_lat_bins[1:]
    )

    # Toy events with two energies and four different sky positions
    selected_events = QTable(
        {
            "reco_energy": np.concatenate(
                [
                    np.full(N_low // 4, E_low),
                    np.full(N_high // 4, E_hig),
                    np.full(N_low // 4, E_low),
                    np.full(N_high // 4, E_hig),
                    np.full(N_low // 4, E_low),
                    np.full(N_high // 4, E_hig),
                    np.full(N_low // 4, E_low),
                    np.full(N_high // 4, E_hig),
                ]
            )
            * u.TeV,
            "reco_source_fov_lon": np.concatenate(
                [
                    np.full(N_low // 4, Lon_low),
                    np.full(N_high // 4, Lon_hig),
                    np.full(N_low // 4, Lon_low),
                    np.full(N_high // 4, Lon_hig),
                    np.full(N_low // 4, Lon_low),
                    np.full(N_high // 4, Lon_hig),
                    np.full(N_low // 4, Lon_low),
                    np.full(N_high // 4, Lon_hig),
                ]
            )
            * u.deg,
            "reco_source_fov_lat": np.append(
                np.full(N_tot // 2, Lat_low), np.full(N_tot // 2, Lat_hig)
            )
            * u.deg,
            "weight": np.full(N_tot, 1.0),
        }
    )

    bkg_rate = background_3d_lonlat(
        selected_events,
        reco_energy_bins=reco_energy_bins,
        fov_lon_bins=fov_lon_bins,
        fov_lat_bins=fov_lat_bins,
        t_obs=t_obs,
    )
    assert bkg_rate.shape == (
        len(reco_energy_bins) - 1,
        len(fov_lon_bins) - 1,
        len(fov_lat_bins) - 1,
    )
    assert bkg_rate.unit == BACKGROUND_UNIT

    # Convert to counts, project to energy axis, and check counts round-trip correctly
    assert np.allclose(
        (bin_solid_angle * bkg_rate * bin_width_energy[:, np.newaxis, np.newaxis]).sum(
            axis=(1, 2)
        )
        * t_obs,
        [N_low, N_high, 0],
    )
    # Convert to counts, project to latitude axis, and check counts round-trip correctly
    assert np.allclose(
        (bin_solid_angle * bkg_rate * bin_width_energy[:, np.newaxis, np.newaxis]).sum(
            axis=(0, 1)
        )
        * t_obs,
        2 * [N_tot // 2],
    )
def test_psf():
    """psf_table must be normalized and recover the toy gaussian widths."""
    from pyirf.irf import psf_table
    from pyirf.utils import cone_solid_angle

    np.random.seed(0)

    N = 1000

    TRUE_SIGMA_1 = 0.2
    TRUE_SIGMA_2 = 0.1
    TRUE_SIGMA = np.append(np.full(N, TRUE_SIGMA_1), np.full(N, TRUE_SIGMA_2))

    # toy event data set with just two energies
    # and a psf per energy bin, point-like
    events = QTable(
        {
            "true_energy": np.append(np.full(N, 1), np.full(N, 2)) * u.TeV,
            "true_source_fov_offset": np.zeros(2 * N) * u.deg,
            "theta": np.random.normal(0, TRUE_SIGMA) * u.deg,
        }
    )

    energy_bins = [0, 1.5, 3] * u.TeV
    fov_bins = [0, 1] * u.deg
    source_bins = np.linspace(0, 1, 201) * u.deg

    # compute the psf with a single fov offset bin
    psf = psf_table(events, energy_bins, source_bins, fov_bins)

    # 2 energy bins, 1 fov bin, 200 source distance bins
    assert psf.shape == (2, 1, 200)
    assert psf.unit == u.Unit("sr-1")

    # check that psf is normalized
    bin_solid_angle = np.diff(cone_solid_angle(source_bins))
    assert np.allclose(np.sum(psf * bin_solid_angle, axis=2), 1.0)

    cumulated = np.cumsum(psf * bin_solid_angle, axis=2)

    # first energy and only fov bin
    bin_centers = 0.5 * (source_bins[1:] + source_bins[:-1])
    assert u.isclose(
        bin_centers[np.where(cumulated[0, 0, :] >= 0.68)[0][0]],
        TRUE_SIGMA_1 * u.deg,
        rtol=0.1,
    )

    # second energy and only fov bin
    assert u.isclose(
        bin_centers[np.where(cumulated[1, 0, :] >= 0.68)[0][0]],
        TRUE_SIGMA_2 * u.deg,
        rtol=0.1,
    )
def li_ma_significance(n_on, n_off, alpha=0.2):
    """
    Calculate the Li & Ma significance.

    Formula (17) in https://doi.org/10.1086/161295

    This function returns 0 significance when n_on < alpha * n_off
    instead of the negative significances that would result from naively
    evaluating the formula.

    Parameters
    ----------
    n_on: integer or array like
        Number of events for the on observations
    n_off: integer or array like
        Number of events for the off observations
    alpha: float
        Ratio between the on region and the off region size or obstime.

    Returns
    -------
    s_lima: float or array
        The calculated significance
    """

    scalar = is_scalar(n_on)

    # Cast everything into float64 to avoid numeric instabilities
    # when multiplying very small and very big numbers to get t1 and t2
    n_on = np.array(n_on, copy=COPY_IF_NEEDED, ndmin=1, dtype=np.float64)
    n_off = np.array(n_off, copy=COPY_IF_NEEDED, ndmin=1, dtype=np.float64)
    alpha = np.float64(alpha)

    with np.errstate(divide="ignore", invalid="ignore"):
        p_on = n_on / (n_on + n_off)
        p_off = n_off / (n_on + n_off)

        t1 = n_on * np.log(((1 + alpha) / alpha) * p_on)
        t2 = n_off * np.log((1 + alpha) * p_off)

        # lim x+->0 (x log(x)) = 0
        t1[n_on == 0] = 0
        t2[n_off == 0] = 0

        ts = t1 + t2

        significance = np.sqrt(ts * 2)

    # clip the unphysical (negative excess) case to 0 instead of
    # returning a negative significance
    significance[n_on < (alpha * n_off)] = 0

    if scalar:
        return significance[0]

    return significance
def test_gadf_fov_coords_lon_lat():
    """Check the GADF fov lon/lat transform on simple cases and H.E.S.S. reference values."""
    from pyirf.coordinates import gadf_fov_coords_lon_lat
    # test some simple cases
    lon, lat = gadf_fov_coords_lon_lat(1 * u.deg, 1 * u.deg, 0 * u.deg, 0 * u.deg)
    assert_allclose(lon.value, -1)
    assert_allclose(lat.value, 1)

    lon, lat = gadf_fov_coords_lon_lat(269 * u.deg, 0 * u.deg, 270 * u.deg, 0 * u.deg)
    assert_allclose(lon.value, 1)
    assert_allclose(lat.value, 0, atol=1e-7)

    lon, lat = gadf_fov_coords_lon_lat(1 * u.deg, 60 * u.deg, 0 * u.deg, 60 * u.deg)
    assert_allclose(lon.value, -0.5, rtol=1e-3)
    assert_allclose(lat.value, 0.003779, rtol=1e-3)

    # these are cross-checked with the
    # transformation as implemented in H.E.S.S.
    az = [51.320575, 50.899125, 52.154053, 48.233023]
    alt = [49.505451, 50.030165, 51.811739, 54.700102]
    az_pointing = [52.42056255, 52.24706061, 52.06655505, 51.86795724]
    alt_pointing = [51.11908203, 51.23454751, 51.35376141, 51.48385814]
    lon, lat = gadf_fov_coords_lon_lat(
        az * u.deg, alt * u.deg, az_pointing * u.deg, alt_pointing * u.deg
    )
    assert_allclose(
        lon.value, [0.7145614, 0.86603433, -0.05409698, 2.10295248], rtol=1e-5
    )
    assert_allclose(
        lat.value, [-1.60829115, -1.19643974, 0.45800984, 3.26844192], rtol=1e-5
    )

def test_gadf_fov_coords_theta_phi():
    """Check theta/phi fov coordinates for offsets in the four cardinal directions."""
    from pyirf.coordinates import gadf_fov_coords_theta_phi

    theta, phi = gadf_fov_coords_theta_phi(
        lat=1 * u.deg, lon=0 * u.deg, pointing_lat=0 * u.deg, pointing_lon=0 * u.deg
    )
    assert u.isclose(theta, 1 * u.deg)
    assert u.isclose(phi, 0 * u.deg)

    theta, phi = gadf_fov_coords_theta_phi(
        lat=-1 * u.deg, lon=0 * u.deg, pointing_lat=0 * u.deg, pointing_lon=0 * u.deg
    )
    assert u.isclose(theta, 1 * u.deg)
    assert u.isclose(phi, 180 * u.deg)

    theta, phi = gadf_fov_coords_theta_phi(
        lat=0 * u.deg, lon=-1 * u.deg, pointing_lat=0 * u.deg, pointing_lon=0 * u.deg
    )
    assert u.isclose(theta, 1 * u.deg)
    assert u.isclose(phi, 90 * u.deg)

    theta, phi = gadf_fov_coords_theta_phi(
        lat=0 * u.deg, lon=1 * u.deg, pointing_lat=0 * u.deg, pointing_lon=0 * u.deg
    )
    assert u.isclose(theta, 1 * u.deg)
    assert u.isclose(phi, 270 * u.deg)
@pytest.fixture
def events():
    # NOTE(review): this fixture is not used by any test visible in this
    # module — confirm it is unused before removing
    return QTable(
        {
            "bin_reco_energy": [0, 0, 1, 1, 2, 2],
            "theta": [0.1, 0.02, 0.3, 0.15, 0.01, 0.1] * u.deg,
            "gh_score": [1.0, -0.2, 0.5, 0.05, 1.0, 0.3],
        }
    )


def test_calculate_percentile_cuts():
    """Percentile cuts recover the 68 % quantile of two known normal distributions."""
    from pyirf.cuts import calculate_percentile_cut

    np.random.seed(0)

    dist1 = norm(0, 1)
    dist2 = norm(10, 1)
    N = int(1e4)

    values = np.append(dist1.rvs(size=N), dist2.rvs(size=N)) * u.deg
    bin_values = np.append(np.zeros(N), np.ones(N)) * u.m
    # add some values outside of binning to test that under/overflow are ignored
    bin_values[10] = 5 * u.m
    bin_values[30] = -1 * u.m

    bins = [-0.5, 0.5, 1.5] * u.m

    cuts = calculate_percentile_cut(values, bin_values, bins, fill_value=np.nan * u.deg)
    assert np.all(cuts["low"] == bins[:-1])
    assert np.all(cuts["high"] == bins[1:])

    assert np.allclose(
        cuts["cut"].to_value(u.deg),
        [dist1.ppf(0.68), dist2.ppf(0.68)],
        rtol=0.1,
    )

    # test with min/max value
    cuts = calculate_percentile_cut(
        values,
        bin_values,
        bins,
        fill_value=np.nan * u.deg,
        min_value=1 * u.deg,
        max_value=5 * u.deg,
    )
    assert np.all(cuts["cut"] == [1.0, 5.0] * u.deg)


def test_calculate_percentile_cuts_no_units():
    """Same as above, but the cut values carry no unit."""
    from pyirf.cuts import calculate_percentile_cut

    np.random.seed(0)

    dist1 = norm(0, 1)
    dist2 = norm(10, 1)
    N = int(1e4)

    values = np.append(dist1.rvs(size=N), dist2.rvs(size=N))
    bin_values = np.append(np.zeros(N), np.ones(N)) * u.m
    bins = [-0.5, 0.5, 1.5] * u.m

    cuts = calculate_percentile_cut(values, bin_values, bins, fill_value=np.nan)
    assert np.all(cuts["low"] == bins[:-1])
    assert np.all(cuts["high"] == bins[1:])

    assert np.allclose(
        cuts["cut"],
        [dist1.ppf(0.68), dist2.ppf(0.68)],
        rtol=0.1,
    )


def test_calculate_percentile_cuts_smoothing():
    """With smoothing=1 the two bin cuts are pulled towards each other."""
    from pyirf.cuts import calculate_percentile_cut

    np.random.seed(0)

    dist1 = norm(0, 1)
    dist2 = norm(10, 1)
    N = int(1e4)

    values = np.append(dist1.rvs(size=N), dist2.rvs(size=N))
    bin_values = np.append(np.zeros(N), np.ones(N)) * u.m
    bins = [-0.5, 0.5, 1.5] * u.m

    cuts = calculate_percentile_cut(values, bin_values, bins, fill_value=np.nan, smoothing=1)
    assert np.all(cuts["low"] == bins[:-1])
    assert np.all(cuts["high"] == bins[1:])

    assert np.allclose(
        cuts["cut"],
        [3.5, 7.5],
        rtol=0.2,
    )


def test_evaluate_binned_cut():
    """Events pass when their value satisfies the cut of their bin."""
    from pyirf.cuts import evaluate_binned_cut

    cuts = QTable({"low": [0, 1], "high": [1, 2], "cut": [100, 1000],})

    survived = evaluate_binned_cut(
        np.array([500, 1500, 50, 2000, 25, 800]),
        np.array([0.5, 1.5, 0.5, 1.5, 0.5, 1.5]),
        cut_table=cuts,
        op=operator.ge,
    )
    assert np.all(survived == [True, True, False, True, False, False])

    # test with quantity
    cuts = QTable(
        {"low": [0, 1] * u.TeV, "high": [1, 2] * u.TeV, "cut": [100, 1000] * u.m,}
    )

    survived = evaluate_binned_cut(
        [500, 1500, 50, 2000, 25, 800] * u.m,
        [0.5, 1.5, 0.5, 1.5, 0.5, 1.5] * u.TeV,
        cut_table=cuts,
        op=operator.ge,
    )
    assert np.all(survived == [True, True, False, True, False, False])


def test_compare_irf_cuts():
    """Tests compare_irf_cuts."""

    from pyirf.cuts import compare_irf_cuts
    # first create some dummy cuts
    enbins = np.logspace(-2, 3) * u.TeV
    thcuts1 = np.linspace(0.5, 0.1) * u.deg
    thcuts2 = np.linspace(0.6, 0.2) * u.deg
    names = ("Energy", "Theta2")
    t1 = QTable([enbins, thcuts1], names=names)
    t1b = QTable([enbins, thcuts1], names=names)
    t2 = QTable([enbins, thcuts2], names=names)
    # comparing identical cuts ==> should return True
    assert compare_irf_cuts([t1, t1b])

    # different cuts ==> should return False
    assert compare_irf_cuts([t1, t2]) is False


def test_calculate_percentile_cuts_table():
    '''Test that calculate percentile cuts does not modify input table'''
    from pyirf.cuts import calculate_percentile_cut

    np.random.seed(0)

    dist1 = norm(0, 1)
    dist2 = norm(10, 1)
    N = int(1e4)

    table = QTable({
        "foo": np.append(dist1.rvs(size=N), dist2.rvs(size=N)) * u.deg,
        "bar": np.append(np.zeros(N), np.ones(N)) * u.m,
    })

    bins = [-0.5, 0.5, 1.5] * u.m
    cuts = calculate_percentile_cut(
        table["foo"],
        table["bar"],
        bins,
        fill_value=np.nan * u.rad,
    )
    assert table.colnames == ["foo", "bar"]
    assert np.allclose(
        cuts["cut"].to_value(u.deg),
        [dist1.ppf(0.68), dist2.ppf(0.68)],
        rtol=0.1,
    )
def test_effective_area_table_2d():
    """Smoke test: conversion to gammapy EffectiveAreaTable2D and its repr work."""
    from pyirf.gammapy import create_effective_area_table_2d

    shape = (len(true_energy_bins) - 1, len(fov_offset_bins) - 1)
    aeff = np.random.uniform(0, 1e5, size=shape) * u.m**2

    aeff_gammapy = create_effective_area_table_2d(aeff, true_energy_bins, fov_offset_bins)
    # test str repr works

    str(aeff_gammapy)


def test_psf_3d():
    """Smoke test: conversion to gammapy PSF3D and its repr work."""
    from pyirf.gammapy import create_psf_3d

    shape = (len(true_energy_bins) - 1, len(fov_offset_bins) - 1, len(source_offset_bins) - 1)
    psf = np.zeros(shape) / u.sr
    psf3d = create_psf_3d(psf, true_energy_bins, source_offset_bins, fov_offset_bins)
    str(psf3d)


def test_energy_dispersion():
    """Smoke test: conversion to gammapy EnergyDispersion2D and its repr work."""
    from pyirf.gammapy import create_energy_dispersion_2d

    shape = (len(true_energy_bins) - 1, len(migration_bins) - 1, len(fov_offset_bins) - 1)
    edisp = np.zeros(shape)
    edisp2d = create_energy_dispersion_2d(edisp, true_energy_bins, migration_bins, fov_offset_bins)
    str(edisp2d)
def test_optimize_gh_cuts():
    """Smoke test: optimize_gh_cut runs on toy signal/background events."""
    from pyirf.cuts import calculate_percentile_cut
    from pyirf.cut_optimization import optimize_gh_cut

    rng = np.random.default_rng(0)

    n_signal = 1000
    n_background = 10000

    signal = QTable({
        "reco_energy": rng.uniform(1.0, 10.0, n_signal) * u.TeV,
        "theta": rng.uniform(0.0, 0.5, n_signal) * u.deg,
        "gh_score": np.clip(rng.normal(0.7, 0.4, n_signal), 0, 1),
    })

    background = QTable({
        "reco_energy": rng.uniform(1.0, 10.0, n_background) * u.TeV,
        "theta": rng.uniform(0.0, 0.5, n_background) * u.deg,
        "gh_score": np.clip(rng.normal(0.2, 0.3, n_background), 0, 1),
        "reco_source_fov_offset": rng.uniform(0, 1, n_background) * u.deg,
    })


    e_reco_bins = np.linspace(1.0, 10.0, 5) * u.TeV
    theta_cuts = calculate_percentile_cut(signal["theta"], signal["reco_energy"], e_reco_bins, fill_value=1 * u.deg)

    optimize_gh_cut(signal, background, e_reco_bins, [0.5, 0.8, 0.9], theta_cuts)


def test_optimize_cuts():
    """Smoke test: joint multiplicity/gh/theta cut optimization runs."""
    from pyirf.cut_optimization import optimize_cuts

    rng = np.random.default_rng(0)

    n_signal = 3000
    n_background = 10000

    signal = QTable({
        "reco_energy": rng.uniform(1.0, 10.0, n_signal) * u.TeV,
        "theta": rng.uniform(0.0, 0.5, n_signal) * u.deg,
        "gh_score": np.clip(rng.normal(0.7, 0.4, n_signal), 0, 1),
        "multiplicity": rng.integers(2, 15, n_signal),
        "weight": rng.uniform(0.5, 2, n_signal),
    })

    background = QTable({
        "reco_energy": rng.uniform(1.0, 10.0, n_background) * u.TeV,
        "theta": rng.uniform(0.0, 0.5, n_background) * u.deg,
        "gh_score": np.clip(rng.normal(0.2, 0.3, n_background), 0, 1),
        "reco_source_fov_offset": rng.uniform(0, 1, n_background) * u.deg,
        "multiplicity": rng.integers(2, 15, n_background),
        "weight": rng.uniform(0.5, 2, n_background),
    })


    e_reco_bins = np.linspace(1.0, 10.0, 5) * u.TeV

    optimize_cuts(
        signal,
        background,
        e_reco_bins,
        multiplicity_cuts=[2, 3, 4, 5],
        gh_cut_efficiencies=[0.5, 0.8, 0.9],
        theta_cut_efficiencies=[0.68, 0.75],
        alpha=0.2,
    )
def test_relative_sensitivity():
    """relative_sensitivity edge cases: calibration points, no signal, no background, vectorization."""
    from pyirf.sensitivity import relative_sensitivity

    # some general case
    n_on = 100
    n_off = 200
    alpha = 0.2
    assert 0.1 < relative_sensitivity(n_on, n_off, alpha) < 1.0

    # numbers yield lima = 5 relatively precisely, so sensitivity should be 1
    assert np.isclose(relative_sensitivity(81, 202, 0.2), 1, rtol=0.01)

    # test different target significance
    # numbers yield lima = 8 relatively precisely, so sensitivity should be 1
    result = relative_sensitivity(93, 150, 0.2, min_significance=8)
    assert np.isclose(result, 1, rtol=0.01)

    # no signal => inf
    assert np.isinf(relative_sensitivity(10, 100, 0.2))

    # no background, should work
    assert relative_sensitivity(10, 0, 0.2) > 0

    # test vectorization

    n_on = np.array([10, 20, 30])
    n_off = np.array([10, 100])
    s = relative_sensitivity(n_on, n_off[:, np.newaxis], alpha=0.2)
    assert s.shape == (2, 3)


def test_calculate_sensitivity():
    """calculate_sensitivity applies the significance, relative-background and event-count conditions."""
    from pyirf.sensitivity import calculate_sensitivity
    from pyirf.binning import bin_center
    bins = [0.1, 1.0, 10] * u.TeV
    signal_hist = QTable({
        'reco_energy_low': bins[:-1],
        'reco_energy_high': bins[1:],
        'reco_energy_center': bin_center(bins),
        'n': [100, 100],
        'n_weighted': [100, 100],
    })
    bg_hist = signal_hist.copy()
    bg_hist['n'] = 20
    bg_hist['n_weighted'] = 50

    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)

    # sensitivity smaller than 1
    signal_hist['n_weighted'] = [10, 10]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)

    assert len(sensitivity) == len(signal_hist)
    np.testing.assert_almost_equal(5.0, sensitivity['significance'])
    np.testing.assert_array_less(1.0, sensitivity['relative_sensitivity'])

    # not above 5 percent of remaining background
    signal_hist['n_weighted'] = 699
    bg_hist['n_weighted'] = 70_000
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    assert len(sensitivity) == len(signal_hist)
    # we scale up the signal until we meet the requirement, so
    # we must have more than 5 sigma
    assert np.all(sensitivity['significance'] >= 5)

    # less then 10 events
    signal_hist['n_weighted'] = [9, 9]
    bg_hist['n_weighted'] = [1, 1]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    # we scale up the signal until we meet the requirement, so
    # we must have more than 5 sigma
    assert np.all(sensitivity['significance'] >= 5)

    # No background at all, li&ma not applicable => event count requirement
    signal_hist['n_weighted'] = [5, 5]
    bg_hist['n_weighted'] = [0, 0]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    np.testing.assert_equal(sensitivity['relative_sensitivity'], 2)


def test_estimate_background():
    """estimate_background scales off-counts by theta-cut to fov-band solid-angle ratio."""
    from pyirf.sensitivity import estimate_background
    N = 1000
    events = QTable({
        'reco_source_fov_offset': np.append(np.full(N, 0.5), np.full(N, 1.5)) * u.deg,
        'reco_energy': np.tile([5, 50], N) * u.TeV,
        'weight': np.tile([1, 2], N),
    })
    reco_energy_bins = [1, 10, 100] * u.TeV
    theta_cuts = QTable({
        'low': [1, 10] * u.TeV,
        'high': [10, 100] * u.TeV,
        'center': [5.5, 55] * u.TeV,
        'cut': (np.arccos([0.9998, 0.9999]) * u.rad).to(u.deg),
    })
    fov_offset_max = np.arccos(0.999) * u.rad

    bg = estimate_background(
        events,
        reco_energy_bins,
        theta_cuts,
        alpha=0.2,
        fov_offset_min=0 * u.deg,
        fov_offset_max=fov_offset_max,
    )

    assert np.allclose(bg['n'], [1000, 500])
    assert np.allclose(bg['n_weighted'], [1000, 1000])
def test_table_interpolation():
    """TableInterpolationSpectrum interpolates in log-log and lin-lin space."""

    from pyirf.spectral import TableInterpolationSpectrum

    # log log
    energy = [1, 10, 100] * u.TeV
    flux = [50, 5, 0.5] / (u.GeV * u.m**2 * u.sr * u.s)

    spectrum = TableInterpolationSpectrum(energy, flux)
    assert u.isclose(spectrum(5 * u.TeV), 10 / (u.GeV * u.m**2 * u.sr * u.s))


    # lin lin
    energy = [1, 2, 3] * u.TeV
    flux = [10, 8, 6] / (u.GeV * u.m**2 * u.sr * u.s)

    spectrum = TableInterpolationSpectrum(energy, flux, log_energy=False, log_flux=False)
    assert u.isclose(spectrum(1.5 * u.TeV), 9 / (u.GeV * u.m**2 * u.sr * u.s))



def test_powerlaw_integrate_cone_invalid():
    """integrate_cone must reject point-source (non-diffuse) spectra."""
    from pyirf.spectral import PowerLaw, POINT_SOURCE_FLUX_UNIT

    point_source = PowerLaw(
        normalization=1 * POINT_SOURCE_FLUX_UNIT,
        index=-2,
        e_ref=500 * u.GeV,
    )
    with pytest.raises(ValueError):
        point_source.integrate_cone(0 * u.deg, 2 * u.deg)


@pytest.mark.parametrize(
    "outer,expected",
    zip((90 * u.deg, 180 * u.deg), (2 * np.pi * u.sr, 4 * np.pi * u.sr))
)
def test_powerlaw_integrate_cone(outer, expected):
    """Integrating a diffuse power law over a cone scales the normalization by the solid angle."""
    from pyirf.spectral import PowerLaw, POINT_SOURCE_FLUX_UNIT, DIFFUSE_FLUX_UNIT
    diffuse_flux = PowerLaw(
        normalization=1 * DIFFUSE_FLUX_UNIT,
        index=-2,
        e_ref=500 * u.GeV,
    )


    integrated = diffuse_flux.integrate_cone(0 * u.rad, outer)
    assert integrated.normalization.unit.is_equivalent(POINT_SOURCE_FLUX_UNIT)
    assert u.isclose(integrated.normalization, diffuse_flux.normalization * expected)
    assert integrated.index == diffuse_flux.index
    assert integrated.e_ref == diffuse_flux.e_ref


def test_powerlaw():
    """PowerLaw constructor validation and output unit stability."""
    from pyirf.spectral import PowerLaw

    with pytest.raises(TypeError):
        PowerLaw(normalization=1e-10, index=-2)

    with pytest.raises(u.UnitsError):
        PowerLaw(normalization=1e-10 / u.TeV, index=-2)

    with pytest.raises(ValueError):
        PowerLaw(normalization=1e-10 / u.TeV / u.m**2 / u.s, index=2)

    # check we get a reasonable unit out of astropy independent of input unit
    unit = u.TeV**-1 * u.m**-2 * u.s**-1
    power_law = PowerLaw(1e-10 * unit, -2.65)
    assert power_law(1 * u.TeV).unit == unit
    assert power_law(1 * u.GeV).unit == unit


def test_powerlaw_from_simulations():
    """PowerLaw.from_simulation for point-like and diffuse simulation configs."""
    from pyirf.simulations import SimulatedEventsInfo
    from pyirf.spectral import PowerLaw

    # calculate sensitivity between 1 and 2 degrees offset from fov center
    obstime = 50 * u.hour

    simulated_events = SimulatedEventsInfo(
        n_showers=int(1e6),
        energy_min=10 * u.GeV,
        energy_max=100 * u.TeV,
        max_impact=1 * u.km,
        spectral_index=-2,
        viewcone_min=0 * u.deg,
        viewcone_max=0 * u.deg,
    )

    powerlaw = PowerLaw.from_simulation(simulated_events, obstime=obstime)
    assert powerlaw.index == -2
    # regression test, maybe better come up with an easy to analytically verify parameter combination?
    assert u.isclose(powerlaw.normalization, 1.76856511e-08 / (u.TeV * u.m**2 * u.s))


    simulated_events = SimulatedEventsInfo(
        n_showers=int(1e6),
        energy_min=10 * u.GeV,
        energy_max=100 * u.TeV,
        max_impact=1 * u.km,
        spectral_index=-2,
        viewcone_min=5 * u.deg,
        viewcone_max=10 * u.deg,
    )

    powerlaw = PowerLaw.from_simulation(simulated_events, obstime=obstime)
    assert powerlaw.index == -2
    # regression test, maybe better come up with an easy to analytically verify parameter combination?
    assert u.isclose(powerlaw.normalization, 2.471917427911683e-07 / (u.TeV * u.m**2 * u.s * u.sr))
114 | assert u.isclose(powerlaw.normalization, 2.471917427911683e-07 / (u.TeV * u.m**2 * u.s * u.sr)) 115 | -------------------------------------------------------------------------------- /pyirf/tests/test_statistics.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | 4 | 5 | def test_lima(): 6 | from pyirf.statistics import li_ma_significance 7 | 8 | assert li_ma_significance(10, 2, 0.2) > 5 9 | assert li_ma_significance(10, 0, 0.2) > 5 10 | assert li_ma_significance(1, 6, 0.2) == 0 11 | 12 | 13 | def test_lima_gammapy(): 14 | pytest.importorskip("gammapy") 15 | from gammapy.stats import WStatCountsStatistic 16 | from pyirf.statistics import li_ma_significance 17 | 18 | n_ons = [100, 50, 10] 19 | n_offs = [10, 20, 30] 20 | alphas = [2, 1, 0.2] 21 | for n_on, n_off, alpha in zip(n_ons, n_offs, alphas): 22 | sig_gammapy = WStatCountsStatistic(n_on, n_off, alpha).sqrt_ts 23 | assert np.isclose(li_ma_significance(n_on, n_off, alpha), sig_gammapy) 24 | 25 | 26 | def test_lima_accuracy(): 27 | from pyirf.statistics import li_ma_significance 28 | 29 | noff = 1e7 30 | nexcess = 1e4 31 | 32 | res_f64 = li_ma_significance( 33 | np.float64(noff + nexcess), np.float64(noff / 0.2), 0.2 34 | ) 35 | res_f32 = li_ma_significance( 36 | np.float32(noff + nexcess), np.float32(noff / 0.2), 0.2 37 | ) 38 | 39 | assert np.isclose(res_f64, res_f32) 40 | -------------------------------------------------------------------------------- /pyirf/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import astropy.units as u 3 | from astropy.table import QTable 4 | import pytest 5 | 6 | 7 | def test_is_scalar(): 8 | from pyirf.utils import is_scalar 9 | 10 | assert is_scalar(1.0) 11 | assert is_scalar(5 * u.m) 12 | assert is_scalar(np.array(5)) 13 | 14 | assert not is_scalar([1, 2, 3]) 15 | assert not is_scalar([1, 2, 3] * u.m) 16 | assert 
not is_scalar(np.ones(5)) 17 | assert not is_scalar(np.ones((3, 4))) 18 | 19 | 20 | def test_cone_solid_angle(): 21 | from pyirf.utils import cone_solid_angle 22 | 23 | # whole sphere 24 | assert u.isclose(cone_solid_angle(np.pi * u.rad), 4 * np.pi * u.sr) 25 | 26 | # half the sphere 27 | assert u.isclose(cone_solid_angle(90 * u.deg), 2 * np.pi * u.sr) 28 | 29 | # zero 30 | assert u.isclose(cone_solid_angle(0 * u.deg), 0 * u.sr) 31 | 32 | 33 | def test_rectangle_solid_angle(): 34 | from pyirf.utils import rectangle_solid_angle 35 | 36 | # whole sphere 37 | assert u.isclose( 38 | rectangle_solid_angle(0 * u.deg, 360 * u.deg, -90 * u.deg, 90 * u.deg), 39 | 4 * np.pi * u.sr, 40 | ) 41 | 42 | # half the sphere 43 | assert u.isclose( 44 | rectangle_solid_angle(0 * u.deg, 180 * u.deg, -90 * u.deg, 90 * u.deg), 45 | 2 * np.pi * u.sr, 46 | ) 47 | 48 | # zero 49 | assert u.isclose( 50 | rectangle_solid_angle(0 * u.deg, 0 * u.deg, 0* u.deg, 0 * u.deg), 51 | 0 * u.sr, 52 | ) 53 | 54 | 55 | def test_check_table(): 56 | from pyirf.exceptions import MissingColumns, WrongColumnUnit 57 | from pyirf.utils import check_table 58 | 59 | t = QTable({'bar': [0, 1, 2] * u.TeV}) 60 | 61 | with pytest.raises(MissingColumns): 62 | check_table(t, required_columns=['foo']) 63 | 64 | t = QTable({'bar': [0, 1, 2] * u.TeV}) 65 | with pytest.raises(WrongColumnUnit): 66 | check_table(t, required_units={'bar': u.m}) 67 | 68 | t = QTable({'bar': [0, 1, 2] * u.m}) 69 | with pytest.raises(MissingColumns): 70 | check_table(t, required_units={'foo': u.m}) 71 | 72 | # m is convertible 73 | check_table(t, required_units={'bar': u.cm}) 74 | -------------------------------------------------------------------------------- /pyirf/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import astropy.units as u 3 | from astropy.coordinates import angular_separation 4 | from .coordinates import gadf_fov_coords_theta_phi, gadf_fov_coords_lon_lat 5 | 6 
| from .compat import COPY_IF_NEEDED 7 | from .exceptions import MissingColumns, WrongColumnUnit 8 | 9 | 10 | __all__ = [ 11 | "is_scalar", 12 | "calculate_theta", 13 | "calculate_source_fov_offset", 14 | "calculate_source_fov_position_angle", 15 | "check_histograms", 16 | "cone_solid_angle", 17 | ] 18 | 19 | 20 | def is_scalar(val): 21 | """Workaround that also supports astropy quantities 22 | 23 | Parameters 24 | ---------- 25 | val : object 26 | Any object (value, list, etc...) 27 | 28 | Returns 29 | ------- 30 | result: bool 31 | True is if input object is a scalar, False otherwise. 32 | """ 33 | result = np.array(val, copy=COPY_IF_NEEDED).shape == tuple() 34 | return result 35 | 36 | 37 | @u.quantity_input(assumed_source_az=u.deg, assumed_source_alt=u.deg) 38 | def calculate_theta(events, assumed_source_az, assumed_source_alt): 39 | """Calculate sky separation between assumed and reconstructed positions. 40 | 41 | Parameters 42 | ---------- 43 | events : astropy.QTable 44 | Astropy Table object containing the reconstructed events information. 45 | assumed_source_az: astropy.units.Quantity 46 | Assumed Azimuth angle of the source. 47 | assumed_source_alt: astropy.units.Quantity 48 | Assumed Altitude angle of the source. 49 | 50 | Returns 51 | ------- 52 | theta: astropy.units.Quantity 53 | Angular separation between the assumed and reconstructed positions 54 | in the sky. 55 | """ 56 | theta = angular_separation( 57 | assumed_source_az, 58 | assumed_source_alt, 59 | events["reco_az"], 60 | events["reco_alt"], 61 | ) 62 | 63 | return theta.to(u.deg) 64 | 65 | 66 | def calculate_source_fov_offset(events, prefix="true"): 67 | """Calculate angular separation between true and pointing positions. 68 | 69 | Parameters 70 | ---------- 71 | events : astropy.QTable 72 | Astropy Table object containing the reconstructed events information. 73 | 74 | prefix: str 75 | Column prefix for az / alt, can be used to calculate reco or true 76 | source fov offset. 
77 | 
78 |     Returns
79 |     -------
80 |     theta: astropy.units.Quantity
81 |         Angular separation between the true and pointing positions
82 |         in the sky.
83 |     """
84 |     theta = angular_separation(
85 |         events[f"{prefix}_az"],
86 |         events[f"{prefix}_alt"],
87 |         events["pointing_az"],
88 |         events["pointing_alt"],
89 |     )
90 | 
91 |     return theta.to(u.deg)
92 | 
93 | 
94 | def calculate_source_fov_position_angle(events, prefix="true"):
95 |     """Calculate position_angle of true positions relative to pointing positions.
96 | 
97 |     Parameters
98 |     ----------
99 |     events : astropy.QTable
100 |         Astropy Table object containing the reconstructed events information.
101 | 
102 |     prefix: str
103 |         Column prefix for az / alt, can be used to calculate reco or true
104 |         source fov position_angle.
105 | 
106 |     Returns
107 |     -------
108 |     phi: astropy.units.Quantity
109 |         Position angle of the true positions relative to the pointing positions
110 |         in the sky.
111 |     """
112 |     _, phi = gadf_fov_coords_theta_phi(
113 |         events[f"{prefix}_az"],
114 |         events[f"{prefix}_alt"],
115 |         events["pointing_az"],
116 |         events["pointing_alt"],
117 |     )
118 | 
119 |     return phi.to(u.deg)
120 | 
121 | 
122 | def calculate_source_fov_lonlat(events, prefix="true"):
123 |     """Calculate FoV longitude and latitude of true positions relative to pointing positions.
124 | 
125 |     Parameters
126 |     ----------
127 |     events : astropy.QTable
128 |         Astropy Table object containing the reconstructed events information.
129 | 
130 |     prefix: str
131 |         Column prefix for az / alt, can be used to calculate reco or true
132 |         source fov lon/lat.
133 | 
134 |     Returns
135 |     -------
136 |     lon, lat: astropy.units.Quantity
137 |         FoV longitude and latitude of the true positions relative to the
138 |         pointing positions in the sky.
139 | """ 140 | lon, lat = gadf_fov_coords_lon_lat( 141 | events[f"{prefix}_az"], 142 | events[f"{prefix}_alt"], 143 | events["pointing_az"], 144 | events["pointing_alt"], 145 | ) 146 | 147 | return lon.to(u.deg), lat.to(u.deg) 148 | 149 | 150 | def check_histograms(hist1, hist2, key="reco_energy"): 151 | """ 152 | Check if two histogram tables have the same binning 153 | 154 | Parameters 155 | ---------- 156 | hist1: ``~astropy.table.Table`` 157 | First histogram table, as created by 158 | ``~pyirf.binning.create_histogram_table`` 159 | hist2: ``~astropy.table.Table`` 160 | Second histogram table 161 | """ 162 | 163 | # check binning information and add to output 164 | for k in ("low", "center", "high"): 165 | k = key + "_" + k 166 | if not np.all(hist1[k] == hist2[k]): 167 | raise ValueError( 168 | "Binning for signal_hist and background_hist must be equal" 169 | ) 170 | 171 | 172 | def cone_solid_angle(angle): 173 | """Calculate the solid angle of a view cone. 174 | 175 | Parameters 176 | ---------- 177 | angle: astropy.units.Quantity or astropy.coordinates.Angle 178 | Opening angle of the view cone. 179 | 180 | Returns 181 | ------- 182 | solid_angle: astropy.units.Quantity 183 | Solid angle of a view cone with opening angle ``angle``. 
184 | 
185 |     """
186 |     solid_angle = 2 * np.pi * (1 - np.cos(angle)) * u.sr
187 |     return solid_angle
188 | 
189 | 
190 | def rectangle_solid_angle(lon_low, lon_high, lat_low, lat_high):
191 |     """Calculate the solid angle of a latitude-longitude rectangle
192 | 
193 |     Parameters
194 |     ----------
195 |     lon_low: astropy.units.Quantity[angle]
196 |         Lower longitude coordinate of the rectangle corner
197 |     lon_high: astropy.units.Quantity[angle]
198 |         Higher longitude coordinate of the rectangle corner
199 |     lat_low: astropy.units.Quantity[angle]
200 |         Lower latitude coordinate of the rectangle corner
201 |     lat_high: astropy.units.Quantity[angle]
202 |         Higher latitude coordinate of the rectangle corner
203 | 
204 |     Returns
205 |     -------
206 |     solid angle: astropy.units.Quantity[solid angle]
207 | 
208 |     """
209 |     diff_lon = (lon_high - lon_low).to_value(u.rad)
210 |     diff_lat = np.sin(lat_high.to_value(u.rad)) - np.sin(lat_low.to_value(u.rad))
211 | 
212 |     solid_angle = diff_lon * diff_lat * u.sr
213 |     return solid_angle
214 | 
215 | 
216 | def check_table(table, required_columns=None, required_units=None):
217 |     """Check a table for required columns and units.
218 | 
219 |     Parameters
220 |     ----------
221 |     table: astropy.table.QTable
222 |         Table to check
223 |     required_columns: iterable[str]
224 |         Column names that are required to be present
225 |     required_units: Mapping[str->astropy.units.Unit]
226 |         Required units for columns as a Mapping from column names to units.
227 |         Checks if the units are convertible, not if they are identical
228 | 
229 |     Raises
230 |     ------
231 |     MissingColumns: If any of the columns specified in ``required_columns`` or
232 |         as keys in ``required_units`` are not present in the table.
233 | WrongColumnUnit: if any column has the wrong unit 234 | """ 235 | if required_columns is not None: 236 | missing = set(required_columns) - set(table.colnames) 237 | if missing: 238 | raise MissingColumns(missing) 239 | 240 | if required_units is not None: 241 | for col, expected in required_units.items(): 242 | if col not in table.colnames: 243 | raise MissingColumns(col) 244 | 245 | unit = table[col].unit 246 | if not expected.is_equivalent(unit): 247 | raise WrongColumnUnit(col, unit, expected) 248 | -------------------------------------------------------------------------------- /pyirf/version.py: -------------------------------------------------------------------------------- 1 | # this is adapted from https://github.com/astropy/astropy/blob/master/astropy/version.py 2 | # see https://github.com/astropy/astropy/pull/10774 for a discussion on why this needed. 3 | 4 | try: 5 | try: 6 | from ._dev_version import version 7 | except ImportError: 8 | from ._version import version 9 | except Exception: 10 | import warnings 11 | 12 | warnings.warn( 13 | "Could not determine pyirf version; this indicates a broken installation." 14 | " Install pyirf from PyPI or from a local git repository." 15 | " Installing github's autogenerated source release tarballs " 16 | " does not include version information and should be avoided." 
17 | ) 18 | del warnings 19 | version = "0.0.0" 20 | 21 | __version__ = version 22 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=60", "setuptools-scm[toml]>=8.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "pyirf" 7 | description = "A python library to calculate IACT IRFs and Sensitivities" 8 | readme = "README.rst" 9 | requires-python = ">=3.10" 10 | license = "MIT" 11 | authors = [ 12 | { name = "CTA Consortium, Analysis and Simulation Working Group", email = "maximilian.linhoff@tu-dortmund.de" } 13 | ] 14 | dynamic = [ 15 | "version", 16 | ] 17 | classifiers = [ 18 | "Development Status :: 4 - Beta", 19 | "Intended Audience :: Science/Research", 20 | "Topic :: Scientific/Engineering :: Astronomy", 21 | "Topic :: Scientific/Engineering :: Physics", 22 | "Programming Language :: Python :: 3 :: Only", 23 | "Programming Language :: Python :: 3.10", 24 | "Programming Language :: Python :: 3.11", 25 | "Programming Language :: Python :: 3.12", 26 | "Programming Language :: Python :: 3.13", 27 | ] 28 | 29 | dependencies = [ 30 | "astropy>=5.3,<8.0.0a0", 31 | "numpy>=1.21", 32 | "scipy", 33 | "tqdm", 34 | "packaging", 35 | ] 36 | 37 | [project.urls] 38 | "Homepage" = "https://github.com/cta-observatory/pyirf" 39 | "Bug Tracker" = "https://github.com/cta-observatory/pyirf/issues" 40 | "Documentation" = "https://cta-observatory.github.io/pyirf" 41 | "Source Code" = "https://github.com/cta-observatory/pyirf" 42 | 43 | [project.optional-dependencies] 44 | gammapy = [ 45 | "gammapy ~=1.0" 46 | ] 47 | docs = [ 48 | "sphinx", 49 | "sphinx_rtd_theme", 50 | "sphinx_automodapi", 51 | "numpydoc", 52 | "nbsphinx", 53 | "notebook", 54 | "tables", 55 | "towncrier", 56 | "pyirf[gammapy]", 57 | ] 58 | tests = [ 59 | "pytest", 60 | "pytest-cov", 61 | "ogadf-schema ~=0.2.3", 62 | 
"pyirf[gammapy]",
63 | ]
64 | dev = [
65 |   "setuptools_scm",
66 | ]
67 | all = [
68 |   "pyirf[tests,docs,dev]",
69 | ]
70 | 
71 | [tool.setuptools]
72 | include-package-data = true
73 | 
74 | [tool.setuptools.packages.find]
75 | exclude = ["pyirf._dev_version", "docs*", "examples*"]
76 | 
77 | [tool.setuptools_scm]
78 | write_to = "pyirf/_version.py"
79 | 
80 | [tool.towncrier]
81 | package = "pyirf"
82 | directory = "docs/changes"
83 | filename = "CHANGES.rst"
84 | template = "docs/changes/template.rst"
85 | # let towncrier create proper links to the merged PR
86 | issue_format = "`#{issue} <https://github.com/cta-observatory/pyirf/pull/{issue}>`__"
87 | 
88 | [tool.towncrier.fragment.feature]
89 | name = "New Features"
90 | showcontent = true
91 | 
92 | [tool.towncrier.fragment.bugfix]
93 | name = "Bug Fixes"
94 | showcontent = true
95 | 
96 | [tool.towncrier.fragment.api]
97 | name = "API Changes"
98 | showcontent = true
99 | 
100 | [tool.towncrier.fragment.optimization]
101 | name = "Refactoring and Optimization"
102 | showcontent = true
103 | 
104 | [tool.towncrier.fragment.maintenance]
105 | name = "Maintenance"
106 | showcontent = true
107 | 
108 | [[tool.towncrier.section]]
109 | name = ""
110 | path = ""
111 | 
112 | 
113 | [tool.pytest.ini_options]
114 | filterwarnings = [
115 |   "ignore:pkg_resources is deprecated:DeprecationWarning",
116 |   "ignore:Deprecated call to `pkg_resources:DeprecationWarning",
117 | ]
118 | 
--------------------------------------------------------------------------------