├── .coveragerc
├── .flake8
├── .github
│   ├── dependabot.yml
│   ├── fftw-env.yaml
│   └── workflows
│       ├── auto-merge-deps.yml
│       ├── deploy.yaml
│       ├── run-docs-code.yaml
│       ├── test-with-warnings.yaml
│       └── testsuite.yaml
├── .gitignore
├── .isort.cfg
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── CHANGELOG.rst
├── CONTRIBUTING.rst
├── CONTRIBUTORS.rst
├── LICENSE.rst
├── README.rst
├── devel
│   ├── hankel_transform_of_cosmo_power.ipynb
│   └── testing_angular_averaging.ipynb
├── docs
│   ├── Makefile
│   ├── api.rst
│   ├── api
│   │   ├── dft.rst
│   │   ├── powerbox.rst
│   │   └── tools.rst
│   ├── authors.rst
│   ├── changelog.rst
│   ├── conf.py
│   ├── contributing.rst
│   ├── demos
│   │   ├── algorithm.ipynb
│   │   ├── cosmological_fields.ipynb
│   │   ├── dft.ipynb
│   │   ├── getting_started.ipynb
│   │   └── mpi.ipynb
│   ├── examples.rst
│   ├── index.rst
│   ├── license.rst
│   └── templates
│       ├── class.rst
│       └── modules.rst
├── paper
│   ├── Makefile
│   ├── joss-logo.png
│   ├── latex.template
│   ├── paper.aux
│   ├── paper.bcf
│   ├── paper.bib
│   ├── paper.log
│   ├── paper.md
│   ├── paper.out
│   ├── paper.pdf
│   ├── paper.run.xml
│   ├── paper.tex
│   └── paper.zip
├── pyproject.toml
├── src
│   └── powerbox
│       ├── __init__.py
│       ├── dft.py
│       ├── dft_backend.py
│       ├── powerbox.py
│       └── tools.py
└── tests
    ├── test_direct.py
    ├── test_discrete.py
    ├── test_fft.py
    ├── test_lognormal.py
    ├── test_power.py
    ├── test_stats.py
    └── test_tools.py

/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit = */tests/*
3 | 
4 | [report]
5 | omit = */tests/*
6 | exclude_lines =
7 |     pragma: no cover
8 |     @abstract
9 | 
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore =
3 |     E203
4 |     E266
5 |     E501
6 |     W503
7 |     F403
8 |     F401
9 |     # Imperative mood -- doesn't work for cached-property.
10 |     D401
11 |     # TODO: remove this (function name should be lower-case)
12 |     N802
13 |     # no using lru_cache on methods
14 |     B019
15 |     # logging uses f-string
16 |     G004
17 |     # no docstring in __init__
18 |     D107
19 |     # no upper-case variables (we have lots...)
20 | N806 21 | # no upper case argument names 22 | N803 23 | max-line-length = 88 24 | max-complexity = 19 25 | rst-roles = 26 | class 27 | func 28 | mod 29 | data 30 | const 31 | meth 32 | attr 33 | exc 34 | obj 35 | rst-directives = 36 | note 37 | warning 38 | versionadded 39 | versionchanged 40 | deprecated 41 | seealso 42 | per-file-ignores = 43 | tests/*:D, T201 44 | setup.py:D 45 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: monthly 7 | - package-ecosystem: pip 8 | directory: "/.github/workflows" 9 | schedule: 10 | interval: monthly 11 | - package-ecosystem: pip 12 | directory: "/docs" 13 | schedule: 14 | interval: monthly 15 | - package-ecosystem: pip 16 | directory: "/" 17 | schedule: 18 | interval: monthly 19 | versioning-strategy: lockfile-only 20 | allow: 21 | - dependency-type: "all" 22 | -------------------------------------------------------------------------------- /.github/fftw-env.yaml: -------------------------------------------------------------------------------- 1 | name: withfftw 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - pyfftw 7 | - pip: 8 | - methodtools 9 | -------------------------------------------------------------------------------- /.github/workflows/auto-merge-deps.yml: -------------------------------------------------------------------------------- 1 | name: auto-merge 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | auto-merge: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - uses: ahmadnassri/action-dependabot-auto-merge@v2 12 | with: 13 | target: minor 14 | github-token: ${{ secrets.AUTO_MERGE }} 15 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Publish Python distributions to PyPI 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | build-n-publish: 10 | name: Build and publish to PyPI 11 | runs-on: ubuntu-latest 12 | env: 13 | ENV_NAME: publish 14 | PYTHON: "3.11" 15 | steps: 16 | - uses: actions/checkout@main 17 | with: 18 | fetch-depth: 0 19 | 20 | - name: Set up Python 21 | uses: actions/setup-python@v5 22 | with: 23 | python-version: ${{ env.PYTHON }} 24 | 25 | - name: Install build 26 | run: pip install build 27 | 28 | - name: Build a binary wheel and a source tarball 29 | run: | 30 | python -m build 31 | 32 | - name: Publish to PyPI 33 | if: startsWith(github.event.ref, 'refs/tags') 34 | uses: pypa/gh-action-pypi-publish@release/v1 35 | with: 36 | user: __token__ 37 | password: ${{ secrets.pypi_password }} 38 | -------------------------------------------------------------------------------- /.github/workflows/run-docs-code.yaml: -------------------------------------------------------------------------------- 1 | name: Run Demo 2 | on: [pull_request] 3 | 4 | 5 | jobs: 6 | tests: 7 | name: Run Demo 8 | runs-on: ubuntu-latest 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | demo: ["cosmological_fields", "dft", "getting_started"] 13 | steps: 14 | - uses: actions/checkout@master 15 | with: 16 | fetch-depth: 1 17 | 18 | - name: Set up Python 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: "3.11" 22 | 23 | - name: Install Deps 24 | run: | 25 | pip install 
.[dev]
26 |           pip install papermill ipykernel
27 | 
28 |       - name: Install ipykernel
29 |         run: python -m ipykernel install --user --name docs --display-name "docs"
30 | 
31 |       - name: Run Notebook
32 |         run: |
33 |           papermill -k docs docs/demos/${{ matrix.demo }}.ipynb output-${{ matrix.demo }}.ipynb
34 | 
35 |       - uses: actions/upload-artifact@v4
36 |         with:
37 |           name: ${{ matrix.demo }}
38 |           path: output-${{ matrix.demo }}.ipynb
--------------------------------------------------------------------------------
/.github/workflows/test-with-warnings.yaml:
--------------------------------------------------------------------------------
1 | name: Warnings Tests
2 | on: [push]
3 | 
4 | 
5 | jobs:
6 |   tests:
7 |     name: With Warnings
8 |     runs-on: ${{ matrix.os }}
9 |     strategy:
10 |       fail-fast: false
11 |       matrix:
12 |         os: [ubuntu-latest, macos-latest, windows-latest]
13 |         python: ["3.11"]
14 |     steps:
15 |       - uses: actions/checkout@master
16 |         with:
17 |           fetch-depth: 1
18 | 
19 |       - uses: actions/setup-python@v5
20 |         with:
21 |           python-version: ${{ matrix.python }}
22 | 
23 |       - name: Install Test Deps
24 |         run: |
25 |           pip install .[tests]
26 | 
27 |       - name: Run Tests
28 |         run: |
29 |           python -m pytest -W error
30 | 
31 |       - uses: codecov/codecov-action@v5
32 |         if: success()
33 |         with:
34 |           files: ./coverage.xml #optional
35 |           token: ${{ secrets.CODECOV_TOKEN }} #required
--------------------------------------------------------------------------------
/.github/workflows/testsuite.yaml:
--------------------------------------------------------------------------------
1 | name: Test Suite
2 | on: [push, pull_request]
3 | 
4 | 
5 | jobs:
6 |   tests:
7 |     name: Test Suite
8 |     runs-on: ${{ matrix.os }}
9 | 
10 |     strategy:
11 |       fail-fast: false
12 |       matrix:
13 |         os: [ubuntu-latest, macos-latest, windows-latest]
14 |         python: ["3.9", "3.10", "3.11", "3.12"]
15 |     steps:
16 |       - uses: actions/checkout@master
17 |         with:
18 |           fetch-depth: 1
19 | 
20 |       - uses: conda-incubator/setup-miniconda@v3
21 |         with:
22 |           python-version: ${{ matrix.python }}
23 |           channels: conda-forge,defaults
24 |           channel-priority: true
25 |           activate-environment: withfftw
26 |           environment-file: .github/fftw-env.yaml
27 | 
28 |       - name: Install Test Deps
29 |         shell: bash -el {0}
30 |         run: |
31 |           which pip
32 |           python --version
33 |           pip install .[tests,fftw]
34 | 
35 |       - name: Run Tests
36 |         shell: bash -el {0}
37 |         run: |
38 |           python -m pytest --cov=powerbox --cov-config=.coveragerc --cov-report xml:./coverage.xml --junitxml=test-reports/xunit.xml
39 | 
40 |       - name: Upload Test Coverage
41 |         uses: codecov/codecov-action@v5
42 |         with:
43 |           token: ${{ secrets.CODECOV_TOKEN }}
44 |           files: ./coverage.xml
45 |           flags: unittests
46 |           name: codecov-umbrella
47 |           fail_ci_if_error: true
48 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build/*
2 | dist/*
3 | .idea*
4 | *.egg-info
5 | *.pyc
6 | *checkpoint.ipynb
7 | docs/_*
8 | \.pytest_cache/v/cache/
9 | .tox/*
10 | \.coverage
11 | _version.py
--------------------------------------------------------------------------------
/.isort.cfg:
--------------------------------------------------------------------------------
1 | [settings]
2 | line_length=88
3 | indent='    '
4 | skip=.tox,.venv,build,dist
5 | known_standard_library=setuptools,pkg_resources
6 | known_test=pytest
7 | known_first_party=powerbox
8 | sections=FUTURE,STDLIB,TEST,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
9 | 
default_section=THIRDPARTY 10 | multi_line_output=3 11 | profile=black 12 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: '^docs/conf.py' 2 | 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v5.0.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: check-added-large-files 9 | - id: check-ast 10 | - id: check-json 11 | - id: check-merge-conflict 12 | - id: check-xml 13 | - id: debug-statements 14 | - id: end-of-file-fixer 15 | - id: requirements-txt-fixer 16 | - id: mixed-line-ending 17 | args: ['--fix=no'] 18 | 19 | - repo: https://github.com/PyCQA/flake8 20 | rev: 7.1.1 21 | hooks: 22 | - id: flake8 23 | additional_dependencies: 24 | - flake8-builtins 25 | # - flake8-eradicate # flake8 6 incompatible 26 | - pep8-naming 27 | - flake8-pytest 28 | - flake8-docstrings 29 | - flake8-rst-docstrings 30 | - flake8-rst 31 | # - flake8-copyright # flake8 6 incompatible 32 | - flake8-markdown 33 | - flake8-bugbear 34 | # - flake8-comprehensions # flake8 6 incompatible 35 | - flake8-print 36 | 37 | 38 | - repo: https://github.com/psf/black-pre-commit-mirror 39 | rev: 25.1.0 40 | hooks: 41 | - id: black 42 | 43 | - repo: https://github.com/PyCQA/isort 44 | rev: 6.0.0 45 | hooks: 46 | - id: isort 47 | 48 | - repo: https://github.com/pre-commit/pygrep-hooks 49 | rev: v1.10.0 50 | hooks: 51 | - id: rst-backticks 52 | 53 | - repo: https://github.com/asottile/pyupgrade 54 | rev: v3.19.1 55 | hooks: 56 | - id: pyupgrade 57 | args: [--py38-plus] 58 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/conf.py 11 | 12 | # Optionally build your docs in additional formats such as PDF and ePub 13 | formats: all 14 | 15 | 16 | build: 17 | os: "ubuntu-22.04" 18 | tools: 19 | python: "mambaforge-4.10" 20 | jobs: 21 | pre_build: 22 | - echo `pandoc --version` 23 | - conda list 24 | 25 | conda: 26 | environment: docs/environment.yml 27 | 28 | # Optionally set the version of Python and requirements required to build your docs 29 | python: 30 | install: 31 | - method: pip 32 | path: . 33 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | 0.6.1 5 | ----- 6 | **Bugfixes** 7 | 8 | - Fix error when doing cross-power [Issue #5]. 9 | 10 | 0.6.0 11 | ----- 12 | **Features** 13 | 14 | - New ``left_edge`` argument in fft/ifft which gives the ability to specify where 15 | the x- (or k-) co-ordinates are, in order to set appropriate phase information. 16 | NOTE: this changes the default behaviour of the function. While the forward and 17 | inverse transforms remain inverses by default, the phases are interpreted as 18 | having zero at the centre (for both transforms). See the phasing tutorial for 19 | more information. 
20 | - Fixed transpose issue caused by default behavior of ``numpy.meshgrid``, which
21 |   led to broken correspondence between discrete sample of field and original
22 |   field. See [Issue #15].
23 | 
24 | **Bugfixes**
25 | 
26 | - Make warning about pyFFTW slightly less obnoxious.
27 | 
28 | 
29 | v0.5.7 [24 Oct 2018]
30 | --------------------
31 | **Enhancements**
32 | 
33 | - Added ability to use weights on k-modes in ``get_power``.
34 | 
35 | **Bugfixes**
36 | 
37 | - Fixed bug on using ``ignore_zero_mode`` introduced in v0.5.6
38 | - Added tests for ``ignore_zero_mode`` and ``k_weights``
39 | 
40 | v0.5.6 [23 Oct 2018]
41 | --------------------
42 | **Enhancements**
43 | 
44 | - Added ``ignore_zero_mode`` parameter to ``get_power``.
45 | 
46 | **Bugfixes**
47 | 
48 | - Removed redundant ``seed`` parameter from ``create_discrete_sample()``.
49 | 
50 | v0.5.5 [19 July 2018]
51 | ---------------------
52 | **Bugfixes**
53 | 
54 | - log_bins wasn't being passed through to angular_average correctly.
55 | 
56 | **Enhancements**
57 | 
58 | - ``angular_average()`` no longer requires coords to be passed as box of magnitudes.
59 | - improved docs.
60 | - fixed source divide by zero warning in PowerBox()
61 | 
62 | v0.5.4 [30 May 2018]
63 | --------------------
64 | **Enhancements**
65 | 
66 | - Added ability to do angular averaging in log-space bins
67 | - When not all radial bins have co-ordinates in them, a more reasonable warning message is emitted.
68 | - Removed redundant bincount call when only summing, not averaging (angularly).
69 | 
70 | **Bugfixes**
71 | 
72 | - Now properly deals with co-ordinates outside the bin range in angular_average (will only make a difference when bins
73 |   is passed as a vector). Note that this has meant that by default the highest-valued co-ordinate in the box will *not*
74 |   contribute to any bins any more.
75 | - Fixed a bunch of tests in test_power which were using the wrong power index!
76 | 
77 | **Internals**
78 | 
79 | - Re-factored getting radial bins into _getbins() function.
80 | 
81 | v0.5.3 [22 May 2018]
82 | --------------------
83 | **Bugfixes**
84 | 
85 | - Fixed a bug introduced in v0.5.1 where using bin_ave=False in angular_average_nd would fail.
86 | 
87 | v0.5.2 [17 May 2018]
88 | --------------------
89 | **Enhancements**
90 | 
91 | - Added ability to calculate the variance of an angularly averaged quantity.
92 | - Removed a redundant calculation of the bin weights in angular_average
93 | 
94 | **Internals**
95 | 
96 | - Updated version numbers of dev requirements.
97 | 
98 | v0.5.1 [4 May 2018]
99 | -------------------
100 | **Enhancements**
101 | 
102 | - Added ability to *not* have dimensionless power spectra from get_power.
103 | - Also return linearly-spaced radial bin edges from angular_average_nd
104 | - Python 3 compatibility
105 | 
106 | **Bugfixes**
107 | 
108 | - Fixed bug where field was modified in-place unexpectedly in angular_average
109 | - Now correctly flattens weights before getting the field average in angular_average_nd
110 | 
111 | v0.5.0 [7 Nov 2017]
112 | -------------------
113 | **Features**
114 | 
115 | - Input boxes to get_power no longer need to have same length on every dimension.
116 | - New angular_average_nd function to average over first n dimensions of an array.
117 | 
118 | **Enhancements**
119 | 
120 | - Huge (5x or so) speed-up for angular_average function (with resulting speedup for get_power).
121 | - Huge memory reduction in fft/ifft routines, with potential loss of some speed (TODO: optimise) 122 | - Better memory consumption in PowerBox classes, at the expense of an API change (cached properties no 123 | longer cached, or properties). 124 | - Modified fftshift in dft to handle astropy Quantity objects (bit of a hack really) 125 | 126 | **Bugfixes** 127 | 128 | - Fixed issue where if the boxlength was passed as an integer (to fft/ifft), then incorrect results occurred. 129 | - Fixed issue where incorrect first_edge assignment in get_power resulted in bad power spectrum. No longer require this arg. 130 | 131 | v0.4.3 [29 March 2017] 132 | ---------------------- 133 | **Bugfixes** 134 | 135 | - Fixed volume normalisation in get_power. 136 | 137 | v0.4.2 [28 March 2017] 138 | ---------------------- 139 | **Features** 140 | 141 | - Added ability to cross-correlate boxes in get_power. 142 | 143 | v0.4.1 144 | ------ 145 | **Bugfixes** 146 | 147 | - Fixed cubegrid return value for dft functions when input boxes have different sizes on each dimension. 148 | 149 | 150 | v0.4.0 151 | ------ 152 | **Features** 153 | 154 | - Added fft/ifft wrappers which consistently return fourier transforms with arbitrary Fourier conventions. 155 | - Boxes now may be composed with arbitrary Fourier conventions. 156 | - Documentation! 157 | 158 | **Enhancements** 159 | 160 | - New test to compare LogNormalPowerBox with standard PowerBox. 161 | - New project structure to make for easier location of functions. 162 | - Code quality improvements 163 | - New tests, better coverage. 164 | 165 | **Bugfixes** 166 | 167 | - Fixed incorrect boxsize for an odd number of cells 168 | - Ensure mean density is correct in LogNormalPowerBox 169 | 170 | v0.3.2 171 | ------ 172 | **Bugfixes** 173 | 174 | - Fixed bug in pyFFTW cache setting 175 | 176 | v0.3.1 177 | ------ 178 | **Enhancements** 179 | 180 | - New interface with pyFFTW to make fourier transforms ~twice as fast. No difference to the API. 181 | 182 | v0.3.0 183 | ------ 184 | **Features** 185 | 186 | - New functionality in ``get_power`` function to measure power-spectra of discrete samples. 187 | 188 | **Enhancements** 189 | 190 | - Added option to not store discrete positions in class (just return them) 191 | - ``get_power`` now more streamlined and intuitive in its API 192 | 193 | v0.2.3 [11 Jan 2017] 194 | -------------------- 195 | **Enhancements** 196 | 197 | - Improved estimation of power (in ``get_power``) for lowest k bin. 198 | 199 | v0.2.2 [11 Jan 2017] 200 | -------------------- 201 | **Bugfixes** 202 | 203 | - Fixed a bug in which the output power spectrum was a factor of sqrt(2) off in normalisation 204 | 205 | v0.2.1 [10 Jan 2017] 206 | -------------------- 207 | **Bugfixes** 208 | 209 | - Fixed output of ``create_discrete_sample`` when not randomising positions. 210 | 211 | **Enhancements** 212 | 213 | - New option to set bounds of discrete particles to (0, boxlength) rather than centring at 0. 214 | 215 | v0.2.0 [10 Jan 2017] 216 | -------------------- 217 | **Features** 218 | 219 | - New ``LogNormalPowerBox`` class for creating log-normal fields 220 | 221 | **Enhancements** 222 | 223 | - Restructuring of code for more flexibility after creation. Now requires ``cached_property`` package. 224 | 225 | v0.1.0 [27 Oct 2016] 226 | -------------------- 227 | First working version. Only Gaussian fields working. 
228 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Contributing 2 | ------------ 3 | 4 | Contributions are welcome, and they are greatly appreciated! Every 5 | little bit helps, and credit will always be given. 6 | 7 | Bug reports 8 | ~~~~~~~~~~~ 9 | 10 | When `reporting a bug `_ please include: 11 | 12 | * Your operating system name and version. 13 | * Any details about your local setup that might be helpful in troubleshooting. 14 | * Detailed steps to reproduce the bug. 15 | 16 | Documentation improvements 17 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 | 19 | powerbox could always use more documentation, whether as part of the 20 | official powerbox docs, in docstrings, or even on the web in blog posts, 21 | articles, and such. 22 | 23 | Feature requests and feedback 24 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | 26 | The best way to send feedback is to file an issue at https://github.com/steven-murray/powerbox/issues. 27 | 28 | If you are proposing a feature: 29 | 30 | * Explain in detail how it would work. 31 | * Keep the scope as narrow as possible, to make it easier to implement. 32 | * Remember that this is a volunteer-driven project, and that code contributions are welcome :) 33 | 34 | Development 35 | ~~~~~~~~~~~ 36 | 37 | To set up ``powerbox`` for local development: 38 | 39 | 1. Fork `powerbox `_ 40 | (look for the "Fork" button). 41 | 2. Clone your fork locally:: 42 | 43 | git clone git@github.com:your_name_here/powerbox.git 44 | 45 | 3. Create a branch for local development:: 46 | 47 | git checkout -b name-of-your-bugfix-or-feature 48 | 49 | Now you can make your changes locally. 50 | 51 | 4. When you're done making changes, run all the checks, doc builder and spell checker with `tox `_ one command:: 52 | 53 | tox 54 | 55 | 5. Commit your changes and push your branch to GitHub:: 56 | 57 | git add . 58 | git commit -m "Your detailed description of your changes." 59 | git push origin name-of-your-bugfix-or-feature 60 | 61 | 6. Submit a pull request through the GitHub website. 62 | 63 | Pull Request Guidelines 64 | +++++++++++++++++++++++ 65 | 66 | If you need some code review or feedback while you're developing the code just make the pull request. 67 | 68 | For merging, you should: 69 | 70 | 1. Include passing tests (run ``tox``) [1]_. 71 | 2. Update documentation when there's new API, functionality etc. 72 | 3. Add a note to ``CHANGELOG.rst`` about the changes. 73 | 4. Add yourself to ``CONTRIBUTORS.rst``. 74 | 75 | .. [1] If you don't have all the necessary python versions available locally you can rely on Travis - it will 76 | `run the tests `_ for each change you add in the pull request. 77 | 78 | It will be slower though ... 
79 | 80 | Tips 81 | ++++ 82 | 83 | To run a subset of tests:: 84 | 85 | tox -e envname -- py.test -k test_myfeature 86 | 87 | To run all the test environments in *parallel* (you need to ``pip install detox``):: 88 | 89 | detox 90 | -------------------------------------------------------------------------------- /CONTRIBUTORS.rst: -------------------------------------------------------------------------------- 1 | Authors 2 | ------- 3 | 4 | * `Steven Murray `_ 5 | 6 | Comments, corrections and suggestions 7 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 | * `Chris Jordan `_ 9 | * `Jordan Mirocha `_ 10 | -------------------------------------------------------------------------------- /LICENSE.rst: -------------------------------------------------------------------------------- 1 | License 2 | ------- 3 | 4 | Copyright (c) 2016 Steven Murray 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | powerbox 3 | ======== 4 | .. image:: https://img.shields.io/pypi/v/powerbox.svg 5 | :target: https://pypi.python.org/pypi/powerbox 6 | .. image:: https://travis-ci.org/steven-murray/powerbox.svg?branch=master 7 | :target: https://travis-ci.org/steven-murray/powerbox 8 | .. image:: https://coveralls.io/repos/github/steven-murray/powerbox/badge.svg?branch=master 9 | :target: https://coveralls.io/github/steven-murray/powerbox?branch=master 10 | .. image:: https://api.codacy.com/project/badge/Grade/5853411c78444a5a9c6ec4058c6dbda9 11 | :target: https://www.codacy.com/app/steven-murray/powerbox?utm_source=github.com&utm_medium=referral&utm_content=steven-murray/powerbox&utm_campaign=Badge_Grade 12 | .. image:: https://zenodo.org/badge/72076717.svg 13 | :target: https://zenodo.org/badge/latestdoi/72076717 14 | .. image:: http://joss.theoj.org/papers/10.21105/joss.00850/status.svg 15 | :target: https://doi.org/10.21105/joss.00850 16 | 17 | **Make arbitrarily structured, arbitrary-dimension boxes and log-normal mocks.** 18 | 19 | ``powerbox`` is a pure-python code for creating density grids (or boxes) that have an 20 | arbitrary two-point distribution (i.e. power spectrum). Primary motivations for creating 21 | the code were the simple creation of log-normal mock galaxy distributions, but the 22 | methodology can be used for other applications. 
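To give a flavour of the API, here is a minimal sketch (see the quickstart
linked below for the authoritative walkthrough; the parameter names follow the
documented ``PowerBox`` constructor):

.. code-block:: python

    import powerbox as pb

    # A 2D Gaussian field with an isotropic power-law spectrum P(k) = 0.1 k^-2.
    box = pb.PowerBox(
        N=512,                        # number of grid cells per side
        dim=2,                        # number of dimensions
        pk=lambda k: 0.1 * k**-2.0,   # the isotropic input power spectrum
        boxlength=1.0,                # physical side-length of the box
    )
    field = box.delta_x()             # a realisation of the over-density field

    # Consistency check: measure the power spectrum of the realisation.
    p_k, k = pb.get_power(field, box.boxlength)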
23 | 24 | Features 25 | -------- 26 | * Works in any number of dimensions. 27 | * Really simple. 28 | * Arbitrary isotropic power-spectra. 29 | * Create Gaussian or Log-Normal fields 30 | * Create discrete samples following the field, assuming it describes an over-density. 31 | * Measure power spectra of output fields to ensure consistency. 32 | * Seamlessly uses pyFFTW if available for ~double the speed. 33 | 34 | Installation 35 | ------------ 36 | Simply ``pip install powerbox``. If you want ~2x speedup for large boxes, you can also 37 | install ``pyfftw`` by doing ``pip install powerbox[all]``. If you are a conda user, you 38 | may want to install ``numpy`` with conda first. If you want to develop ``powerbox``, 39 | clone the repo and install with ``python -m pip install -e ".[dev]"``. 40 | 41 | Acknowledgment 42 | -------------- 43 | If you find ``powerbox`` useful in your research, please cite the Journal of Open Source Software paper at 44 | https://doi.org/10.21105/joss.00850. 45 | 46 | QuickLinks 47 | ---------- 48 | * Docs: https://powerbox.readthedocs.io 49 | * Quickstart: http://powerbox.readthedocs.io/en/latest/demos/getting_started.html 50 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = powerbox 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Summary 2 | =========== 3 | .. toctree:: 4 | api/powerbox 5 | api/dft 6 | api/tools 7 | -------------------------------------------------------------------------------- /docs/api/dft.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: powerbox.dft 2 | :no-inheritance-diagram: 3 | -------------------------------------------------------------------------------- /docs/api/powerbox.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: powerbox.powerbox 2 | :no-inheritance-diagram: 3 | -------------------------------------------------------------------------------- /docs/api/tools.rst: -------------------------------------------------------------------------------- 1 | .. automodapi:: powerbox.tools 2 | :no-inheritance-diagram: 3 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTORS.rst 2 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../CHANGELOG.rst 2 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # powerbox documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Feb 23 07:59:57 2017. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | import os 20 | import sys 21 | sys.path.insert(0, os.path.abspath('../')) 22 | 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | # 28 | # needs_sphinx = '1.0' 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = ['sphinx.ext.autodoc', 34 | 'sphinx.ext.doctest', 35 | 'sphinx.ext.intersphinx', 36 | 'sphinx.ext.coverage', 37 | 'sphinx.ext.mathjax', 38 | 'sphinx.ext.viewcode', 39 | 'numpydoc', 40 | 'sphinx.ext.autosummary', 41 | 'nbsphinx', 42 | 'IPython.sphinxext.ipython_console_highlighting', 43 | 'sphinx_automodapi.automodapi' 44 | ] 45 | 46 | automodapi_toctreedirnm = "_api" 47 | automodsumm_inherited_members=True 48 | 49 | #autosummary_generate = True 50 | numpydoc_show_class_members=False 51 | 52 | # Add any paths that contain templates here, relative to this directory. 53 | #templates_path = ['templates'] 54 | 55 | #autodoc_mock_imports = ['numpy'] 56 | #imported_members = False 57 | 58 | # The suffix(es) of source filenames. 59 | # You can specify multiple suffix as a list of string: 60 | # 61 | # source_suffix = ['.rst', '.md'] 62 | source_suffix = '.rst' 63 | 64 | # The master toctree document. 65 | master_doc = 'index' 66 | 67 | # General information about the project. 68 | project = 'powerbox' 69 | copyright = '2017, Steven Murray' 70 | author = 'Steven Murray' 71 | 72 | # The version info for the project you're documenting, acts as replacement for 73 | # |version| and |release|, also used in various other places throughout the 74 | # built documents. 75 | # 76 | import re, io 77 | def read(*names, **kwargs): 78 | with io.open( 79 | os.path.join(os.path.dirname(__file__), *names), 80 | encoding=kwargs.get("encoding", "utf8") 81 | ) as fp: 82 | return fp.read() 83 | 84 | 85 | def find_version(*file_paths): 86 | version_file = read(*file_paths) 87 | version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", 88 | version_file, re.M) 89 | if version_match: 90 | return version_match.group(1) 91 | raise RuntimeError("Unable to find version string.") 92 | 93 | # The short X.Y version. 94 | version = find_version("..","powerbox", "__init__.py") 95 | # The full version, including alpha/beta/rc tags. 96 | release = find_version("..","powerbox", "__init__.py") 97 | 98 | # The language for content autogenerated by Sphinx. Refer to documentation 99 | # for a list of supported languages. 
100 | # 101 | # This is also used if you do content translation via gettext catalogs. 102 | # Usually you set "language" from the command line for these cases. 103 | language = None 104 | 105 | # List of patterns, relative to source directory, that match files and 106 | # directories to ignore when looking for source files. 107 | # This patterns also effect to html_static_path and html_extra_path 108 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store','templates','**.ipynb_checkpoints'] 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # If true, `todo` and `todoList` produce output, else they produce nothing. 114 | todo_include_todos = False 115 | 116 | 117 | # -- Options for HTML output ---------------------------------------------- 118 | 119 | # The theme to use for HTML and HTML Help pages. See the documentation for 120 | # a list of builtin themes. 121 | # 122 | html_theme = 'sphinx_rtd_theme' 123 | 124 | # Theme options are theme-specific and customize the look and feel of a theme 125 | # further. For a list of options available for each theme, see the 126 | # documentation. 127 | # 128 | # html_theme_options = {} 129 | 130 | # Add any paths that contain custom static files (such as style sheets) here, 131 | # relative to this directory. They are copied after the builtin static files, 132 | # so a file named "default.css" will overwrite the builtin "default.css". 133 | html_static_path = ['_static'] 134 | 135 | 136 | # -- Options for HTMLHelp output ------------------------------------------ 137 | 138 | # Output file base name for HTML help builder. 139 | htmlhelp_basename = 'powerboxdoc' 140 | 141 | 142 | # -- Options for LaTeX output --------------------------------------------- 143 | 144 | latex_elements = { 145 | # The paper size ('letterpaper' or 'a4paper'). 146 | # 147 | # 'papersize': 'letterpaper', 148 | 149 | # The font size ('10pt', '11pt' or '12pt'). 150 | # 151 | # 'pointsize': '10pt', 152 | 153 | # Additional stuff for the LaTeX preamble. 154 | # 155 | # 'preamble': '', 156 | 157 | # Latex figure (float) alignment 158 | # 159 | # 'figure_align': 'htbp', 160 | } 161 | 162 | # Grouping the document tree into LaTeX files. List of tuples 163 | # (source start file, target name, title, 164 | # author, documentclass [howto, manual, or own class]). 165 | latex_documents = [ 166 | (master_doc, 'powerbox.tex', 'powerbox Documentation', 167 | 'Steven Murray', 'manual'), 168 | ] 169 | 170 | 171 | # -- Options for manual page output --------------------------------------- 172 | 173 | # One entry per manual page. List of tuples 174 | # (source start file, name, description, authors, manual section). 175 | man_pages = [ 176 | (master_doc, 'powerbox', 'powerbox Documentation', 177 | [author], 1) 178 | ] 179 | 180 | 181 | # -- Options for Texinfo output ------------------------------------------- 182 | 183 | # Grouping the document tree into Texinfo files. List of tuples 184 | # (source start file, target name, title, author, 185 | # dir menu entry, description, category) 186 | texinfo_documents = [ 187 | (master_doc, 'powerbox', 'powerbox Documentation', 188 | author, 'powerbox', 'One line description of project.', 189 | 'Miscellaneous'), 190 | ] 191 | 192 | 193 | 194 | 195 | # Example configuration for intersphinx: refer to the Python standard library. 
196 | intersphinx_mapping = {'https://docs.python.org/': None} 197 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/demos/algorithm.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# How Does Powerbox Work?" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "It may be useful to understand the workings of powerbox to some extent -- either to diagnose performance issues or to understand its behaviour in certain contexts.\n", 15 | "\n", 16 | "The basic algorithm (for a Gaussian field) is the following:\n", 17 | "\n", 18 | "1. Given a box length $L$ (parameter ``boxlength``) and number of cells along a side, $N$ (parameter ``N``), as well as Fourier convention parameters $(a,b)$, determine wavenumbers along a side of the box: $k = 2\\pi j/(bL)$, for $j\\in (-N/2,..., N/2)$.\n", 19 | "2. From these wavenumbers along each side, determine the *magnitude* of the wavenumbers at every point of the $d$-dimensional box, $k_j= \\sqrt{\\sum_{i=1}^d k_{i,j}^2}$.\n", 20 | "3. Create an array, $G_j$, which assigns a complex number to each grid point. The complex number will have magnitude drawn from a standard normal, and phase distributed evenly on $(0,2\\pi)$.\n", 21 | "4. Determine $\\delta_{k,j} = G_j \\sqrt{P(k_j)}$.\n", 22 | "5. Determine $\\delta_x = V \\mathcal{F}^{-1}(\\delta_k)$, with $V = \\prod_{i=1}^{d} L_i$." 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "For a Log-Normal field, the steps are slightly more complex, and involve determining the power spectrum that *would be* required on a Gaussian field to yield the same power spectrum for a log-normal field. The details of this approach can be found in [Coles and Jones (1991)](http://adsabs.harvard.edu/abs/1991MNRAS.248....1C) or [Beutler et al. (2011)](https://academic.oup.com/mnras/article/416/4/3017/976636)." 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "One characteristic of this algorithm is that it contains *no information* below the resolution scale $L/N$. Thus, a good rule-of-thumb is to choose $N$ large enough to ensure that the smallest scale of interest is covered by a factor of 1.5, i.e., if the smallest length-scale of interest is $s$, then use $N = 1.5 L/s$.\n", 37 | "\n", 38 | "The range of $k$ used with this choice of $N$ also depends on the Fourier Convention used. For the default convention of $b=1$, the smallest scales are equivalent to $k = \\pi N/L$." 
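As a concrete illustration, here is a hedged numpy sketch of these five steps. It is illustrative only: the actual implementation in ``powerbox.powerbox`` additionally enforces Hermitian symmetry of $\delta_k$ so that $\delta_x$ is exactly real, and handles general $(a, b)$ conventions through its ``dft`` module, whereas here numpy's ``ifftn`` stands in for the properly normalised inverse transform.

```python
import numpy as np

def gaussian_field_sketch(N, L, pk, dim=3, seed=None):
    """Sketch of steps 1-5 above for the b=1 convention (illustrative only)."""
    rng = np.random.default_rng(seed)

    # Step 1: wavenumbers along one side, k = 2*pi*j/L for j in (-N/2, ..., N/2).
    k1d = 2 * np.pi * np.fft.fftfreq(N, d=L / N)

    # Step 2: magnitude |k| at every point of the d-dimensional box.
    kmag = np.sqrt(sum(ki**2 for ki in np.meshgrid(*([k1d] * dim), indexing="ij")))

    # Step 3: complex Gaussian amplitudes with phase uniform on (0, 2*pi).
    G = rng.normal(size=kmag.shape) * np.exp(1j * rng.uniform(0, 2 * np.pi, kmag.shape))

    # Step 4: delta_k = G * sqrt(P(k)), leaving the k=0 (mean) mode at zero.
    delta_k = np.zeros(kmag.shape, dtype=complex)
    nonzero = kmag > 0
    delta_k[nonzero] = G[nonzero] * np.sqrt(pk(kmag[nonzero]))

    # Step 5: delta_x = V * Finv(delta_k), V = L**d; absolute scale is schematic
    # here since numpy's ifftn differs from the continuous inverse transform.
    V = L**dim
    return (V * np.fft.ifftn(delta_k)).real

# e.g. a 2D field with P(k) = 0.1 k^-2:
field = gaussian_field_sketch(N=128, L=1.0, pk=lambda k: 0.1 * k**-2.0, dim=2, seed=0)
```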
39 |    ]
40 |   }
41 |  ],
42 |  "metadata": {
43 |   "kernelspec": {
44 |    "display_name": "Python [conda env:powerbox]",
45 |    "language": "python",
46 |    "name": "conda-env-powerbox-py"
47 |   },
48 |   "language_info": {
49 |    "codemirror_mode": {
50 |     "name": "ipython",
51 |     "version": 3
52 |    },
53 |    "file_extension": ".py",
54 |    "mimetype": "text/x-python",
55 |    "name": "python",
56 |    "nbconvert_exporter": "python",
57 |    "pygments_lexer": "ipython3",
58 |    "version": "3.6.5"
59 |   },
60 |   "latex_envs": {
61 |    "LaTeX_envs_menu_present": true,
62 |    "autoclose": false,
63 |    "autocomplete": true,
64 |    "bibliofile": "biblio.bib",
65 |    "cite_by": "apalike",
66 |    "current_citInitial": 1,
67 |    "eqLabelWithNumbers": true,
68 |    "eqNumInitial": 1,
69 |    "hotkeys": {
70 |     "equation": "Ctrl-E",
71 |     "itemize": "Ctrl-I"
72 |    },
73 |    "labels_anchors": false,
74 |    "latex_user_defs": false,
75 |    "report_style_numbering": false,
76 |    "user_envs_cfg": false
77 |   },
78 |   "toc": {
79 |    "base_numbering": 1,
80 |    "nav_menu": {},
81 |    "number_sections": true,
82 |    "sideBar": true,
83 |    "skip_h1_title": false,
84 |    "title_cell": "Table of Contents",
85 |    "title_sidebar": "Contents",
86 |    "toc_cell": false,
87 |    "toc_position": {},
88 |    "toc_section_display": true,
89 |    "toc_window_display": false
90 |   }
91 |  },
92 |  "nbformat": 4,
93 |  "nbformat_minor": 2
94 | }
95 | 
--------------------------------------------------------------------------------
/docs/demos/mpi.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "metadata": {},
6 |    "source": [
7 |     "# Multiprocessing with Powerbox"
8 |    ]
9 |   },
10 |   {
11 |    "cell_type": "markdown",
12 |    "metadata": {},
13 |    "source": [
14 |     "There are two ways to parallelize the FFT calculations in `powerbox`. If you have `pyfftw` installed, you can take advantage of the multithreaded FFT computations that it offers (numpy does not support this), simply by setting `nthreads` to a number greater than one. However, if you would like to run many FFTs simultaneously, you may wish to parallelize on a higher level, i.e. run each FFT on a different process (using `multiprocessing` or `mpi` or similar). In this case, it is important that the underlying FFT library use only a single thread, or you will get **VERY SLOW** computation times because the two layers of threads don't communicate well. In this notebook, we show how to use both multiple threads via `pyfftw` and also multiple processes with either the `numpy` or `pyfftw` backends."
15 |    ]
16 |   },
17 |   {
18 |    "cell_type": "code",
19 |    "execution_count": 1,
20 |    "metadata": {},
21 |    "outputs": [
22 |     {
23 |      "data": {
24 |       "text/plain": [
25 |        "'0.7.4.dev19+g811310f'"
26 |       ]
27 |      },
28 |      "execution_count": 1,
29 |      "metadata": {},
30 |      "output_type": "execute_result"
31 |     }
32 |    ],
33 |    "source": [
34 |     "import powerbox as pb\n",
35 |     "pb.__version__"
36 |    ]
37 |   },
38 |   {
39 |    "cell_type": "code",
40 |    "execution_count": 2,
41 |    "metadata": {},
42 |    "outputs": [],
43 |    "source": [
44 |     "from powerbox import get_power\n",
45 |     "import numpy as np \n",
46 |     "from time import time\n",
47 |     "from multiprocessing import Pool\n",
48 |     "from functools import partial"
49 |    ]
50 |   },
51 |   {
52 |    "cell_type": "markdown",
53 |    "metadata": {},
54 |    "source": [
55 |     "First, let's define a simple `powerbox` operation to test. This function calculates a power spectrum on a random box of dimension $256^3$ and returns the computation time."
56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 3, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "shape = (256,) * 3 # Size of the box to FT\n", 65 | "arr = np.random.rand(np.prod(shape)).reshape(shape) # Random box on which to calculate the FFT\n", 66 | "ncalls = 4\n", 67 | "nthreads = 4\n", 68 | "\n", 69 | "def run_pb(idx, **kwargs):\n", 70 | " t0 = time()\n", 71 | " # default is nthreads = None which uses nthreads = number of available CPUs.\n", 72 | " get_power(arr, shape, bins = 50, **kwargs) \n", 73 | " return time() - t0" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "### Single-thread using `pyFFTW`" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 5, 86 | "metadata": {}, 87 | "outputs": [ 88 | { 89 | "name": "stdout", 90 | "output_type": "stream", 91 | "text": [ 92 | "Total wall time: 12.31 sec\n", 93 | "Total CPU time: 12.31 sec\n" 94 | ] 95 | } 96 | ], 97 | "source": [ 98 | "start = time()\n", 99 | "all_times = [run_pb(i, nthreads=1) for i in range(ncalls)]\n", 100 | "end = time()\n", 101 | "print(f'Total wall time: {end - start:.2f} sec')\n", 102 | "print(f\"Total CPU time: {np.sum(all_times):.2f} sec\")" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "### Multi-threaded `pyFFTW`" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 6, 115 | "metadata": {}, 116 | "outputs": [ 117 | { 118 | "name": "stdout", 119 | "output_type": "stream", 120 | "text": [ 121 | "Total wall time: 9.86 sec\n", 122 | "Total CPU time: 9.86 sec\n" 123 | ] 124 | } 125 | ], 126 | "source": [ 127 | "start = time()\n", 128 | "all_times = [run_pb(i, nthreads=nthreads) for i in range(ncalls)]\n", 129 | "end = time()\n", 130 | "print(f'Total wall time: {end - start:.2f} sec')\n", 131 | "print(f\"Total CPU time: {np.sum(all_times):.2f} sec\")" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "Here, we see that if `pyFFTW` is installed, it can use multiple threads to compute the FFTs, reducing walltime by ~20%. \n", 139 | "This is the fastest way to compute the power spectrum in `powerbox` if you have multiple cores available, and only one FFT to perform." 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "## Multiprocessing with `pyFFTW` as a backend" 147 | ] 148 | }, 149 | { 150 | "cell_type": "markdown", 151 | "metadata": {}, 152 | "source": [ 153 | "We can keep using `pyFFTW` as a backend by setting the `nthreads` argument to 1." 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 9, 159 | "metadata": {}, 160 | "outputs": [ 161 | { 162 | "name": "stdout", 163 | "output_type": "stream", 164 | "text": [ 165 | "Total wall time: 5.01 sec\n", 166 | "Total CPU time: 16.77 sec\n" 167 | ] 168 | } 169 | ], 170 | "source": [ 171 | "nprocs = ncalls\n", 172 | "\n", 173 | "run_pb1 = partial(run_pb, nthreads=1)\n", 174 | "\n", 175 | "start = time()\n", 176 | "p = Pool(processes=nprocs)\n", 177 | "all_times = p.map(run_pb1, range(ncalls))\n", 178 | "end = time()\n", 179 | "print(f'Total wall time: {end - start:.2f} sec')\n", 180 | "print(f\"Total CPU time: {np.sum(all_times):.2f} sec\")" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "Here, the total wall time is reduced by ~50% because we are doing each of the 4 FFTs \n", 188 | "in parallel. 
Note that here there is significant overhead in starting the processes,\n", 189 | "which leads to the meager gains." 190 | ] 191 | }, 192 | { 193 | "cell_type": "markdown", 194 | "metadata": {}, 195 | "source": [ 196 | "## Multiprocessing with `numpy` as a backend" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "metadata": {}, 202 | "source": [ 203 | "We can also just use the `numpy` FFT backend by setting `nthreads` to `False`." 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 10, 209 | "metadata": {}, 210 | "outputs": [ 211 | { 212 | "name": "stdout", 213 | "output_type": "stream", 214 | "text": [ 215 | "Total wall time: 4.70 sec\n", 216 | "Total CPU time: 15.61 sec\n" 217 | ] 218 | } 219 | ], 220 | "source": [ 221 | "run_pb1 = partial(run_pb, nthreads=False)\n", 222 | "\n", 223 | "start = time()\n", 224 | "p = Pool(processes=nprocs)\n", 225 | "all_times = p.map(run_pb1, range(ncalls))\n", 226 | "end = time()\n", 227 | "print(f'Total wall time: {end - start:.2f} sec')\n", 228 | "print(f\"Total CPU time: {np.sum(all_times):.2f} sec\")" 229 | ] 230 | }, 231 | { 232 | "cell_type": "markdown", 233 | "metadata": {}, 234 | "source": [ 235 | "The runtime is roughly the same whether we use `numpy` or single-threaded `pyFFTW`." 236 | ] 237 | } 238 | ], 239 | "metadata": { 240 | "kernelspec": { 241 | "display_name": "powerbox", 242 | "language": "python", 243 | "name": "powerbox" 244 | }, 245 | "language_info": { 246 | "codemirror_mode": { 247 | "name": "ipython", 248 | "version": 3 249 | }, 250 | "file_extension": ".py", 251 | "mimetype": "text/x-python", 252 | "name": "python", 253 | "nbconvert_exporter": "python", 254 | "pygments_lexer": "ipython3", 255 | "version": "3.11.0" 256 | } 257 | }, 258 | "nbformat": 4, 259 | "nbformat_minor": 4 260 | } 261 | -------------------------------------------------------------------------------- /docs/examples.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | To help get you started using ``powerbox``, we've compiled a few simple examples. 5 | Other examples can be found in the :doc:`API documentation ` for each object or by looking at some of the tests. 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | demos/getting_started 11 | demos/algorithm 12 | demos/cosmological_fields 13 | demos/dft 14 | demos/mpi 15 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. powerbox documentation master file, created by 2 | sphinx-quickstart on Mon Feb 13 10:17:24 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root ``toctree`` directive. 5 | 6 | 7 | .. include:: ../README.rst 8 | 9 | Contents 10 | -------- 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | examples 16 | license 17 | changelog 18 | authors 19 | contributing 20 | api 21 | 22 | 23 | 24 | Indices and tables 25 | ------------------ 26 | 27 | * :ref:`genindex` 28 | * :ref:`modindex` 29 | * :ref:`search` 30 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../LICENSE.rst 2 | -------------------------------------------------------------------------------- /docs/templates/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | 6 | .. autoclass:: {{ objname }} 7 | 8 | {% block methods %} 9 | 10 | {% if methods %} 11 | .. rubric:: Methods 12 | 13 | .. autosummary:: 14 | :toctree: {{ objname }} 15 | {% for item in methods %} 16 | ~{{ name }}.{{ item }} 17 | {%- endfor %} 18 | {% endif %} 19 | {% endblock %} 20 | 21 | {% block attributes %} 22 | {% if attributes %} 23 | .. rubric:: Attributes 24 | 25 | .. autosummary:: 26 | :toctree: {{ objname }} 27 | {% for item in attributes %} 28 | ~{{ name }}.{{ item }} 29 | {%- endfor %} 30 | {% endif %} 31 | {% endblock %} 32 | -------------------------------------------------------------------------------- /docs/templates/modules.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. automodule:: {{ fullname }} 5 | 6 | {% block functions %} 7 | {% if functions %} 8 | .. rubric:: Functions 9 | 10 | .. autosummary:: 11 | :toctree: {{ objname }} 12 | {% for item in functions %} 13 | {{ item }} 14 | {%- endfor %} 15 | {% endif %} 16 | {% endblock %} 17 | 18 | {% block classes %} 19 | {% if classes %} 20 | .. rubric:: Classes 21 | 22 | .. autosummary:: 23 | :toctree: {{ objname }} 24 | :template: class.rst 25 | {% for item in classes %} 26 | {{ item }} 27 | {%- endfor %} 28 | {% endif %} 29 | {% endblock %} 30 | 31 | {% block exceptions %} 32 | {% if exceptions %} 33 | .. rubric:: Exceptions 34 | 35 | .. autosummary:: 36 | {% for item in exceptions %} 37 | {{ item }} 38 | {%- endfor %} 39 | {% endif %} 40 | {% endblock %} 41 | -------------------------------------------------------------------------------- /paper/Makefile: -------------------------------------------------------------------------------- 1 | # ============================================================================= 2 | # @file Makefile 3 | # @brief Makefile for generating previews of the paper 4 | # @author Michael Hucka 5 | # @license Please see the file named LICENSE in the project directory 6 | # @website https://github.com/casics/dassie 7 | # ============================================================================= 8 | 9 | # Change the following values to match your configuration. 10 | # ............................................................................. 11 | 12 | input := paper 13 | 14 | vol := 3 15 | issue := 28 16 | gh_issue := 850 17 | year := 2018 18 | submitted := 23 July $(year) 19 | accepted := 21 August $(year) 20 | 21 | # Main code -- no more customization variables after this point 22 | # ............................................................................. 
23 | 
24 | title    := $(shell grep title: $(input).md | sed 's/title: *//' | tr -d "'")
25 | authors  := $(shell sed -n '/authors:/,/affiliations:/p' $(input).md | grep name: | sed 's/- name: *//' | paste -d, -s - | sed 's/,/, /g')
26 | repo     := $(shell git remote get-url origin | sed 's|git@github.com:|https://github.com/|' | sed 's/\.git//')
27 | 
28 | $(input).tex: $(input).md $(input).bib Makefile
29 | 	/usr/bin/pandoc \
30 | 	-V paper_title="$(title)" \
31 | 	-V footnote_paper_title="$(title)" \
32 | 	-V citation_author="$(authors)" \
33 | 	-V repository="$(repo)" \
34 | 	-V archive_doi="http://dx.doi.org/10.5281/zenodo.1400822" \
35 | 	-V formatted_doi="10.21105/joss.00850" \
36 | 	-V paper_url="http://joss.theoj.org/papers/" \
37 | 	-V review_issue_url="https://github.com/openjournals/joss-reviews/issues/$(gh_issue)" \
38 | 	-V issue="$(issue)" \
39 | 	-V volume="$(vol)" \
40 | 	-V year="$(year)" \
41 | 	-V submitted="$(submitted)" \
42 | 	-V published="$(accepted)" \
43 | 	-V page="$(gh_issue)" \
44 | 	-V graphics="true" \
45 | 	-V logo_path="joss-logo.png" \
46 | 	-V geometry:margin=1in \
47 | 	--verbose \
48 | 	-o $(input).tex \
49 | 	--pdf-engine=xelatex \
50 | 	--filter /usr/bin/pandoc-citeproc $(input).md \
51 | 	--from markdown+autolink_bare_uris \
52 | 	--template "latex.template"
53 | 
54 | $(input).pdf: $(input).tex
55 | 	xelatex $(input).tex
56 | 
57 | autorefresh:;
58 | 	((ls $(input).md $(input).bib | entr make $(input).tex) &)
--------------------------------------------------------------------------------
/paper/joss-logo.png: https://raw.githubusercontent.com/steven-murray/powerbox/69c5cc10464fb6abf730bf2e4b67136aa1d2d4b2/paper/joss-logo.png
--------------------------------------------------------------------------------
/paper/latex.template:
--------------------------------------------------------------------------------
1 | \documentclass[10pt,a4paper,onecolumn]{article}
2 | \usepackage{marginnote}
3 | \usepackage{graphicx}
4 | \usepackage{xcolor}
5 | \usepackage{authblk,etoolbox}
6 | \usepackage{titlesec}
7 | \usepackage{calc}
8 | \usepackage{tikz}
9 | \usepackage{hyperref}
10 | \hypersetup{colorlinks,breaklinks,
11 |             urlcolor=[rgb]{0.0, 0.5, 1.0},
12 |             linkcolor=[rgb]{0.0, 0.5, 1.0}}
13 | \usepackage{caption}
14 | \usepackage{tcolorbox}
15 | \usepackage{amssymb,amsmath}
16 | \usepackage{ifxetex,ifluatex}
17 | \usepackage{seqsplit}
18 | \usepackage{fixltx2e} % provides \textsubscript
19 | \usepackage[
20 |   backend=biber,
21 |   % style=alphabetic,
22 |   % citestyle=numeric
23 | ]{biblatex}
24 | %\bibliography{$bibliography$}
25 | 
26 | 
27 | % --- Page layout -------------------------------------------------------------
28 | \usepackage[top=3.5cm, bottom=3cm, right=1.5cm, left=1.0cm,
29 |             headheight=2.2cm, reversemp, includemp, marginparwidth=4.5cm]{geometry}
30 | 
31 | % --- Default font ------------------------------------------------------------
32 | % \renewcommand\familydefault{\sfdefault}
33 | 
34 | % --- Style -------------------------------------------------------------------
35 | \renewcommand{\bibfont}{\small \sffamily}
36 | \renewcommand{\captionfont}{\small\sffamily}
37 | \renewcommand{\captionlabelfont}{\bfseries}
38 | 
39 | % --- Section/SubSection/SubSubSection ----------------------------------------
40 | \titleformat{\section}
41 |   {\normalfont\sffamily\Large\bfseries}
42 |   {}{0pt}{}
43 | \titleformat{\subsection}
44 |   {\normalfont\sffamily\large\bfseries}
45 |   {}{0pt}{}
46 | \titleformat{\subsubsection}
47 | 
{\normalfont\sffamily\bfseries} 48 | {}{0pt}{} 49 | \titleformat*{\paragraph} 50 | {\sffamily\normalsize} 51 | 52 | 53 | % --- Header / Footer --------------------------------------------------------- 54 | \usepackage{fancyhdr} 55 | \pagestyle{fancy} 56 | \fancyhf{} 57 | %\renewcommand{\headrulewidth}{0.50pt} 58 | \renewcommand{\headrulewidth}{0pt} 59 | \fancyhead[L]{\hspace{-0.75cm}\includegraphics[width=5.5cm]{$logo_path$}} 60 | \fancyhead[C]{} 61 | \fancyhead[R]{} 62 | \renewcommand{\footrulewidth}{0.25pt} 63 | 64 | \fancyfoot[L]{\footnotesize{\sffamily $citation_author$, ($year$). $footnote_paper_title$. \textit{$journal_name$}, $volume$($issue$), $page$. \href{https://doi.org/$formatted_doi$}{https://doi.org/$formatted_doi$}}} 65 | 66 | 67 | \fancyfoot[R]{\sffamily \thepage} 68 | \makeatletter 69 | \let\ps@plain\ps@fancy 70 | \fancyheadoffset[L]{4.5cm} 71 | \fancyfootoffset[L]{4.5cm} 72 | 73 | % --- Macros --------- 74 | 75 | \definecolor{linky}{rgb}{0.0, 0.5, 1.0} 76 | 77 | \newtcolorbox{repobox} 78 | {colback=red, colframe=red!75!black, 79 | boxrule=0.5pt, arc=2pt, left=6pt, right=6pt, top=3pt, bottom=3pt} 80 | 81 | \newcommand{\ExternalLink}{% 82 | \tikz[x=1.2ex, y=1.2ex, baseline=-0.05ex]{% 83 | \begin{scope}[x=1ex, y=1ex] 84 | \clip (-0.1,-0.1) 85 | --++ (-0, 1.2) 86 | --++ (0.6, 0) 87 | --++ (0, -0.6) 88 | --++ (0.6, 0) 89 | --++ (0, -1); 90 | \path[draw, 91 | line width = 0.5, 92 | rounded corners=0.5] 93 | (0,0) rectangle (1,1); 94 | \end{scope} 95 | \path[draw, line width = 0.5] (0.5, 0.5) 96 | -- (1, 1); 97 | \path[draw, line width = 0.5] (0.6, 1) 98 | -- (1, 1) -- (1, 0.6); 99 | } 100 | } 101 | 102 | % --- Title / Authors --------------------------------------------------------- 103 | % patch \maketitle so that it doesn't center 104 | \patchcmd{\@maketitle}{center}{flushleft}{}{} 105 | \patchcmd{\@maketitle}{center}{flushleft}{}{} 106 | % patch \maketitle so that the font size for the title is normal 107 | \patchcmd{\@maketitle}{\LARGE}{\LARGE\sffamily}{}{} 108 | % patch the patch by authblk so that the author block is flush left 109 | \def\maketitle{{% 110 | \renewenvironment{tabular}[2][] 111 | {\begin{flushleft}} 112 | {\end{flushleft}} 113 | \AB@maketitle}} 114 | \makeatletter 115 | \renewcommand\AB@affilsepx{ \protect\Affilfont} 116 | %\renewcommand\AB@affilnote[1]{{\bfseries #1}\hspace{2pt}} 117 | \renewcommand\AB@affilnote[1]{{\bfseries #1}\hspace{3pt}} 118 | \makeatother 119 | \renewcommand\Authfont{\sffamily\bfseries} 120 | \renewcommand\Affilfont{\sffamily\small\mdseries} 121 | \setlength{\affilsep}{1em} 122 | 123 | 124 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 125 | \usepackage[$if(fontenc)$$fontenc$$else$T1$endif$]{fontenc} 126 | \usepackage[utf8]{inputenc} 127 | 128 | \else % if luatex or xelatex 129 | \ifxetex 130 | \usepackage{mathspec} 131 | \else 132 | \usepackage{fontspec} 133 | \fi 134 | \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} 135 | 136 | \fi 137 | % use upquote if available, for straight quotes in verbatim environments 138 | \IfFileExists{upquote.sty}{\usepackage{upquote}}{} 139 | % use microtype if available 140 | \IfFileExists{microtype.sty}{% 141 | \usepackage{microtype} 142 | \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts 143 | }{} 144 | 145 | \usepackage{hyperref} 146 | $if(colorlinks)$ 147 | \PassOptionsToPackage{usenames,dvipsnames}{color} % color is loaded by hyperref 148 | $endif$ 149 | \hypersetup{unicode=true, 150 | $if(title-meta)$ 151 | pdftitle={$title-meta$}, 152 | $endif$ 153 | 
$if(author-meta)$ 154 | pdfauthor={$author-meta$}, 155 | $endif$ 156 | $if(keywords)$ 157 | pdfkeywords={$for(keywords)$$keywords$$sep$; $endfor$}, 158 | $endif$ 159 | $if(colorlinks)$ 160 | colorlinks=true, 161 | linkcolor=$if(linkcolor)$$linkcolor$$else$Maroon$endif$, 162 | citecolor=$if(citecolor)$$citecolor$$else$Blue$endif$, 163 | urlcolor=$if(urlcolor)$$urlcolor$$else$Blue$endif$, 164 | $else$ 165 | pdfborder={0 0 0}, 166 | $endif$ 167 | breaklinks=true} 168 | \urlstyle{same} % don't use monospace font for urls 169 | $if(lang)$ 170 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 171 | \usepackage[shorthands=off,$for(babel-otherlangs)$$babel-otherlangs$,$endfor$main=$babel-lang$]{babel} 172 | $if(babel-newcommands)$ 173 | $babel-newcommands$ 174 | $endif$ 175 | \else 176 | \usepackage{polyglossia} 177 | \setmainlanguage[$polyglossia-lang.options$]{$polyglossia-lang.name$} 178 | $for(polyglossia-otherlangs)$ 179 | \setotherlanguage[$polyglossia-otherlangs.options$]{$polyglossia-otherlangs.name$} 180 | $endfor$ 181 | \fi 182 | $endif$ 183 | $if(natbib)$ 184 | \usepackage{natbib} 185 | \bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$} 186 | $endif$ 187 | $if(biblatex)$ 188 | \usepackage$if(biblio-style)$[style=$biblio-style$]$endif${biblatex} 189 | $if(biblatexoptions)$\ExecuteBibliographyOptions{$for(biblatexoptions)$$biblatexoptions$$sep$,$endfor$}$endif$ 190 | $for(bibliography)$ 191 | \addbibresource{$bibliography$} 192 | $endfor$ 193 | $endif$ 194 | $if(listings)$ 195 | \usepackage{listings} 196 | $endif$ 197 | $if(lhs)$ 198 | \lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small\ttfamily}}{} 199 | $endif$ 200 | $if(highlighting-macros)$ 201 | $highlighting-macros$ 202 | $endif$ 203 | $if(verbatim-in-note)$ 204 | \usepackage{fancyvrb} 205 | \VerbatimFootnotes % allows verbatim text in footnotes 206 | $endif$ 207 | $if(tables)$ 208 | \usepackage{longtable,booktabs} 209 | $endif$ 210 | $if(graphics)$ 211 | \usepackage{graphicx,grffile} 212 | \makeatletter 213 | \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} 214 | \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} 215 | \makeatother 216 | % Scale images if necessary, so that they will not overflow the page 217 | % margins by default, and it is still possible to overwrite the defaults 218 | % using explicit options in \includegraphics[width, height, ...]{} 219 | \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} 220 | $endif$ 221 | $if(links-as-notes)$ 222 | % Make links footnotes instead of hotlinks: 223 | \renewcommand{\href}[2]{#2\footnote{\url{#1}}} 224 | $endif$ 225 | $if(strikeout)$ 226 | \usepackage[normalem]{ulem} 227 | % avoid problems with \sout in headers with hyperref: 228 | \pdfstringdefDisableCommands{\renewcommand{\sout}{}} 229 | $endif$ 230 | $if(indent)$ 231 | $else$ 232 | \IfFileExists{parskip.sty}{% 233 | \usepackage{parskip} 234 | }{% else 235 | \setlength{\parindent}{0pt} 236 | \setlength{\parskip}{6pt plus 2pt minus 1pt} 237 | } 238 | $endif$ 239 | \setlength{\emergencystretch}{3em} % prevent overfull lines 240 | \providecommand{\tightlist}{% 241 | \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} 242 | $if(numbersections)$ 243 | \setcounter{secnumdepth}{5} 244 | $else$ 245 | \setcounter{secnumdepth}{0} 246 | $endif$ 247 | $if(subparagraph)$ 248 | $else$ 249 | % Redefines (sub)paragraphs to behave more like sections 250 | \ifx\paragraph\undefined\else 251 | \let\oldparagraph\paragraph 252 | 
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} 253 | \fi 254 | \ifx\subparagraph\undefined\else 255 | \let\oldsubparagraph\subparagraph 256 | \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} 257 | \fi 258 | $endif$ 259 | $if(dir)$ 260 | \ifxetex 261 | % load bidi as late as possible as it modifies e.g. graphicx 262 | $if(latex-dir-rtl)$ 263 | \usepackage[RTLdocument]{bidi} 264 | $else$ 265 | \usepackage{bidi} 266 | $endif$ 267 | \fi 268 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 269 | \TeXXeTstate=1 270 | \newcommand{\RL}[1]{\beginR #1\endR} 271 | \newcommand{\LR}[1]{\beginL #1\endL} 272 | \newenvironment{RTL}{\beginR}{\endR} 273 | \newenvironment{LTR}{\beginL}{\endL} 274 | \fi 275 | $endif$ 276 | $for(header-includes)$ 277 | $header-includes$ 278 | $endfor$ 279 | 280 | $if(title)$ 281 | \title{$title$$if(thanks)$\thanks{$thanks$}$endif$} 282 | $endif$ 283 | $if(subtitle)$ 284 | \providecommand{\subtitle}[1]{} 285 | \subtitle{$subtitle$} 286 | $endif$ 287 | 288 | $if(authors)$ 289 | $for(authors)$ 290 | $if(authors.affiliation)$ 291 | \author[$authors.affiliation$]{$authors.name$} 292 | $else$ 293 | \author{$authors.name$} 294 | $endif$ 295 | $endfor$ 296 | $endif$ 297 | 298 | $if(affiliations)$ 299 | $for(affiliations)$ 300 | \affil[$affiliations.index$]{$affiliations.name$} 301 | $endfor$ 302 | $endif$ 303 | \date{\vspace{-5ex}} 304 | 305 | \begin{document} 306 | $if(title)$ 307 | \maketitle 308 | $endif$ 309 | $if(abstract)$ 310 | \begin{abstract} 311 | $abstract$ 312 | \end{abstract} 313 | $endif$ 314 | 315 | \marginpar{ 316 | %\hrule 317 | \sffamily\small 318 | 319 | {\bfseries DOI:} \href{https://doi.org/$formatted_doi$}{\color{linky}{$formatted_doi$}} 320 | 321 | \vspace{2mm} 322 | 323 | {\bfseries Software} 324 | \begin{itemize} 325 | \setlength\itemsep{0em} 326 | \item \href{$review_issue_url$}{\color{linky}{Review}} \ExternalLink 327 | \item \href{$repository$}{\color{linky}{Repository}} \ExternalLink 328 | \item \href{$archive_doi$}{\color{linky}{Archive}} \ExternalLink 329 | \end{itemize} 330 | 331 | \vspace{2mm} 332 | 333 | {\bfseries Submitted:} $submitted$\\ 334 | {\bfseries Published:} $published$ 335 | 336 | \vspace{2mm} 337 | {\bfseries License}\\ 338 | Authors of papers retain copyright and release the work under a Creative Commons Attribution 4.0 International License (\href{http://creativecommons.org/licenses/by/4.0/}{\color{linky}{CC-BY}}). 
339 | } 340 | 341 | $for(include-before)$ 342 | $include-before$ 343 | 344 | $endfor$ 345 | $if(toc)$ 346 | { 347 | $if(colorlinks)$ 348 | \hypersetup{linkcolor=$if(toccolor)$$toccolor$$else$black$endif$} 349 | $endif$ 350 | \setcounter{tocdepth}{$toc-depth$} 351 | \tableofcontents 352 | } 353 | $endif$ 354 | $if(lot)$ 355 | \listoftables 356 | $endif$ 357 | $if(lof)$ 358 | \listoffigures 359 | $endif$ 360 | $body$ 361 | 362 | $if(natbib)$ 363 | $if(bibliography)$ 364 | $if(biblio-title)$ 365 | $if(book-class)$ 366 | \renewcommand\bibname{$biblio-title$} 367 | $else$ 368 | \renewcommand\refname{$biblio-title$} 369 | $endif$ 370 | $endif$ 371 | \bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} 372 | 373 | $endif$ 374 | $endif$ 375 | $if(biblatex)$ 376 | \printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ 377 | 378 | $endif$ 379 | $for(include-after)$ 380 | $include-after$ 381 | 382 | $endfor$ 383 | \end{document} 384 | -------------------------------------------------------------------------------- /paper/paper.aux: -------------------------------------------------------------------------------- 1 | \relax 2 | \providecommand\hyper@newdestlabel[2]{} 3 | \providecommand\HyperFirstAtBeginDocument{\AtBeginDocument} 4 | \HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined 5 | \global\let\oldcontentsline\contentsline 6 | \gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}} 7 | \global\let\oldnewlabel\newlabel 8 | \gdef\newlabel#1#2{\newlabelxx{#1}#2} 9 | \gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} 10 | \AtEndDocument{\ifx\hyper@anchor\@undefined 11 | \let\contentsline\oldcontentsline 12 | \let\newlabel\oldnewlabel 13 | \fi} 14 | \fi} 15 | \global\let\hyper@last\relax 16 | \gdef\HyperFirstAtBeginDocument#1{#1} 17 | \providecommand*\HyPL@Entry[1]{} 18 | \abx@aux@refcontext{nty/global//global/global} 19 | \HyPL@Entry{0<>} 20 | \providecommand\tcolorbox@label[2]{} 21 | \@writefile{toc}{\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax } 22 | \@writefile{lof}{\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax } 23 | \@writefile{lot}{\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax } 24 | \@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{Summary}{1}{section*.1}} 25 | \newlabel{summary}{{}{1}{Summary}{section*.1}{}} 26 | \@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{Acknowledgements}{2}{section*.2}} 27 | \newlabel{acknowledgements}{{}{2}{Acknowledgements}{section*.2}{}} 28 | \newlabel{references}{{}{2}{}{section*.2}{}} 29 | \@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{References}{2}{section*.2}} 30 | -------------------------------------------------------------------------------- /paper/paper.bib: -------------------------------------------------------------------------------- 1 | 2 | @article{Wolz2018, 3 | archivePrefix = {arXiv}, 4 | eprinttype = {arxiv}, 5 | eprint = {1803.02477}, 6 | primaryClass = {astro-ph}, 7 | title = {Intensity Mapping Cross-Correlations {{II}}: {{HI}} Halo Models Including Shot Noise}, 8 | shorttitle = {Intensity Mapping Cross-Correlations {{II}}}, 9 | abstract = {HI intensity mapping data traces the large-scale structure matter distribution using the integrated emission of neutral hydrogen gas (HI). 
The cross-correlation of the intensity maps with optical galaxy surveys can mitigate foreground and systematic effects, but has been shown to significantly depend on galaxy evolution parameters of the HI and the optical sample. Previously, we have shown that the shot noise of the cross-correlation scales with the HI content of the optical samples, such that the shot noise estimation infers the average HI masses of these samples. In this article, we present an adaptive framework for the cross-correlation of HI intensity maps with galaxy samples using our implementation of the halo model formalism (Murray et al 2018, in prep) which utilises the halo occupation distribution of galaxies to predict their power spectra. We compare two HI population models, tracing the spatial halo and the galaxy distribution respectively, and present their auto- and cross-power spectra with an associated galaxy sample. We find that the choice of the HI model and the distribution of the HI within the galaxy sample have minor significance for the shape of the auto- and cross-correlations, but highly impact the measured shot noise amplitude of the estimators, a finding we confirm with simulations. We demonstrate parameter estimation of the HI halo occupation models and advocate this framework for the interpretation of future experimental data, with the prospect of determining the HI masses of optical galaxy samples via the cross-correlation shot noise.}, 10 | journal = {ArXiv180302477 Astro-Ph}, 11 | author = {Wolz, L. and Murray, S. G. and Blake, C. and Wyithe, J. S.}, 12 | month = mar, 13 | year = {2018}, 14 | keywords = {Astrophysics - Cosmology and Nongalactic Astrophysics}, 15 | annote = {Comment: 15 pages, 8 figures, 3 tables. Comments welcome} 16 | } 17 | 18 | @book{Monin2007, 19 | address = {New York}, 20 | series = {Dover Books on Physics}, 21 | title = {Statistical {{Fluid Mechanics}}}, 22 | volume = {1}, 23 | publisher = {{Dover Publications}}, 24 | author = {Monin, A. S. and Yaglom, A. M.}, 25 | year = {2007} 26 | } 27 | 28 | @article{Murray2017, 29 | title = {An {{Improved Statistical Point}}-Source {{Foreground Model}} for the {{Epoch}} of {{Reionization}}}, 30 | volume = {845}, 31 | issn = {0004-637X}, 32 | doi = {10.3847/1538-4357/aa7d0a}, 33 | abstract = {We present a sophisticated statistical point-source foreground model for low-frequency radio Epoch of Reionization (EoR) experiments using the 21 cm neutral hydrogen emission line. Motivated by our understanding of the low-frequency radio sky, we enhance the realism of two model components compared with existing models: the source count distributions as a function of flux density and spatial position (source clustering), extending current formalisms for the foreground covariance of 2D power-spectral modes in 21 cm EoR experiments. The former we generalize to an arbitrarily broken power law, and the latter to an arbitrary isotropically correlated field. This paper presents expressions for the modified covariance under these extensions, and shows that for a more realistic source spatial distribution, extra covariance arises in the EoR window that was previously unaccounted for. Failure to include this contribution can yield bias in the final power-spectrum and under-estimate uncertainties, potentially leading to a false detection of signal. 
The extent of this effect is uncertain, owing to ignorance of physical model parameters, but we show that it is dependent on the relative abundance of faint sources, to the effect that our extension will become more important for future deep surveys. Finally, we show that under some parameter choices, ignoring source clustering can lead to false detections on large scales, due to both the induced bias and an artificial reduction in the estimated measurement uncertainty.}, 34 | language = {en}, 35 | number = {1}, 36 | journal = {ApJ}, 37 | author = {Murray, S. G. and Trott, C. M. and Jordan, C. H.}, 38 | year = {2017}, 39 | keywords = {Astrophysics - Cosmology and Nongalactic Astrophysics}, 40 | pages = {7}, 41 | file = {/home/steven/Dropbox/PaperCatalogueZotero/The Astrophysical Journal/2017/Murray et al_2017_An Improved Statistical Point-Source Foreground Model for the Epoch of.pdf;/home/steven/Zotero/storage/GGMBDD6X/1706.html}, 42 | annote = {Comment: 18 pages, 10 figures, accepted in ApJ} 43 | } 44 | 45 | @article{Beutler2011, 46 | title = {The {{6dF Galaxy Survey}}: Baryon Acoustic Oscillations and the Local {{Hubble}} Constant}, 47 | volume = {416}, 48 | doi = {10.1111/j.1365-2966.2011.19250.x}, 49 | number = {4}, 50 | journal = {Mon. Not. R. Astron. Soc.}, 51 | author = {Beutler, Florian and Blake, Chris and Colless, M. and Jones, D. H. and {Staveley-Smith}, L. and Campbell, Lachlan A. and Parker, Q. and Saunders, W. and Watson, F.}, 52 | month = oct, 53 | year = {2011}, 54 | pages = {3017-3032}, 55 | file = {/home/steven/Dropbox/PaperCatalogueZotero/Monthly Notices of the Royal Astronomical Society/2011/Beutler et al_2011_The 6dF Galaxy Survey.pdf} 56 | } 57 | 58 | @article{Coles1991, 59 | title = {A lognormal model for the cosmological mass distribution}, 60 | volume = {248}, 61 | doi = {10.1093/mnras/248.1.1}, 62 | abstract = {SUMMARY We discuss the use of a lognormal (LN) random field as a model for the distribution of matter in the Universe. We find a number of reasons why this should be a plausible approximation to the distribution of density irregularities obtained by evolving from Gaussian initial conditions. Unlike straightforward linear theory, the model always has p$>$0 but is arbitrarily close to the Gaussian at early times. It has the added advantage that, like the Gaussian model, all its statistical properties can be formulated in terms of one covariance function. A number of interesting and important difficulties with the statistical treatment of density perturbations are revealed by an analysis of this model. In particular, the LN model is not completely specified by its moments. We explain why this could be true for the actual matter field. We also show that the usual method of representing the three- and four-point correlation functions of galaxies, in terms of the parameters Q and R, is not useful for discriminating between Gaussian and non-Gaussian fluctuations, and propose better parameterizations in terms of the skewness and kurtosis of the three- and four-point distributions, respectively. Other characteristics of the model, such as topology (genus curves, etc.), multi-fractal behaviour, void probabilities and biasing (behaviour of 'peaks' relative to background fluctuations) are also discussed. The model also provides a way of checking the consistency of treatments of large-scale streaming motions in the Universe by allowing us to determine the scale at which linear theory cannot be accurate for both the matter and velocity fields.
We discuss a possible model for the number-count distribution of galaxies, based on the LN distribution but allowing for discreteness effects which can make the distribution of log n appear non-Gaussian, and show how to construct Monte-Carlo simulations of point patterns (in one-, two-, or three-dimensions) which contain correlations of all orders.}, 63 | journal = {Mon. Not. R. Astron. Soc.}, 64 | author = {Coles, Peter and Jones, Bernard}, 65 | year = {1991}, 66 | pages = {1-13}, 67 | file = {/home/steven/Dropbox/PaperCatalogueZotero/Mon. Not. R. astr. Soc/1991/Coles_Jones_1991_A lognormal model for the cosmological mass distribution.pdf} 68 | } 69 | 70 | @inbook{Peacock1999, 71 | place={Cambridge}, 72 | title={Cosmological density fields}, 73 | DOI={10.1017/CBO9780511804533.017}, 74 | booktitle={Cosmological Physics}, 75 | publisher={Cambridge University Press}, 76 | author={Peacock, J. A.}, 77 | year={1998}, 78 | pages={495–552} 79 | } 80 | 81 | 82 | @online{github, 83 | author = {S. G. Murray}, 84 | title = {powerbox: Make arbitrarily structured, arbitrary-dimension boxes and log-normal mocks}, 85 | year = 2018, 86 | url = {https://github.com/steven-murray/powerbox}, 87 | urldate = {2018-07-23} 88 | } 89 | -------------------------------------------------------------------------------- /paper/paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'powerbox: A Python package for creating structured fields with isotropic power spectra' 3 | tags: 4 | - Python 5 | - astronomy 6 | - power spectrum 7 | - signal analysis 8 | authors: 9 | - name: Steven G. Murray 10 | orcid: 0000-0003-3059-3823 11 | affiliation: 1, 2 12 | affiliations: 13 | - name: International Centre for Radio Astronomy Research (ICRAR), Curtin University, Bentley, WA 6102, Australia 14 | index: 1 15 | - name: ARC Centre of Excellence for All-Sky Astrophysics in 3 Dimensions (ASTRO 3D) 16 | index: 2 17 | date: 19 July 2018 18 | bibliography: paper.bib 19 | --- 20 | 21 | # Summary 22 | 23 | The power spectrum is a cornerstone of both signal analysis and spatial statistics, 24 | encoding the variance of a signal or field on different scales. 25 | Its common usage is in no small part attributable to the fact that it is a *full* 26 | description of a purely Gaussian process -- for such statistical processes, no information 27 | is contained in higher-order statistics. The prevalence of such processes (or 28 | close approximations to them) in physical systems serves to justify the popularity 29 | of the power spectrum as a key descriptive statistic in various physical sciences, 30 | eg. cosmology [@Peacock1999] and fluid mechanics [@Monin2007]. It furthermore 31 | readily avails itself to efficient numerical evaluation, being the absolute square of 32 | the Fourier Transform. 33 | 34 | Another feature of many approximate physical systems, especially those already mentioned, 35 | is that they are both homogeneous and isotropic (at least in some local sample). 36 | In this case, the *n*-dimensional power spectrum may be losslessly compressed into a single 37 | dimension, which is radial in Fourier-space. Such processes approximately describe for example 38 | the over-density field of the early Universe and locally isotropic turbulent flows.
39 | Thus it is of great use to have a numerical code which simplifies the dual operations of: (i) 40 | producing random homogeneous/isotropic fields (of arbitrary dimensionality) consistent with a 41 | given 1D radial power spectrum, and (ii) determining the 1D radial power spectrum of 42 | random fields (or a sample of tracers of that field). ``powerbox`` exists to perform these 43 | dual tasks with both simplicity and efficiency. 44 | 45 | Performing the first of these tasks is especially non-trivial. While the power spectrum 46 | can be evaluated on any field (though it may not fully describe the given field), the 47 | precise machinery for *creating* a field from a given power spectrum depends on the 48 | probability density function (PDF) of the process itself. The machinery for creating a 49 | Gaussian field is well-known. However, other PDFs -- especially those that are positively 50 | bounded -- are extremely useful for describing such physical entities as density fields. 51 | In these cases, the *log-normal* PDF has become a standard approximation [@Coles1991], 52 | and ``powerbox`` makes a point of supporting the machinery for generating log-normal 53 | fields [@Beutler2011] for this purpose. Indeed, ``powerbox`` is *primarily* geared 54 | towards supporting cosmological applications, such as measuring and producing 55 | samples of galaxy positions in a log-normal density field (while accounting for standard 56 | effects such as shot noise and standard normalisation conventions). It is nevertheless 57 | flexible enough to support research in any field (with its own particular conventions) 58 | that is based on the homogeneous and isotropic power spectrum. 59 | 60 | ``Powerbox`` is a pure-Python package devoted to the simple and efficient solution 61 | of the previous considerations. As the most popular language for astronomy, Python 62 | is the natural language of choice for ``powerbox``, with its focus on cosmological 63 | applications, and it also provides for great ease-of-use and extensibility. As an 64 | example of the former, all functions/classes within ``powerbox`` are able to work 65 | in arbitrary numbers of dimensions (memory permitting), simply by setting a single 66 | parameter *n*. As an 67 | example of the latter, the class-based structure of the field-generator may be used 68 | to extend the generation to fields with PDFs other than either Gaussian or log-normal 69 | (indeed, the log-normal class is itself sub-classed from the Gaussian one). 70 | ``powerbox`` does not sacrifice efficiency for its high-level interface. By default, the 71 | underlying FFTs are performed by ``numpy``, which uses underlying fast C code. In addition, 72 | if the ``pyFFTW`` package is installed, ``powerbox`` will seamlessly switch to using its 73 | optimized C code for up to double the efficiency. It is also written with an eye for 74 | conserving memory, which is important for the often very large fields that may be required. 75 | 76 | ``Powerbox`` was written in response to research demand, and as such it is well suited 77 | to research of a similar nature. Furthermore, as previously stated, 78 | every effort has been made to sufficiently generalize its scope to be of use in related 79 | fields of research. It has already been instrumental in several publications [@Murray2017; @Wolz2018], 80 | and we hope it will be a useful tool for approximate theoretical simulations by many others.
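As a brief illustration of the two tasks described above, the following is a minimal sketch using the package's top-level API (the grid size, power-law spectrum and seed are purely illustrative):

```python
from powerbox import PowerBox, get_power

# Task (i): a 2D Gaussian field drawn from an isotropic power-law spectrum.
pb = PowerBox(N=128, dim=2, pk=lambda k: 0.1 * k**-2.0, boxlength=1.0, seed=1234)
field = pb.delta_x()  # a real-space realisation of the field

# Task (ii): recover the 1D radial power spectrum of that field.
p_k, kbins = get_power(field, pb.boxlength)
```

For positively-bounded fields, ``LogNormalPowerBox`` may be substituted for ``PowerBox`` above with the same call signature.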
81 | 82 | # Acknowledgements 83 | 84 | The author acknowledges helpful discussions and contributions from Cathryn Trott, Chris Jordan 85 | and Laura Wolz during the initial development of this project. 86 | Parts of this research were supported by the Australian Research Council Centre of Excellence 87 | for All Sky Astrophysics in 3 Dimensions (ASTRO 3D), through project number CE170100013. 88 | 89 | # References 90 | -------------------------------------------------------------------------------- /paper/paper.out: -------------------------------------------------------------------------------- 1 | \BOOKMARK [1][-]{section*.1}{Summary}{}% 1 2 | \BOOKMARK [1][-]{section*.2}{Acknowledgements}{}% 2 3 | \BOOKMARK [1][-]{section*.2}{References}{}% 3 4 | -------------------------------------------------------------------------------- /paper/paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven-murray/powerbox/69c5cc10464fb6abf730bf2e4b67136aa1d2d4b2/paper/paper.pdf -------------------------------------------------------------------------------- /paper/paper.run.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 23 | 28 | 33 | 36 | 39 | 42 | ]> 43 | 44 | 45 | latex 46 | 47 | paper.bcf 48 | 49 | 50 | paper.bbl 51 | 52 | 53 | blx-dm.def 54 | blx-unicode.def 55 | blx-compat.def 56 | biblatex.def 57 | standard.bbx 58 | numeric.bbx 59 | numeric.cbx 60 | biblatex.cfg 61 | english.lbx 62 | 63 | 64 | 65 | biber 66 | 67 | biber 68 | paper 69 | 70 | 71 | paper.bcf 72 | 73 | 74 | paper.bbl 75 | 76 | 77 | paper.bbl 78 | 79 | 80 | paper.bcf 81 | 82 | 83 | 84 | -------------------------------------------------------------------------------- /paper/paper.tex: -------------------------------------------------------------------------------- 1 | \documentclass[10pt,a4paper,onecolumn]{article} 2 | \usepackage{marginnote} 3 | \usepackage{graphicx} 4 | \usepackage{xcolor} 5 | \usepackage{authblk,etoolbox} 6 | \usepackage{titlesec} 7 | \usepackage{calc} 8 | \usepackage{tikz} 9 | \usepackage{hyperref} 10 | \hypersetup{colorlinks,breaklinks, 11 | urlcolor=[rgb]{0.0, 0.5, 1.0}, 12 | linkcolor=[rgb]{0.0, 0.5, 1.0}} 13 | \usepackage{caption} 14 | \usepackage{tcolorbox} 15 | \usepackage{amssymb,amsmath} 16 | \usepackage{ifxetex,ifluatex} 17 | \usepackage{seqsplit} 18 | \usepackage{fixltx2e} % provides \textsubscript 19 | \usepackage[ 20 | backend=biber, 21 | % style=alphabetic, 22 | % citestyle=numeric 23 | ]{biblatex} 24 | %\bibliography{paper.bib} 25 | 26 | 27 | % --- Page layout ------------------------------------------------------------- 28 | \usepackage[top=3.5cm, bottom=3cm, right=1.5cm, left=1.0cm, 29 | headheight=2.2cm, reversemp, includemp, marginparwidth=4.5cm]{geometry} 30 | 31 | % --- Default font ------------------------------------------------------------ 32 | % \renewcommand\familydefault{\sfdefault} 33 | 34 | % --- Style ------------------------------------------------------------------- 35 | \renewcommand{\bibfont}{\small \sffamily} 36 | \renewcommand{\captionfont}{\small\sffamily} 37 | \renewcommand{\captionlabelfont}{\bfseries} 38 | 39 | % --- Section/SubSection/SubSubSection ---------------------------------------- 40 | \titleformat{\section} 41 | {\normalfont\sffamily\Large\bfseries} 42 | {}{0pt}{} 43 | \titleformat{\subsection} 44 | {\normalfont\sffamily\large\bfseries} 45 | {}{0pt}{} 46 |
\titleformat{\subsubsection} 47 | {\normalfont\sffamily\bfseries} 48 | {}{0pt}{} 49 | \titleformat*{\paragraph} 50 | {\sffamily\normalsize} 51 | 52 | 53 | % --- Header / Footer --------------------------------------------------------- 54 | \usepackage{fancyhdr} 55 | \pagestyle{fancy} 56 | \fancyhf{} 57 | %\renewcommand{\headrulewidth}{0.50pt} 58 | \renewcommand{\headrulewidth}{0pt} 59 | \fancyhead[L]{\hspace{-0.75cm}\includegraphics[width=5.5cm]{joss-logo.png}} 60 | \fancyhead[C]{} 61 | \fancyhead[R]{} 62 | \renewcommand{\footrulewidth}{0.25pt} 63 | 64 | \fancyfoot[L]{\footnotesize{\sffamily , (2018). powerbox: A Python package for creating structured fields with isotropic power spectra. \textit{}, 3(28), 28. \href{https://doi.org/10.21105/joss.00850}{https://doi.org/10.21105/joss.00850}}} 65 | 66 | 67 | \fancyfoot[R]{\sffamily \thepage} 68 | \makeatletter 69 | \let\ps@plain\ps@fancy 70 | \fancyheadoffset[L]{4.5cm} 71 | \fancyfootoffset[L]{4.5cm} 72 | 73 | % --- Macros --------- 74 | 75 | \definecolor{linky}{rgb}{0.0, 0.5, 1.0} 76 | 77 | \newtcolorbox{repobox} 78 | {colback=red, colframe=red!75!black, 79 | boxrule=0.5pt, arc=2pt, left=6pt, right=6pt, top=3pt, bottom=3pt} 80 | 81 | \newcommand{\ExternalLink}{% 82 | \tikz[x=1.2ex, y=1.2ex, baseline=-0.05ex]{% 83 | \begin{scope}[x=1ex, y=1ex] 84 | \clip (-0.1,-0.1) 85 | --++ (-0, 1.2) 86 | --++ (0.6, 0) 87 | --++ (0, -0.6) 88 | --++ (0.6, 0) 89 | --++ (0, -1); 90 | \path[draw, 91 | line width = 0.5, 92 | rounded corners=0.5] 93 | (0,0) rectangle (1,1); 94 | \end{scope} 95 | \path[draw, line width = 0.5] (0.5, 0.5) 96 | -- (1, 1); 97 | \path[draw, line width = 0.5] (0.6, 1) 98 | -- (1, 1) -- (1, 0.6); 99 | } 100 | } 101 | 102 | % --- Title / Authors --------------------------------------------------------- 103 | % patch \maketitle so that it doesn't center 104 | \patchcmd{\@maketitle}{center}{flushleft}{}{} 105 | \patchcmd{\@maketitle}{center}{flushleft}{}{} 106 | % patch \maketitle so that the font size for the title is normal 107 | \patchcmd{\@maketitle}{\LARGE}{\LARGE\sffamily}{}{} 108 | % patch the patch by authblk so that the author block is flush left 109 | \def\maketitle{{% 110 | \renewenvironment{tabular}[2][] 111 | {\begin{flushleft}} 112 | {\end{flushleft}} 113 | \AB@maketitle}} 114 | \makeatletter 115 | \renewcommand\AB@affilsepx{ \protect\Affilfont} 116 | %\renewcommand\AB@affilnote[1]{{\bfseries #1}\hspace{2pt}} 117 | \renewcommand\AB@affilnote[1]{{\bfseries #1}\hspace{3pt}} 118 | \makeatother 119 | \renewcommand\Authfont{\sffamily\bfseries} 120 | \renewcommand\Affilfont{\sffamily\small\mdseries} 121 | \setlength{\affilsep}{1em} 122 | 123 | 124 | \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex 125 | \usepackage[T1]{fontenc} 126 | \usepackage[utf8]{inputenc} 127 | 128 | \else % if luatex or xelatex 129 | \ifxetex 130 | \usepackage{mathspec} 131 | \else 132 | \usepackage{fontspec} 133 | \fi 134 | \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} 135 | 136 | \fi 137 | % use upquote if available, for straight quotes in verbatim environments 138 | \IfFileExists{upquote.sty}{\usepackage{upquote}}{} 139 | % use microtype if available 140 | \IfFileExists{microtype.sty}{% 141 | \usepackage{microtype} 142 | \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts 143 | }{} 144 | 145 | \usepackage{hyperref} 146 | \hypersetup{unicode=true, 147 | pdftitle={powerbox: A Python package for creating structured fields with isotropic power spectra}, 148 | pdfborder={0 0 0}, 149 | breaklinks=true} 150 | \urlstyle{same} % 
don't use monospace font for urls 151 | \usepackage{graphicx,grffile} 152 | \makeatletter 153 | \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} 154 | \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} 155 | \makeatother 156 | % Scale images if necessary, so that they will not overflow the page 157 | % margins by default, and it is still possible to overwrite the defaults 158 | % using explicit options in \includegraphics[width, height, ...]{} 159 | \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} 160 | \IfFileExists{parskip.sty}{% 161 | \usepackage{parskip} 162 | }{% else 163 | \setlength{\parindent}{0pt} 164 | \setlength{\parskip}{6pt plus 2pt minus 1pt} 165 | } 166 | \setlength{\emergencystretch}{3em} % prevent overfull lines 167 | \providecommand{\tightlist}{% 168 | \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} 169 | \setcounter{secnumdepth}{0} 170 | % Redefines (sub)paragraphs to behave more like sections 171 | \ifx\paragraph\undefined\else 172 | \let\oldparagraph\paragraph 173 | \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} 174 | \fi 175 | \ifx\subparagraph\undefined\else 176 | \let\oldsubparagraph\subparagraph 177 | \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} 178 | \fi 179 | 180 | \title{powerbox: A Python package for creating structured fields with isotropic 181 | power spectra} 182 | 183 | \author[1, 2]{Steven G. Murray} 184 | 185 | \affil[1]{International Centre for Radio Astronomy Research (ICRAR), Curtin 186 | University, Bentley, WA 6102, Australia} 187 | \affil[2]{ARC Centre of Excellence for All-Sky Astrophysics in 3 Dimensions (ASTRO 188 | 3D)} 189 | \date{\vspace{-5ex}} 190 | 191 | \begin{document} 192 | \maketitle 193 | 194 | \marginpar{ 195 | %\hrule 196 | \sffamily\small 197 | 198 | {\bfseries DOI:} \href{https://doi.org/10.21105/joss.00850}{\color{linky}{10.21105/joss.00850}} 199 | 200 | \vspace{2mm} 201 | 202 | {\bfseries Software} 203 | \begin{itemize} 204 | \setlength\itemsep{0em} 205 | \item \href{https://github.com/openjournals/joss-reviews/issues/28}{\color{linky}{Review}} \ExternalLink 206 | \item \href{https://github.com/steven-murray/powerbox}{\color{linky}{Repository}} \ExternalLink 207 | \item \href{http://dx.doi.org/10.21105/zenodo.1400822}{\color{linky}{Archive}} \ExternalLink 208 | \end{itemize} 209 | 210 | \vspace{2mm} 211 | 212 | {\bfseries Submitted:} 23 July 2018\\ 213 | {\bfseries Published:} 21 August 2018 214 | 215 | \vspace{2mm} 216 | {\bfseries License}\\ 217 | Authors of papers retain copyright and release the work under a Creative Commons Attribution 4.0 International License (\href{http://creativecommons.org/licenses/by/4.0/}{\color{linky}{CC-BY}}). 218 | } 219 | 220 | \hypertarget{summary}{% 221 | \section{Summary}\label{summary}} 222 | 223 | The power spectrum is a cornerstone of both signal analysis and spatial 224 | statistics, encoding the variance of a signal or field on different 225 | scales. Its common usage is in no small part attributable to the fact 226 | that it is a \emph{full} description of a purely Gaussian process -- for 227 | such statistical processes, no information is contained in higher-order 228 | statistics. The prevalence of such processes (or close approximations to 229 | them) in physical systems serves to justify the popularity of the power 230 | spectrum as a key descriptive statistic in various physical sciences, 231 | eg. cosmology (Peacock 1998) and fluid mechanics (Monin and Yaglom 232 | 2007). 
It furthermore readily avails itself to efficient numerical 233 | evaluation, being the absolute square of the Fourier Transform. 234 | 235 | Another feature of many approximate physical systems, especially those 236 | already mentioned, is that they are both homogeneous and isotropic (at 237 | least in some local sample). In this case, the \emph{n}-dimensional 238 | power spectrum may be losslessly compressed into a single dimension, 239 | which is radial in Fourier-space. Such processes approximately describe 240 | for example the over-density field of the early Universe and locally 241 | isotropic turbulent flows. Thus it is of great use to have a numerical 242 | code which simplifies the dual operations of: (i) producing random 243 | homogeneous/isotropic fields (of arbitrary dimensionality) consistent 244 | with a given 1D radial power spectrum, and (ii) determining the 1D 245 | radial power spectrum of random fields (or a sample of tracers of that 246 | field). \texttt{powerbox} exists to perform these dual tasks with both 247 | simplicity and efficiency. 248 | 249 | Performing the first of these tasks is especially non-trivial. While the 250 | power spectrum can be evaluated on any field (though it may not fully 251 | describe the given field), the precise machinery for \emph{creating} a 252 | field from a given power spectrum depends on the probability density 253 | function (PDF) of the process itself. The machinery for creating a 254 | Gaussian field is well-known. However, other PDFs -- especially those 255 | that are positively bounded -- are extremely useful for describing such 256 | physical entities as density fields. In these cases, the 257 | \emph{log-normal} PDF has become a standard approximation (Coles and 258 | Jones 1991), and \texttt{powerbox} makes a point of supporting the 259 | machinery for generating log-normal fields (Beutler et al. 2011) for 260 | this purpose. Indeed, \texttt{powerbox} is \emph{primarily} geared 261 | towards supporting cosmological applications, such as measuring and 262 | producing samples of galaxy positions in a log-normal density field 263 | (while accounting for standard effects such as shot noise and standard 264 | normalisation conventions). It is nevertheless flexible enough to 265 | support research in any field (with its own particular conventions) that 266 | is based on the homogeneous and isotropic power spectrum. 267 | 268 | \texttt{Powerbox} is a pure-Python package devoted to the simple and 269 | efficient solution of the previous considerations. As the most popular 270 | language for astronomy, Python is the natural language of choice for 271 | \texttt{powerbox}, with its focus on cosmological applications, and it 272 | also provides for great ease-of-use and extensibility. As an example of 273 | the former, all functions/classes within \texttt{powerbox} are able to 274 | work in arbitrary numbers of dimensions (memory permitting), simply by 275 | setting a single parameter \emph{n}. As an example of the latter, the 276 | class-based structure of the field-generator may be used to extend the 277 | generation to fields with PDFs other than either Gaussian or log-normal 278 | (indeed, the log-normal class is itself sub-classed from the Gaussian 279 | one). \texttt{powerbox} does not sacrifice efficiency for its high-level 280 | interface. By default, the underlying FFTs are performed by 281 | \texttt{numpy}, which uses underlying fast C code.
In addition, if the 282 | \texttt{pyFFTW} package is installed, \texttt{powerbox} will seamlessly 283 | switch to using its optimized C code for up to double the efficiency. It 284 | is also written with an eye for conserving memory, which is important 285 | for the often very large fields that may be required. 286 | 287 | \texttt{Powerbox} was written in response to research demand, and as such 288 | it is well suited to research of a similar 289 | nature. Furthermore, as previously stated, every effort has been made to 290 | sufficiently generalize its scope to be of use in related fields of 291 | research. It has already been instrumental in several publications 292 | (Murray, Trott, and Jordan 2017; Wolz et al. 2018), and we hope it will 293 | be a useful tool for approximate theoretical simulations by many others. 294 | 295 | \hypertarget{acknowledgements}{% 296 | \section{Acknowledgements}\label{acknowledgements}} 297 | 298 | The author acknowledges helpful discussions and contributions from 299 | Cathryn Trott, Chris Jordan and Laura Wolz during the initial 300 | development of this project. Parts of this research were supported by 301 | the Australian Research Council Centre of Excellence for All Sky 302 | Astrophysics in 3 Dimensions (ASTRO 3D), through project number 303 | CE170100013. 304 | 305 | \hypertarget{references}{% 306 | \section*{References}\label{references}} 307 | \addcontentsline{toc}{section}{References} 308 | 309 | \hypertarget{refs}{} 310 | \leavevmode\hypertarget{ref-Beutler2011}{}% 311 | Beutler, Florian, Chris Blake, M. Colless, D. H. Jones, L. 312 | Staveley-Smith, Lachlan A. Campbell, Q. Parker, W. Saunders, and F. 313 | Watson. 2011. ``The 6dF Galaxy Survey: Baryon Acoustic Oscillations and 314 | the Local Hubble Constant.'' \emph{Mon. Not. R. Astron. Soc.} 416 (4): 315 | 3017--32. \url{https://doi.org/10.1111/j.1365-2966.2011.19250.x}. 316 | 317 | \leavevmode\hypertarget{ref-Coles1991}{}% 318 | Coles, Peter, and Bernard Jones. 1991. ``A Lognormal Model for the 319 | Cosmological Mass Distribution.'' \emph{Mon. Not. R. Astron. Soc.} 248: 320 | 1--13. \url{https://doi.org/10.1093/mnras/248.1.1}. 321 | 322 | \leavevmode\hypertarget{ref-Monin2007}{}% 323 | Monin, A. S., and A. M. Yaglom. 2007. \emph{Statistical Fluid 324 | Mechanics}. Vol. 1. Dover Books on Physics. New York: Dover 325 | Publications. 326 | 327 | \leavevmode\hypertarget{ref-Murray2017}{}% 328 | Murray, S. G., C. M. Trott, and C. H. Jordan. 2017. ``An Improved 329 | Statistical Point-Source Foreground Model for the Epoch of 330 | Reionization.'' \emph{ApJ} 845 (1): 7. 331 | \url{https://doi.org/10.3847/1538-4357/aa7d0a}. 332 | 333 | \leavevmode\hypertarget{ref-Peacock1999}{}% 334 | Peacock, J. A. 1998. ``Cosmological Density Fields.'' In 335 | \emph{Cosmological Physics}, 495--552. Cambridge University Press. 336 | \url{https://doi.org/10.1017/CBO9780511804533.017}. 337 | 338 | \leavevmode\hypertarget{ref-Wolz2018}{}% 339 | Wolz, L., S. G. Murray, C. Blake, and J. S. Wyithe. 2018. ``Intensity 340 | Mapping Cross-Correlations II: HI Halo Models Including Shot Noise.'' 341 | \emph{ArXiv180302477 Astro-Ph}, March. 342 | \url{http://arxiv.org/abs/1803.02477}.
343 | 344 | \end{document} 345 | -------------------------------------------------------------------------------- /paper/paper.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven-murray/powerbox/69c5cc10464fb6abf730bf2e4b67136aa1d2d4b2/paper/paper.zip -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.setuptools_scm] 6 | write_to = "src/powerbox/_version.py" 7 | parentdir_prefix_version = "powerbox-" 8 | fallback_version = "0.0.0" 9 | 10 | [project] 11 | name="powerbox" 12 | authors=[ 13 | {name = "Steven Murray", email = "steven.g.murray@asu.edu"} 14 | ] 15 | description="Create arbitrary boxes with isotropic power spectra" 16 | license= {text= "MIT"} 17 | requires-python = ">=3.9" 18 | keywords=["power-spectrum", "signal processing"] 19 | dependencies = [ 20 | "numpy>1.6.2" 21 | ] 22 | classifiers = [ 23 | "Programming Language :: Python :: 3", 24 | ] 25 | dynamic = ["readme", "version"] 26 | 27 | [project.urls] 28 | repository="https://github.com/steven-murray/powerbox" 29 | documentation="https://powerbox.readthedocs.io" 30 | 31 | [tool.setuptools.dynamic] 32 | readme = {file = ["README.rst"]} 33 | 34 | [project.optional-dependencies] 35 | tests = [ 36 | "wheel", 37 | "flake8", 38 | "pytest", 39 | "pytest-cov", 40 | "scipy", 41 | ] 42 | docs = [ 43 | "sphinx", 44 | "numpydoc", 45 | "nbsphinx", 46 | "ipykernel", 47 | "pandoc", 48 | "sphinx_rtd_theme", 49 | "sphinx-automodapi==0.7", 50 | "matplotlib", 51 | "hmf", 52 | "packaging", # required for camb 53 | ] 54 | dev = [ 55 | "powerbox[tests,docs,fftw]", 56 | "pre-commit" 57 | ] 58 | fftw = [ 59 | 'pyfftw' 60 | ] 61 | all = [ 62 | "powerbox[dev]" 63 | ] 64 | -------------------------------------------------------------------------------- /src/powerbox/__init__.py: -------------------------------------------------------------------------------- 1 | """A package for creating mocks from input isotropic power spectra.""" 2 | 3 | try: 4 | from importlib.metadata import PackageNotFoundError, version 5 | except ImportError: 6 | from importlib_metadata import PackageNotFoundError, version 7 | 8 | try: 9 | from ._version import version as __version__ 10 | except ModuleNotFoundError: # pragma: no cover 11 | try: 12 | __version__ = version("powerbox") 13 | except PackageNotFoundError: 14 | # package is not installed 15 | __version__ = "unknown" 16 | 17 | from .dft_backend import FFTW, NumpyFFT, get_fft_backend 18 | from .powerbox import LogNormalPowerBox, PowerBox 19 | from .tools import ( 20 | angular_average, 21 | angular_average_nd, 22 | get_power, 23 | ignore_zero_absk, 24 | ignore_zero_ki, 25 | power2delta, 26 | ) 27 | -------------------------------------------------------------------------------- /src/powerbox/dft.py: -------------------------------------------------------------------------------- 1 | r""" 2 | A module defining some "nicer" fourier transform functions. 3 | 4 | We define only two functions -- an arbitrary-dimension forward transform, and its inverse. In each case, the transform 5 | is designed to replicate the continuous transform. That is, the transform is volume-normalised and obeys correct 6 | Fourier conventions. 
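As a quick consistency check (a minimal sketch; the random field and box length here are illustrative), a forward transform followed by the matching inverse should recover the input::

    >>> import numpy as np
    >>> from powerbox.dft import fft, ifft
    >>> x = np.random.normal(size=(32, 32))
    >>> X, k = fft(x, L=1.0)
    >>> x2, r = ifft(X, L=1.0)
    >>> bool(np.allclose(x, x2.real))
    True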
7 | 8 | The actual FFT backend is provided by ``pyFFTW`` if it is installed, which provides a significant speedup, and 9 | multi-threading. 10 | 11 | Conveniently, we allow for arbitrary Fourier convention, according to the scheme in 12 | http://mathworld.wolfram.com/FourierTransform.html. That is, we define the forward and inverse *n*-dimensional 13 | transforms respectively as 14 | 15 | .. math:: F(k) = \sqrt{\frac{|b|}{(2\pi)^{1-a}}}^n \int f(r) e^{-i b\mathbf{k}\cdot\mathbf{r}} d^n\mathbf{r} 16 | 17 | and 18 | 19 | .. math:: f(r) = \sqrt{\frac{|b|}{(2\pi)^{1+a}}}^n \int F(k) e^{+i b\mathbf{k}\cdot\mathbf{r}} d^n \mathbf{k}. 20 | 21 | In both transforms, the corresponding co-ordinates are returned so a completely consistent transform is simple to get. 22 | This makes switching from standard frequency to angular frequency very simple. 23 | 24 | We note that currently, only positive values for b are implemented (in fact, using negative b is consistent, but 25 | one must be careful that the frequencies returned are descending, rather than ascending). 26 | """ 27 | 28 | from __future__ import annotations 29 | 30 | __all__ = ["fft", "ifft", "fftfreq", "fftshift", "ifftshift"] 31 | 32 | # To avoid MKL-related bugs, numpy needs to be imported after pyfftw: see https://github.com/pyFFTW/pyFFTW/issues/40 33 | import numpy as np 34 | 35 | from .dft_backend import FFTBackend, get_fft_backend 36 | 37 | 38 | def fftshift(x, *args, **kwargs): # noqa: D103 39 | backend = kwargs.pop("backend", get_fft_backend(kwargs.pop("nthreads", None))) 40 | return backend.fftshift(x, *args, **kwargs) 41 | 42 | 43 | fftshift.__doc__ = get_fft_backend().fftshift.__doc__ 44 | 45 | 46 | def ifftshift(x, *args, **kwargs): # noqa: D103 47 | backend = kwargs.pop("backend", get_fft_backend(kwargs.pop("nthreads", None))) 48 | return backend.ifftshift(x, *args, **kwargs) 49 | 50 | 51 | ifftshift.__doc__ = get_fft_backend().ifftshift.__doc__ 52 | 53 | 54 | def fftfreq(x, *args, **kwargs): # noqa: D103 55 | backend = kwargs.pop("backend", get_fft_backend(kwargs.pop("nthreads", None))) 56 | return backend.fftfreq(x, *args, **kwargs) 57 | 58 | 59 | fftfreq.__doc__ = get_fft_backend().fftfreq.__doc__ 60 | 61 | 62 | def fft( 63 | X, 64 | L=None, 65 | Lk=None, 66 | a=0, 67 | b=2 * np.pi, 68 | left_edge=None, 69 | axes=None, 70 | ret_cubegrid=False, 71 | nthreads=None, 72 | backend: FFTBackend = None, 73 | ): 74 | r""" 75 | Arbitrary-dimension nice Fourier Transform. 76 | 77 | This function wraps numpy's ``fftn`` and applies some nice properties. Notably, the 78 | returned fourier transform is equivalent to what would be expected from a continuous 79 | Fourier Transform (including normalisations etc.). In addition, arbitrary 80 | conventions are supported (see :mod:`powerbox.dft` for details). 81 | 82 | Default parameters have the same normalising conventions as ``numpy.fft.fftn``. 83 | 84 | The output object always has the zero in the centre, with monotonically increasing 85 | spectral arguments. 86 | 87 | Parameters 88 | ---------- 89 | X : array 90 | An array with arbitrary dimensions defining the field to be transformed. Should 91 | correspond exactly to the continuous function for which it is an analogue. A 92 | lower-dimensional transform can be specified by using the ``axes`` argument. 93 | L : float or array-like, optional 94 | The length of the box which defines ``X``. If a scalar, each transformed 95 | dimension in ``X`` is assumed to have the same length. 
If array-like, must be of 96 | the same length as the number of transformed dimensions. The default returns the 97 | un-normalised DFT (same as numpy). 98 | Lk : float or array-like, optional 99 | The length of the fourier-space box which defines the dual of ``X``. Only one of 100 | L/Lk needs to be provided. If provided, L takes precedence. If a scalar, each 101 | transformed dimension in ``X`` is assumed to have the same length. If 102 | array-like, must be of the same length as the number of transformed dimensions. 103 | a,b : float, optional 104 | These define the Fourier convention used. See :mod:`powerbox.dft` for details. 105 | The defaults return the standard DFT as defined in :mod:`numpy.fft`. 106 | left_edge : float or array-like, optional 107 | The co-ordinate at the left-edge for each dimension that is being transformed. 108 | By default, sets the left edge to -L/2, so that the input is centred before 109 | transforming (i.e. equivalent to ``fftshift(fft(fftshift(X)))``) 110 | axes : sequence of ints, optional 111 | The axes to take the transform over. The default is to use all axes for the 112 | transform. 113 | ret_cubegrid : bool, optional 114 | Whether to return the entire grid of frequency magnitudes. 115 | nthreads : bool or int, optional 116 | If set to False, uses numpy's FFT routine. If set to None, uses pyFFTW with 117 | number of threads equal to the number of available CPUs. If int, uses pyFFTW 118 | with number of threads equal to the input value. 119 | backend : FFTBackend, optional 120 | The backend to use for the FFT. If not provided, the backend is chosen based on 121 | the value of nthreads. 122 | 123 | Returns 124 | ------- 125 | ft : array 126 | The DFT of X, normalised to be consistent with the continuous transform. 127 | freq : list of arrays 128 | The frequencies in each dimension, consistent with the Fourier conventions 129 | specified. 130 | grid : array 131 | Only returned if ``ret_cubegrid`` is ``True``. An array with shape given by 132 | ``axes`` specifying the magnitude of the frequencies at each point of the 133 | fourier transform. 134 | """ 135 | if backend is None: 136 | backend = get_fft_backend(nthreads) 137 | 138 | if axes is None: 139 | axes = list(range(len(X.shape))) 140 | 141 | N = np.array([X.shape[axis] for axis in axes]) 142 | 143 | # Get the box volume if given the fourier-space box volume 144 | if L is None and Lk is None: 145 | L = N 146 | elif L is not None: # give precedence to L 147 | if np.isscalar(L): 148 | L = L * np.ones(len(axes)) 149 | else: 150 | if np.isscalar(Lk): 151 | Lk = Lk * np.ones(len(axes)) 152 | L = N * 2 * np.pi / (Lk * b) # Take account of the fourier convention. 153 | 154 | left_edge = _set_left_edge(left_edge, axes, L) 155 | 156 | V = float(np.prod(L)) # Volume of box 157 | Vx = V / np.prod(N) # Volume of cell 158 | 159 | ft = ( 160 | Vx 161 | * backend.fftshift(backend.fftn(X, axes=axes), axes=axes) 162 | * np.sqrt(np.abs(b) / (2 * np.pi) ** (1 - a)) ** len(axes) 163 | ) 164 | 165 | dx = np.array([float(length) / float(n) for length, n in zip(L, N)]) 166 | 167 | freq = [backend.fftfreq(n, d=d, b=b) for n, d in zip(N, dx)] 168 | 169 | # Adjust phases of the result to align with the left edge properly. 
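    # (Each transformed axis is multiplied by exp(-b*1j*freq*left_edge) -- the
    # Fourier shift theorem -- so that the result corresponds to an input whose
    # co-ordinates begin at `left_edge` rather than at zero.)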
170 | ft = _adjust_phase(ft, left_edge, freq, axes, b) 171 | return _retfunc(ft, freq, axes, ret_cubegrid) 172 | 173 | 174 | def ifft( 175 | X, 176 | Lk=None, 177 | L=None, 178 | a=0, 179 | b=2 * np.pi, 180 | axes=None, 181 | left_edge=None, 182 | ret_cubegrid=False, 183 | nthreads: int | None = None, 184 | backend: FFTBackend | None = None, 185 | ): 186 | r""" 187 | Arbitrary-dimension nice inverse Fourier Transform. 188 | 189 | This function wraps numpy's ``ifftn`` and applies some nice properties. Notably, 190 | the returned fourier transform is equivalent to what would be expected from a 191 | continuous inverse Fourier Transform (including normalisations etc.). In addition, 192 | arbitrary conventions are supported (see :mod:`powerbox.dft` for details). 193 | 194 | Default parameters have the same normalising conventions as ``numpy.fft.ifftn``. 195 | 196 | Parameters 197 | ---------- 198 | X : array 199 | An array with arbitrary dimensions defining the field to be transformed. Should 200 | correspond exactly to the continuous function for which it is an analogue. A 201 | lower-dimensional transform can be specified by using the ``axes`` argument. 202 | Note that if using a non-periodic function, the co-ordinates should be 203 | monotonically increasing. 204 | Lk : float or array-like, optional 205 | The length of the box which defines ``X``. If a scalar, each transformed 206 | dimension in ``X`` is assumed to have the same length. If array-like, must be of 207 | the same length as the number of transformed dimensions. The default returns the 208 | un-normalised DFT (the same as numpy). 209 | L : float or array-like, optional 210 | The length of the real-space box, defining the dual of ``X``. Only one of Lk/L 211 | needs to be passed. If L is passed, it is used. If a scalar, each transformed 212 | dimension in ``X`` is assumed to have the same length. If array-like, must be of 213 | the same length as the number of transformed dimensions. The default of ``Lk=1`` 214 | returns the un-normalised DFT. 215 | a,b : float, optional 216 | These define the Fourier convention used. See :mod:`powerbox.dft` for details. 217 | The defaults return the standard DFT as defined in :mod:`numpy.fft`. 218 | axes : sequence of ints, optional 219 | The axes to take the transform over. The default is to use all axes for the 220 | transform. 221 | left_edge : float or array-like, optional 222 | The co-ordinate at the left-edge (in k-space) for each dimension that is being 223 | transformed. By default, sets the left edge to -Lk/2, equivalent to the standard 224 | numpy ifft. This affects only the phases of the result. 225 | ret_cubegrid : bool, optional 226 | Whether to return the entire grid of real-space co-ordinate magnitudes. 227 | nthreads : bool or int, optional 228 | If set to False, uses numpy's FFT routine. If set to None, uses pyFFTW with 229 | number of threads equal to the number of available CPUs. If int, uses pyFFTW 230 | with number of threads equal to the input value. 231 | backend : FFTBackend, optional 232 | The backend to use for the FFT. If not provided, the backend is chosen based on 233 | the value of nthreads. 234 | 235 | Returns 236 | ------- 237 | ft : array 238 | The IDFT of X, normalised to be consistent with the continuous transform. 239 | freq : list of arrays 240 | The real-space co-ordinate grid in each dimension, consistent with the Fourier 241 | conventions specified. 242 | grid : array 243 | Only returned if ``ret_cubegrid`` is ``True``. 
An array with shape given by 244 | ``axes`` specifying the magnitude of the real-space co-ordinates at each point 245 | of the inverse fourier transform. 246 | """ 247 | if backend is None: 248 | backend = get_fft_backend(nthreads) 249 | 250 | if axes is None: 251 | axes = list(range(len(X.shape))) 252 | 253 | N = np.array([X.shape[axis] for axis in axes]) 254 | 255 | # Get the box volume if given the real-space box volume 256 | if Lk is None and L is None: 257 | Lk = 1 258 | elif L is not None: 259 | if np.isscalar(L): 260 | L = np.array([L] * len(axes)) 261 | 262 | dx = L / N 263 | Lk = 2 * np.pi / (dx * b) 264 | 265 | elif np.isscalar(Lk): 266 | Lk = [Lk] * len(axes) 267 | 268 | Lk = np.array(Lk) 269 | left_edge = _set_left_edge(left_edge, axes, Lk) 270 | 271 | V = np.prod(Lk) 272 | dk = np.array([float(lk) / float(n) for lk, n in zip(Lk, N)]) 273 | 274 | ft = ( 275 | V 276 | * backend.ifftn(X, axes=axes) 277 | * np.sqrt(np.abs(b) / (2 * np.pi) ** (1 + a)) ** len(axes) 278 | ) 279 | ft = backend.ifftshift(ft, axes=axes) 280 | 281 | freq = [backend.fftfreq(n, d=d, b=b) for n, d in zip(N, dk)] 282 | 283 | ft = _adjust_phase(ft, left_edge, freq, axes, -b) 284 | return _retfunc(ft, freq, axes, ret_cubegrid) 285 | 286 | 287 | def _adjust_phase(ft, left_edge, freq, axes, b): 288 | for i, (l, f) in enumerate(zip(left_edge, freq)): 289 | xp = np.exp(-b * 1j * f * l) 290 | obj = ( 291 | tuple([None] * axes[i]) 292 | + (slice(None, None, None),) 293 | + tuple([None] * (ft.ndim - axes[i] - 1)) 294 | ) 295 | ft *= xp[obj] 296 | return ft 297 | 298 | 299 | def _set_left_edge(left_edge, axes, L): 300 | if left_edge is None: 301 | left_edge = [-length / 2.0 for length in L] 302 | elif np.isscalar(left_edge): 303 | left_edge = [left_edge] * len(axes) 304 | else: 305 | assert len(left_edge) == len(axes) 306 | 307 | return left_edge 308 | 309 | 310 | def _retfunc(ft, freq, axes, ret_cubegrid): 311 | if not ret_cubegrid: 312 | return ft, freq 313 | grid = freq[0] ** 2 314 | for i in range(1, len(axes)): 315 | grid = np.add.outer(grid, freq[i] ** 2) 316 | 317 | return ft, freq, np.sqrt(grid) 318 | -------------------------------------------------------------------------------- /src/powerbox/dft_backend.py: -------------------------------------------------------------------------------- 1 | """FFT backends.""" 2 | 3 | from __future__ import annotations 4 | 5 | import numpy as np 6 | import warnings 7 | from abc import ABC, abstractmethod 8 | from functools import cache 9 | from multiprocessing import cpu_count 10 | 11 | try: 12 | import pyfftw 13 | except ImportError: 14 | pass 15 | 16 | 17 | class FFTBackend(ABC): # noqa: B024 18 | """Abstract base class for FFT backends.""" 19 | 20 | def fftshift(self, x, *args, **kwargs): 21 | """ 22 | The same as numpy, except that it preserves units (if Astropy quantities are used). 23 | 24 | All extra arguments are passed directly to numpy's ``fftshift``. 25 | """ 26 | out = self._fftshift(x, *args, **kwargs) 27 | 28 | return out * x.unit if hasattr(x, "unit") else out 29 | 30 | def ifftshift(self, x, *args, **kwargs): 31 | """ 32 | The same as numpy except it preserves units (if Astropy quantities are used). 33 | 34 | All extra arguments are passed directly to numpy's ``ifftshift``. 35 | """ 36 | out = self._ifftshift(x, *args, **kwargs) 37 | 38 | return out * x.unit if hasattr(x, "unit") else out 39 | 40 | def fftfreq(self, N, d=1.0, b=2 * np.pi): 41 | """ 42 | Return fourier frequencies for a box with N cells, using general Fourier convention. 
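        With the default ``b = 2*np.pi``, this is a zero-centred version of
        ``numpy.fft.fftfreq`` (a minimal sketch)::

            >>> NumpyFFT().fftfreq(4, d=1.0)
            array([-0.5 , -0.25,  0.  ,  0.25])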
43 | 
44 |         Parameters
45 |         ----------
46 |         N : int
47 |             The number of grid cells
48 |         d : float, optional
49 |             The interval between cells
50 |         b : float, optional
51 |             The fourier-convention of the frequency component (see :mod:`powerbox.dft` for
52 |             details).
53 | 
54 |         Returns
55 |         -------
56 |         freq : array
57 |             The N symmetric frequency components of the Fourier transform. Always centred at 0.
58 |         """
59 |         return self.fftshift(self._fftfreq(N, d=d)) * (2 * np.pi / b)
60 | 
61 | 
62 | class NumpyFFT(FFTBackend):
63 |     """FFT backend using numpy.fft."""
64 | 
65 |     def __init__(self):
66 |         self.fftn = np.fft.fftn
67 | 
68 |         self.ifftn = np.fft.ifftn
69 | 
70 |         self._fftshift = np.fft.fftshift
71 |         self._ifftshift = np.fft.ifftshift
72 |         self._fftfreq = np.fft.fftfreq
73 | 
74 |         self.empty = np.empty
75 |         self.have_fftw = False
76 |         self.nthreads = 1
77 | 
78 | 
79 | class FFTW(FFTBackend):
80 |     """FFT backend using pyfftw."""
81 | 
82 |     def __init__(self, nthreads=None):
83 |         try:
84 |             import pyfftw
85 |         except ImportError:
86 |             raise ImportError("pyFFTW could not be imported...")
87 | 
88 |         try:
89 |             pyfftw.builders._utils._default_threads(4)
90 |         except ValueError:
91 |             if nthreads and nthreads > 1:
92 |                 warnings.warn(
93 |                     "pyFFTW was not installed with multithreading. Using 1 thread.",
94 |                     stacklevel=2,
95 |                 )
96 |             nthreads = 1
97 | 
98 |         if nthreads is None:
99 |             nthreads = cpu_count()
100 | 
101 |         self.nthreads = nthreads
102 | 
103 |         self._fftshift = pyfftw.interfaces.numpy_fft.fftshift
104 |         self._ifftshift = pyfftw.interfaces.numpy_fft.ifftshift
105 |         self._fftfreq = pyfftw.interfaces.numpy_fft.fftfreq
106 |         self.empty = pyfftw.empty_aligned
107 | 
108 |     def ifftn(self, *args, **kwargs):
109 |         """Inverse fast fourier transform."""
110 |         return pyfftw.interfaces.numpy_fft.ifftn(*args, threads=self.nthreads, **kwargs)
111 | 
112 |     def fftn(self, *args, **kwargs):
113 |         """Fast fourier transform."""
114 |         return pyfftw.interfaces.numpy_fft.fftn(*args, threads=self.nthreads, **kwargs)
115 | 
116 | 
117 | @cache
118 | def get_fft_backend(nthreads=None):
119 |     """Choose a backend based on nthreads.
120 | 
121 |     Returns the FFTW backend when nthreads is None (meaning all available CPUs) or
122 |     greater than one, and the Numpy backend when nthreads is 1 or pyFFTW is unavailable.
123 |     """
124 |     if nthreads is None or nthreads > 1:
125 |         try:
126 |             fftbackend = FFTW(nthreads=nthreads)
127 |         except ImportError:
128 |             if nthreads is not None:
129 |                 warnings.warn(
130 |                     "Could not import pyfftw... Proceeding with numpy.", stacklevel=2
131 |                 )
132 |             fftbackend = NumpyFFT()
133 |     else:
134 |         fftbackend = NumpyFFT()
135 |     return fftbackend
136 | 
--------------------------------------------------------------------------------
/src/powerbox/powerbox.py:
--------------------------------------------------------------------------------
1 | """Classes that can create arbitrary-dimensional fields with given power spectra.
2 | 
3 | One such class produces *Gaussian* fields, and the other *LogNormal* fields.
4 | 
5 | In principle, these may be extended to other 1-point density distributions by
6 | subclassing :class:`PowerBox` and over-writing the same methods as are over-written in
7 | :class:`LogNormalPowerBox`.
8 | """
9 | 
10 | from __future__ import annotations
11 | 
12 | import numpy as np
13 | import warnings
14 | 
15 | from . import dft
16 | from .tools import _magnitude_grid
17 | 
18 | # TODO: add hankel-transform version of LogNormal
19 | 
20 | 
21 | def _make_hermitian(mag, pha):
22 |     r"""
23 |     Take random arrays and convert them to a complex hermitian array.
24 | 25 | Note that this assumes that mag is distributed normally. 26 | 27 | Parameters 28 | ---------- 29 | mag : array 30 | Normally-distributed magnitudes of the complex vector. 31 | 32 | pha : array 33 | Uniformly distributed phases of the complex vector 34 | 35 | Returns 36 | ------- 37 | kspace : array 38 | A complex hermitian array with normally distributed amplitudes. 39 | """ 40 | revidx = (slice(None, None, -1),) * len(mag.shape) 41 | mag = (mag + mag[revidx]) / np.sqrt(2) 42 | pha = (pha - pha[revidx]) / 2 + np.pi 43 | return mag * (np.cos(pha) + 1j * np.sin(pha)) 44 | 45 | 46 | class PowerBox: 47 | r""" 48 | Generate real- and fourier-space Gaussian fields with a given power spectrum. 49 | 50 | Parameters 51 | ---------- 52 | N : int 53 | Number of grid-points on a side for the resulting box (equivalently, number of 54 | wavenumbers to use). 55 | pk : callable 56 | A callable of a single (vector) variable `k`, which is the isotropic power 57 | spectrum. The relationship of the `k` of which this is a function to the 58 | real-space co-ordinates, `x`, is determined by the parameters ``a,b``. 59 | dim : int, default 2 60 | Number of dimensions of resulting box. 61 | boxlength : float, default 1.0 62 | Length of the final signal on a side. This may have arbitrary units, so long 63 | as `pk` is a function of a variable which has the inverse units. 64 | ensure_physical : bool, optional 65 | Interpreting the power spectrum as a spectrum of density fluctuations, the 66 | minimum physical value of the real-space field, :meth:`delta_x`, is -1. With 67 | ``ensure_physical`` set to ``True``, :meth:`delta_x` is clipped to return values 68 | >-1. If this is happening a lot, consider using :class:`LogNormalPowerBox`. 69 | a,b : float, optional 70 | These define the Fourier convention used. See :mod:`powerbox.dft` for details. 71 | The defaults define the standard usage in *cosmology* (for example, as defined 72 | in Cosmological Physics, Peacock, 1999, pg. 496.). Standard numerical usage 73 | (eg. numpy) is (a,b) = (0,2pi). 74 | vol_normalised_power : bool, optional 75 | Whether the input power spectrum, ``pk``, is volume-weighted. Default True 76 | because of standard cosmological usage. 77 | seed: int, optional 78 | A random seed to define the initial conditions. If not set, it will remain 79 | random, and each call to eg. :meth:`delta_x()` will produce a *different* 80 | realisation. 81 | nthreads : bool or int, optional 82 | If set to False, uses numpy's FFT routine. If set to None, uses pyFFTW with 83 | number of threads equal to the number of available CPUs. If int, uses pyFFTW 84 | with number of threads equal to the input value. 85 | 86 | Notes 87 | ----- 88 | A number of conventions need to be listed. 89 | 90 | The conventions of using `x` for "real-space" and `k` for "fourier space" arise from 91 | cosmology, but this does not affect anything -- `x` could just as well stand for 92 | "time domain" and `k` for "frequency domain". 93 | 94 | The important convention is the relationship between `x` and `k`, or in other words, 95 | whether `k` is interpreted as an angular frequency or ordinary frequency. By 96 | default, because of cosmological conventions, `k` is an angular frequency, so that 97 | the fourier transform integrand is delta_k*exp(-ikx). The conventions can be changed 98 | arbitrarily by setting the ``a,b`` parameters (see :mod:`powerbox.dft` for details). 
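    For instance (a minimal sketch -- the arguments other than ``a,b`` here are
    arbitrary), a box using numpy's Fourier convention rather than the cosmological
    default can be requested explicitly:

    >>> pb_numpy = PowerBox(128, lambda k: 0.1 * k**-2.0, dim=2, a=0, b=2 * np.pi)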
99 | 
100 |     The primary quantity of interest is :meth:`delta_x`, which is a zero-mean Gaussian
101 |     field with a power spectrum equivalent to that which was input. Being zero-mean
102 |     enables its direct interpretation as an overdensity field, and this interpretation
103 |     is enforced in the :meth:`create_discrete_sample` method.
104 | 
105 |     .. note:: None of the n-dimensional arrays that are created within the class are
106 |               stored, due to the inefficiency in memory consumption that this would
107 |               imply. Thus, each large array is created and *returned* by their
108 |               respective method, to be stored/discarded by the user.
109 | 
110 |     .. warning:: Due to the above note, repeated calls to eg. :meth:`delta_x()` will
111 |                  produce *different* realisations of the real-space field, unless the
112 |                  `seed` parameter is set in the constructor.
113 | 
114 |     Examples
115 |     --------
116 |     To create a 3-dimensional box of gaussian over-densities, gridded into 100 bins,
117 |     with cosmological conventions, and a power-law power spectrum, simply use
118 | 
119 |     >>> pb = PowerBox(100,lambda k : 0.1*k**-3., dim=3, boxlength=100.0)
120 |     >>> overdensities = pb.delta_x()
121 |     >>> grid = pb.x
122 |     >>> radii = pb.r
123 | 
124 |     To create a 2D turbulence structure, with arbitrary units, one can use
125 | 
126 |     >>> import matplotlib.pyplot as plt
127 |     >>> pb = PowerBox(1000, lambda k : k**-7./5.)
128 |     >>> plt.imshow(pb.delta_x())
129 |     """
130 | 
131 |     def __init__(
132 |         self,
133 |         N,
134 |         pk,
135 |         dim=2,
136 |         boxlength=1.0,
137 |         ensure_physical=False,
138 |         a=1.0,
139 |         b=1.0,
140 |         vol_normalised_power=True,
141 |         seed=None,
142 |         nthreads=None,
143 |     ):
144 |         self.N = N
145 |         self.dim = dim
146 |         self.boxlength = boxlength
147 |         self.L = boxlength
148 |         self.fourier_a = a
149 |         self.fourier_b = b
150 |         self.vol_normalised_power = vol_normalised_power
151 |         self.V = self.boxlength**self.dim
152 |         self.fftbackend = dft.get_fft_backend(nthreads)
153 | 
154 |         if self.vol_normalised_power:
155 |             self.pk = lambda k: pk(k) / self.V
156 |         else:
157 |             self.pk = pk
158 | 
159 |         self.ensure_physical = ensure_physical
160 |         self.Ntot = self.N**self.dim
161 | 
162 |         self.seed = seed
163 | 
164 |         if N % 2 == 0:
165 |             self._even = True
166 |         else:
167 |             self._even = False
168 | 
169 |         self.n = N + 1 if self._even else N
170 | 
171 |         # Get the grid-size for the final real-space box.
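        # (Added note: dx also fixes the Fourier-dual grid, since `kvec` below is
        # built from fftfreq(N, d=dx, b=fourier_b); finer real-space cells therefore
        # extend the box to higher wavenumbers.)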
172 |         self.dx = float(boxlength) / N
173 | 
174 |     def k(self):
175 |         """The entire grid of wavenumber magnitudes."""
176 |         return _magnitude_grid(self.kvec, self.dim)
177 | 
178 |     @property
179 |     def kvec(self):
180 |         """The vector of wavenumbers along a side."""
181 |         return self.fftbackend.fftfreq(self.N, d=self.dx, b=self.fourier_b)
182 | 
183 |     @property
184 |     def r(self):
185 |         """The radial position of every point in the grid."""
186 |         return _magnitude_grid(self.x, self.dim)
187 | 
188 |     @property
189 |     def x(self):
190 |         """The co-ordinates of the grid along a side."""
191 |         return np.arange(-self.boxlength / 2, self.boxlength / 2, self.dx)[: self.N]
192 | 
193 |     def gauss_hermitian(self):
194 |         """A random array which has Gaussian magnitudes and Hermitian symmetry."""
195 |         if self.seed:
196 |             np.random.seed(self.seed)
197 | 
198 |         mag = np.random.normal(0, 1, size=[self.n] * self.dim)
199 |         pha = 2 * np.pi * np.random.uniform(size=[self.n] * self.dim)
200 | 
201 |         dk = _make_hermitian(mag, pha)
202 | 
203 |         if self._even:
204 |             cutidx = (slice(None, -1),) * self.dim
205 |             dk = dk[cutidx]
206 | 
207 |         return dk
208 | 
209 |     def power_array(self):
210 |         """The Power Spectrum (volume normalised) at `self.k`."""
211 |         k = self.k()
212 |         mask = k != 0
213 |         # Re-use the k array to conserve memory
214 |         k[mask] = self.pk(k[mask])
215 |         return k
216 | 
217 |     def delta_k(self):
218 |         """A realisation of the delta_k.
219 | 
220 |         The gaussianised square root of the power spectrum (i.e. the Fourier
221 |         co-efficients).
222 |         """
223 |         p = self.power_array()
224 | 
225 |         if np.any(p < 0):
226 |             raise ValueError(
227 |                 "The power spectrum function has returned negative values."
228 |             )
229 | 
230 |         gh = self.gauss_hermitian()
231 |         gh[...] = np.sqrt(p) * gh
232 |         return gh
233 | 
234 |     def delta_x(self):
235 |         """The realised field in real-space from the input power spectrum."""
236 |         # Here we multiply by V because the (inverse) fourier-transform of the (dimensionless) power has
237 |         # units of 1/V and we require a unitless quantity for delta_x.
238 |         dk = self.fftbackend.empty((self.N,) * self.dim, dtype="complex128")
239 |         dk[...] = self.delta_k()
240 |         dk[...] = (
241 |             self.V
242 |             * dft.ifft(
243 |                 dk,
244 |                 L=self.boxlength,
245 |                 a=self.fourier_a,
246 |                 b=self.fourier_b,
247 |                 backend=self.fftbackend,
248 |             )[0]
249 |         )
250 |         dk = np.real(dk)
251 | 
252 |         if self.ensure_physical:
253 |             np.clip(dk, -1, np.inf, dk)
254 | 
255 |         return dk
256 | 
257 |     def create_discrete_sample(
258 |         self,
259 |         nbar,
260 |         randomise_in_cell=True,
261 |         min_at_zero=False,
262 |         store_pos=False,
263 |         delta_x=None,
264 |     ):
265 |         r"""Create a sample of tracers of the underlying density distribution.
266 | 
267 |         This function assumes that the real-space signal represents an over-density
268 |         with respect to some mean.
269 | 
270 |         Parameters
271 |         ----------
272 |         nbar : float
273 |             Mean tracer density within the box.
274 |         randomise_in_cell : bool, optional
275 |             Whether to randomise the positions of the tracers within the cells, or put
276 |             them at the grid-points (more efficient).
277 |         min_at_zero : bool, optional
278 |             Whether to make the lower corner of the box at the origin, otherwise the
279 |             centre of the box is at the origin.
280 |         store_pos : bool, optional
281 |             Whether to store the sample of tracers as an instance variable
282 |             ``tracer_positions``.
283 |         delta_x : numpy.ndarray, optional
284 |             Field from which to draw discrete samples.
This is likely the
285 |             output of a previous call to `delta_x()`, but could in principle be
286 |             any field. Note that if not supplied, the field will be generated
287 |             from scratch. As a result, unless the user has supplied a random seed
288 |             at initialization, the discrete samples will be a new realization of
289 |             a field with the specified power spectrum.
290 | 
291 |         Returns
292 |         -------
293 |         tracer_positions : float, array_like
294 |             ``(n, d)``-array, with ``n`` the number of tracers and ``d`` the number of
295 |             dimensions. Each row represents a single tracer's co-ordinates.
296 |         """
297 |         if delta_x is None:
298 |             if self.seed is None:
299 |                 warnings.warn(
300 |                     "You should provide `seed` at initialization if you"
301 |                     " want a correspondence between the parent field and"
302 |                     " the discrete samples.",
303 |                     stacklevel=2,
304 |                 )
305 |             dx = self.delta_x()
306 |         else:
307 |             dx = delta_x
308 | 
309 |         dx = (dx + 1) * self.dx**self.dim * nbar
310 |         n = dx
311 | 
312 |         self.n_per_cell = np.random.poisson(n)
313 | 
314 |         # Get all source positions
315 |         args = [self.x] * self.dim
316 |         X = np.meshgrid(*args, indexing="ij")
317 | 
318 |         tracer_positions = np.array([x.flatten() for x in X]).T
319 |         tracer_positions = tracer_positions.repeat(self.n_per_cell.flatten(), axis=0)
320 | 
321 |         if randomise_in_cell:
322 |             tracer_positions += (
323 |                 np.random.uniform(size=(np.sum(self.n_per_cell), self.dim)) * self.dx
324 |             )
325 | 
326 |         if min_at_zero:
327 |             tracer_positions += self.boxlength / 2.0
328 | 
329 |         if store_pos:
330 |             self.tracer_positions = tracer_positions
331 | 
332 |         return tracer_positions
333 | 
334 | 
335 | class LogNormalPowerBox(PowerBox):
336 |     r"""Calculate Log-Normal density fields with given power spectra.
337 | 
338 |     See the documentation of :class:`PowerBox` for a detailed explanation of the
339 |     arguments, as this class has exactly the same arguments.
340 | 
341 |     This class calculates an (over-)density field of arbitrary dimension given an input
342 |     isotropic power spectrum. In this case, the field has a log-normal distribution of
343 |     over-densities, always yielding a physically valid field.
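
    A quick illustration of that guarantee (a minimal sketch; the constructor
    arguments here are arbitrary) relies on the field being ``exp(gaussian) - 1``,
    which is bounded below by -1 by construction:

    >>> lnpb = LogNormalPowerBox(64, lambda k: k**-2.0, dim=2, boxlength=1.0)
    >>> field = lnpb.delta_x()  # always > -1, by construction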
344 | 
345 |     Examples
346 |     --------
347 |     To create a log-normal over-density field:
348 | 
349 |     >>> from powerbox import LogNormalPowerBox
350 |     >>> lnpb = LogNormalPowerBox(100,lambda k : k**-7./5.,dim=2, boxlength=1.0)
351 |     >>> overdensities = lnpb.delta_x()
352 |     >>> grid = lnpb.x
353 |     >>> radii = lnpb.r
354 | 
355 |     To plot the overdensities:
356 | 
357 |     >>> import matplotlib.pyplot as plt
358 |     >>> plt.imshow(lnpb.delta_x())
359 | 
360 |     Compare the fields from a Gaussian and Lognormal realisation with the same power:
361 | 
362 |     >>> lnpb = LogNormalPowerBox(300,lambda k : k**-7./5.,dim=2, boxlength=1.0)
363 |     >>> pb = PowerBox(300,lambda k : k**-7./5.,dim=2, boxlength=1.0)
364 |     >>> fig,ax = plt.subplots(2,1,sharex=True,sharey=True,figsize=(12,5))
365 |     >>> ax[0].imshow((dx := lnpb.delta_x()),aspect="equal",vmin=-1,vmax=dx.max())
366 |     >>> ax[1].imshow(pb.delta_x(),aspect="equal",vmin=-1,vmax=dx.max())
367 | 
368 |     To create and plot a discrete version of the field:
369 | 
370 |     >>> positions = lnpb.create_discrete_sample(
371 |     ...     nbar=1000.0,  # Number density in terms of boxlength units
372 |     ...     randomise_in_cell=True,
373 |     ... )
374 |     >>> plt.scatter(positions[:,0],positions[:,1],s=2,alpha=0.5,lw=0)
375 |     """
376 | 
377 |     def __init__(self, *args, **kwargs):
378 |         super().__init__(*args, **kwargs)
379 | 
380 |     def correlation_array(self):
381 |         """The correlation function from the input power, on the grid."""
382 |         pa = self.fftbackend.empty((self.N,) * self.dim)
383 |         pa[...] = self.power_array()
384 |         return self.V * np.real(
385 |             dft.ifft(
386 |                 pa,
387 |                 L=self.boxlength,
388 |                 a=self.fourier_a,
389 |                 b=self.fourier_b,
390 |                 backend=self.fftbackend,
391 |             )[0]
392 |         )
393 | 
394 |     def gaussian_correlation_array(self):
395 |         """The correlation function required for a Gaussian field to produce the input power on a lognormal field."""
396 |         return np.log(1 + self.correlation_array())
397 | 
398 |     def gaussian_power_array(self):
399 |         """The power spectrum required for a Gaussian field to produce the input power on a lognormal field."""
400 |         gca = self.fftbackend.empty((self.N,) * self.dim)
401 |         gca[...] = self.gaussian_correlation_array()
402 |         gpa = np.abs(
403 |             dft.fft(
404 |                 gca,
405 |                 L=self.boxlength,
406 |                 a=self.fourier_a,
407 |                 b=self.fourier_b,
408 |                 backend=self.fftbackend,
409 |             )[0]
410 |         )
411 |         gpa[self.k() == 0] = 0
412 |         return gpa
413 | 
414 |     def delta_k(self):
415 |         """
416 |         A realisation of the delta_k.
417 | 
418 |         i.e. the gaussianised square root of the unitless power spectrum
419 |         (the Fourier co-efficients).
420 |         """
421 |         p = self.gaussian_power_array()
422 |         gh = self.gauss_hermitian()
423 |         gh[...] = np.sqrt(p) * gh
424 |         return gh
425 | 
426 |     def delta_x(self):
427 |         """The real-space over-density field, from the input power spectrum."""
428 |         dk = self.fftbackend.empty((self.N,) * self.dim, dtype="complex128")
429 |         dk[...] = self.delta_k()
430 |         dk[...] = (
431 |             np.sqrt(self.V)
432 |             * dft.ifft(
433 |                 dk,
434 |                 L=self.boxlength,
435 |                 a=self.fourier_a,
436 |                 b=self.fourier_b,
437 |                 backend=self.fftbackend,
438 |             )[0]
439 |         )
440 |         dk = np.real(dk)
441 | 
442 |         sg = np.var(dk)
443 |         return np.exp(dk - sg / 2) - 1
--------------------------------------------------------------------------------
/src/powerbox/tools.py:
--------------------------------------------------------------------------------
1 | """Tools for dealing with structured boxes, such as those output by :mod:`powerbox`.
2 | 
3 | Tools include those for averaging a field angularly, and generating the isotropic
4 | power spectrum.
5 | """
6 | 
7 | from __future__ import annotations
8 | 
9 | import numpy as np
10 | import warnings
11 | from scipy.interpolate import RegularGridInterpolator
12 | from scipy.special import gamma
13 | 
14 | from . import dft
15 | 
16 | 
17 | def _getbins(
18 |     bins: np.ndarray | int,
19 |     coord_mags: np.ndarray,
20 |     log: bool,
21 |     bins_upto_boxlen: bool | None = None,
22 | ):
23 |     if np.iterable(bins):
24 |         return bins
25 | 
26 |     if bins_upto_boxlen is None:
27 |         warnings.warn(
28 |             (
29 |                 "In the future, bins will be generated by default up to the smallest "
30 |                 "length over any dimension, instead of the largest magnitude for the box. "
31 |                 "Set bins_upto_boxlen to silence this warning."
32 |             ),
33 |             stacklevel=2,
34 |             category=FutureWarning,
35 |         )
36 |         bins_upto_boxlen = False
37 | 
38 |     if bins_upto_boxlen:
39 |         try:
40 |             # Fails if coords is not a cube / inhomogeneous.
41 |             max_radius = np.min(
42 |                 [np.max(coord_mags, axis=i) for i in range(coord_mags.ndim)]
43 |             )
44 |         except ValueError:
45 |             maxs = [np.max(coord_mags, axis=i) for i in range(coord_mags.ndim)]
46 |             maxs_flat = []
47 |             for m in maxs: maxs_flat.extend(m.ravel())
48 |             max_radius = np.min(maxs_flat)
49 |     else:
50 |         max_radius = coord_mags.max()
51 | 
52 |     if not log:
53 |         bins = np.linspace(coord_mags.min(), max_radius, bins + 1)
54 |     else:
55 |         mn = coord_mags[coord_mags > 0].min()
56 |         bins = np.logspace(np.log10(mn), np.log10(max_radius), bins + 1)
57 | 
58 |     return bins
59 | 
60 | 
61 | def angular_average(
62 |     field,
63 |     coords,
64 |     bins,
65 |     weights=1,
66 |     average=True,
67 |     bin_ave=True,
68 |     get_variance=False,
69 |     log_bins=False,
70 |     interpolation_method=None,
71 |     interp_points_generator=None,
72 |     return_sumweights=False,
73 |     bins_upto_boxlen: bool | None = None,
74 | ):
75 |     r"""
76 |     Average a given field within radial bins.
77 | 
78 |     This function can be used on fields of arbitrary dimension (memory permitting), and the field need not be centred
79 |     at the origin. The averaging assumes that the grid cells fall completely into the bin which encompasses the
80 |     co-ordinate point for the cell (i.e. there is no weighted splitting of cells if they intersect a bin edge).
81 | 
82 |     It is optimized for applying a set of weights, and obtaining the variance of the mean, at the same time as
83 |     averaging.
84 | 
85 |     Parameters
86 |     ----------
87 |     field: nd-array
88 |         An array of arbitrary dimension specifying the field to be angularly averaged.
89 | 
90 |     coords: nd-array or list of n arrays.
91 |         Either the *magnitude* of the co-ordinates at each point of `field`, or a list of 1D arrays specifying the
92 |         co-ordinates in each dimension.
93 | 
94 |     bins: float or array.
95 |         The ``bins`` argument provided to histogram. Can be an int or array specifying radial bin edges.
96 | 
97 |     weights: array, optional
98 |         An array of the same shape as `field`, giving a weight for each entry.
99 | 
100 |     average: bool, optional
101 |         Whether to take the (weighted) average. If False, returns the (unweighted) sum.
102 | 
103 |     bin_ave : bool, optional
104 |         Whether to return the bin co-ordinates as the (weighted) average of cells within the bin (if True), or
105 |         the regularly spaced edges of the bins.
106 | 
107 |     get_variance : bool, optional
108 |         Whether to also return an estimate of the variance of the power in each bin.
109 | 
110 |     log_bins : bool, optional
111 |         Whether to create bins in log-space.
112 | 
113 |     interpolation_method : str, optional
114 |         If None, does not interpolate. Currently only 'linear' is supported.
115 | 
116 |     interp_points_generator : callable, optional
117 |         A function that generates the sample points for the interpolation.
118 |         If None, defaults to regular_angular_generator with resolution = 0.05.
119 |         If callable, a factory whose outer function takes a single
120 |         argument `angular_resolution`, which defines the angular resolution in radians
121 |         for the samples taken for the interpolation, and returns the nested generator.
122 |         The nested function takes as input `bins`, which is the array of bins at
123 |         which we want to average the field, and `dims2avg`, which is the number of dims to average over.
124 |         It returns a 1D array of radii and a 2D array of azimuthal angles with shape
125 |         (ndim-1, N), where N is the number of samples.
126 |         The azimuthal angles have phi_n[0,:] :math:`\in [0,2*\pi]`, and
127 |         phi_n[1:,:] :math:`\in [0,\pi]`.
128 |         See the functions `regular_angular_generator` and `above_mu_min_angular_generator` for examples.
129 | 
130 |     return_sumweights : bool, optional
131 |         Whether to return the number of modes in each bin.
132 |         Note that for the linear interpolation case,
133 |         this corresponds to the number of samples averaged over
134 |         (which can be adjusted by supplying a different interp_points_generator
135 |         function with a different angular resolution).
136 | 
137 |     bins_upto_boxlen : bool, optional
138 |         If set to True and the bins are determined automatically, calculate bins only
139 |         up to the maximum k along any dimension. Otherwise, calculate bins up to the
140 |         maximum magnitude of k (i.e. a factor of sqrt(ndim) higher). Default is False
141 |         for backwards compatibility.
142 | 
143 |     Returns
144 |     -------
145 |     field_1d : 1D-array
146 |         The angularly-averaged field.
147 | 
148 |     bins : 1D-array
149 |         Array of same shape as field_1d specifying the radial co-ordinates of the bins. Either the mean co-ordinate
150 |         from the input data, or the regularly spaced bins, dependent on `bin_ave`.
151 | 
152 |     var : 1D-array, optional
153 |         The variance of the averaged field (same shape as bins), estimated from the mean standard error.
154 |         Only returned if `get_variance` is True.
155 | 
156 |     Notes
157 |     -----
158 |     If desired, the variance is calculated as the weighted unbiased variance, using the formula at
159 |     https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights for the variance in each cell, and
160 |     normalising by a factor of :math:`V_2/V_1^2` to estimate the variance of the average.
161 | 
162 |     Examples
163 |     --------
164 |     Create a 3D radial function, and average over radial bins:
165 | 
166 |     >>> import numpy as np
167 |     >>> import matplotlib.pyplot as plt
168 |     >>> x = np.linspace(-5,5,128) # Setup a grid
169 |     >>> X,Y,Z = np.meshgrid(x,x,x)
170 |     >>> r = np.sqrt(X**2+Y**2+Z**2) # Get the radial co-ordinate of grid
171 |     >>> field = np.exp(-r**2) # Generate a radial field
172 |     >>> avgfunc, bins = angular_average(field,r,bins=100) # Call angular_average
173 |     >>> plt.plot(bins, np.exp(-bins**2), label="Input Function") # Plot input function versus ang. avg.
174 |     >>> plt.plot(bins, avgfunc, label="Averaged Function")
175 | 
176 |     See Also
177 |     --------
178 |     angular_average_nd : Perform an angular average in a subset of the total dimensions.
179 | 180 | """ 181 | if interpolation_method is not None and interpolation_method != "linear": 182 | raise ValueError("Only linear interpolation is supported.") 183 | if len(coords) == len(field.shape): 184 | # coords are a segmented list of dimensional co-ordinates 185 | coord_mags = _magnitude_grid(coords) 186 | elif interpolation_method is not None: 187 | raise ValueError( 188 | "Must supply a list of len(field.shape) of 1D coordinate arrays for coords when interpolating!" 189 | ) 190 | else: 191 | # coords are the magnitude of the co-ordinates 192 | # since we are not interpolating, then we can just use the magnitude of the co-ordinates 193 | coord_mags = coords 194 | 195 | if interpolation_method is None: 196 | indx, bins, sumweights = _get_binweights( 197 | coord_mags, 198 | weights, 199 | bins, 200 | average, 201 | bin_ave=bin_ave, 202 | log_bins=log_bins, 203 | bins_upto_boxlen=bins_upto_boxlen, 204 | ) 205 | 206 | if np.any(sumweights == 0): 207 | warnings.warn( 208 | "One or more radial bins had no cells within it.", stacklevel=2 209 | ) 210 | res = _field_average(indx, field, weights, sumweights) 211 | else: 212 | bins = _getbins(bins, coord_mags, log_bins, bins_upto_boxlen) 213 | 214 | if bin_ave: 215 | if log_bins: 216 | bins = np.exp((np.log(bins[1:]) + np.log(bins[:-1])) / 2) 217 | else: 218 | bins = (bins[1:] + bins[:-1]) / 2 219 | 220 | sample_coords, r_n = _sample_coords_interpolate( 221 | coords, bins, weights, interp_points_generator 222 | ) 223 | res, sumweights = _field_average_interpolate( 224 | coords, field, bins, weights, sample_coords, r_n 225 | ) 226 | if get_variance: 227 | if interpolation_method is None: 228 | var = _field_variance(indx, field, res, weights, sumweights) 229 | else: 230 | raise NotImplementedError( 231 | "Variance calculation not implemented for interpolation" 232 | ) 233 | if return_sumweights: 234 | return res, bins, var, sumweights 235 | else: 236 | return res, bins, var 237 | else: 238 | if return_sumweights: 239 | return res, bins, sumweights 240 | else: 241 | return res, bins 242 | 243 | 244 | def _magnitude_grid(x, dim=None): 245 | if dim is not None: 246 | return np.sqrt(np.sum(np.meshgrid(*([x**2] * dim), indexing="ij"), axis=0)) 247 | else: 248 | return np.sqrt(np.sum(np.meshgrid(*([X**2 for X in x]), indexing="ij"), axis=0)) 249 | 250 | 251 | def _get_binweights( 252 | coord_mags, 253 | weights, 254 | bins, 255 | average=True, 256 | bin_ave=True, 257 | log_bins=False, 258 | bins_upto_boxlen: bool | None = None, 259 | ): 260 | # Get a vector of bin edges 261 | bins = _getbins(bins, coord_mags, log_bins, bins_upto_boxlen=bins_upto_boxlen) 262 | 263 | indx = np.digitize(coord_mags.flatten(), bins) 264 | 265 | if average or bin_ave: 266 | if not np.isscalar(weights): 267 | if coord_mags.shape != weights.shape: 268 | raise ValueError( 269 | "coords and weights must have the same shape!", 270 | coord_mags.shape, 271 | weights.shape, 272 | ) 273 | sumweights = np.bincount( 274 | indx, weights=weights.flatten(), minlength=len(bins) + 1 275 | )[1:-1] 276 | else: 277 | sumweights = np.bincount(indx, minlength=len(bins) + 1)[1:-1] 278 | 279 | if average: 280 | binweight = sumweights 281 | else: 282 | binweight = 1 * sumweights 283 | sumweights = np.ones_like(binweight) 284 | 285 | if bin_ave: 286 | bins = ( 287 | np.bincount( 288 | indx, 289 | weights=(weights * coord_mags).flatten(), 290 | minlength=len(bins) + 1, 291 | )[1:-1] 292 | / binweight 293 | ) 294 | 295 | else: 296 | sumweights = np.ones(len(bins) - 1) 297 | 298 | return indx, 
bins, sumweights
299 | 
300 | 
301 | def _spherical2cartesian(r, phi_n):
302 |     r"""Convert spherical coordinates to Cartesian coordinates.
303 | 
304 |     Parameters
305 |     ----------
306 |     r : array-like
307 |         1D array of radii.
308 |     phi_n : array-like
309 |         2D array of azimuthal angles with shape (ndim-1, N), where N is the number of points.
310 |         phi_n[0,:] :math:`\in [0,2*\pi]`, and phi_n[1:,:] :math:`\in [0,\pi]`.
311 | 
312 |     Returns
313 |     -------
314 |     coords : array-like
315 |         2D array of Cartesian coordinates with shape (ndim, N).
316 | 
317 |     For more details, see https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates
318 | 
319 |     """
320 |     if phi_n.shape[0] == 1:
321 |         return r * np.array([np.cos(phi_n[0]), np.sin(phi_n[0])])
322 |     elif phi_n.shape[0] == 2:
323 |         return r * np.array(
324 |             [
325 |                 np.cos(phi_n[0]),
326 |                 np.sin(phi_n[0]) * np.cos(phi_n[1]),
327 |                 np.sin(phi_n[0]) * np.sin(phi_n[1]),
328 |             ]
329 |         )
330 |     else:
331 |         phi_n = np.concatenate(
332 |             [2 * np.pi * np.ones(phi_n.shape[1])[np.newaxis, ...], phi_n], axis=0
333 |         )
334 |         sines = np.sin(phi_n)
335 |         sines[0, :] = 1
336 |         cum_sines = np.cumprod(sines, axis=0)
337 |         cosines = np.roll(np.cos(phi_n), -1, axis=0)
338 |         return cum_sines * cosines * r
339 | 
340 | 
341 | def above_mu_min_angular_generator(angular_resolution=0.1, mu=0.97):
342 |     r"""
343 |     Returns a generator of spherical coordinates above a certain :math:`\mu` value.
344 | 
345 |     Parameters
346 |     ----------
347 |     bins : array-like
348 |         1D array of radii at which we want to spherically average the field.
349 |     dims2avg : int
350 |         The number of dimensions to average over.
351 |     angular_resolution : float, optional
352 |         The angular resolution in radians for the sample points for the interpolation.
353 |     mu : float, optional
354 |         The minimum value of :math:`\cos(\theta)`, where :math:`\theta = \arctan(k_\perp/k_\parallel)`,
355 |         for the sample points generated for the interpolation.
356 | 
357 |     Returns
358 |     -------
359 |     r_n : array-like
360 |         1D array of radii.
361 |     phi_n : array-like
362 |         2D array of azimuthal angles with shape (ndim-1, N), where N is the number of points.
363 |         phi_n[0,:] :math:`\in [0,2*\pi]`, and phi_n[1:,:] :math:`\in [0,\pi]`.
364 |     """
365 | 
366 |     def generator(bins, dims2avg):
367 |         r_n, phi_n = regular_angular_generator(angular_resolution)(bins, dims2avg)
368 | 
369 |         # sine because the phi_n are wrt x-axis and we need them wrt z-axis.
370 |         if len(phi_n) == 1:
371 |             mask = np.sin(phi_n[0, :]) >= mu
372 |         else:
373 |             mask = np.all(np.sin(phi_n[1:, :]) >= mu, axis=0)
374 |         return r_n[mask], phi_n[:, mask]
375 | 
376 |     return generator
377 | 
378 | 
379 | def regular_angular_generator(angular_resolution=0.05):
380 |     r"""
381 |     Returns a generator of spherical coordinates regularly sampled at a given angular resolution.
382 | 
383 |     Parameters
384 |     ----------
385 |     bins : array-like
386 |         1D array of radii at which we want to spherically average the field.
387 |     dims2avg : int
388 |         The number of dimensions to average over.
389 |     angular_resolution : float, optional
390 |         The angular resolution in radians for the sample points for the interpolation.
391 |         Defaults to 0.05 rad.
392 | 
393 |     Returns
394 |     -------
395 |     r_n : array-like
396 |         1D array of radii.
397 |     phi_n : array-like
398 |         2D array of azimuthal angles with shape (ndim-1, N), where N is the number of points.
399 |         phi_n[0,:] :math:`\in [0,2*\pi]`, and phi_n[1:,:] :math:`\in [0,\pi]`.
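
    As a rough illustration (a sketch: the exact sample counts follow from the
    resolution heuristic in the code below, which enforces a floor of 100 angular
    samples per radius):

    >>> import numpy as np
    >>> r_n, phi_n = regular_angular_generator(0.1)(np.array([1.0, 2.0]), 2)
    >>> phi_n.shape[0]  # rows of phi_n = dims2avg
    2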
400 | """ 401 | 402 | def generator(bins, dims2avg): 403 | num_angular_bins = np.array( 404 | np.max( 405 | [ 406 | np.round(2 * np.pi * bins / angular_resolution), 407 | np.ones_like(bins) * 100, 408 | ], 409 | axis=0, 410 | ), 411 | dtype=int, 412 | ) 413 | phi_i = [np.linspace(0.0, np.pi, n) for n in num_angular_bins] 414 | phi_N = [np.linspace(0.0, 2 * np.pi, n) for n in num_angular_bins] 415 | 416 | # Angular resolution is same for all dims 417 | phi_n = np.concatenate( 418 | [ 419 | np.array( 420 | np.meshgrid( 421 | *([phi_N[i]] + [phi_i[i]] * (dims2avg - 1)), sparse=False 422 | ) 423 | ).reshape((dims2avg, num_angular_bins[i] ** dims2avg)) 424 | for i in range(len(bins)) 425 | ], 426 | axis=-1, 427 | ) 428 | r_n = np.concatenate( 429 | [[r] * (num_angular_bins[i] ** dims2avg) for i, r in enumerate(bins)] 430 | ) 431 | return r_n, phi_n 432 | 433 | return generator 434 | 435 | 436 | def _sample_coords_interpolate(coords, bins, weights, interp_points_generator=None): 437 | # Grid is regular + can be ordered only in Cartesian coords. 438 | field_shape = [len(c) for c in coords] 439 | if isinstance(weights, np.ndarray): 440 | weights = weights.reshape(field_shape) 441 | else: 442 | weights = np.ones(field_shape) * weights 443 | # To extrapolate at the edges if needed. 444 | # Evaluate it on points in angular coords that we then convert to Cartesian. 445 | # Number of angular bins for each radius absk on which to calculate the interpolated power when doing the averaging 446 | # Larger wavemodes / radii will have more samples in theta 447 | # "bins" is always 1D 448 | # Max is to set a minimum number of bins for the smaller wavemode bins 449 | if interp_points_generator is None: 450 | interp_points_generator = regular_angular_generator() 451 | if len(coords) > 1: 452 | r_n, phi_n = interp_points_generator(bins, len(coords) - 1) 453 | sample_coords = _spherical2cartesian(r_n, phi_n) 454 | else: 455 | sample_coords = bins.reshape(1, -1) 456 | r_n = bins 457 | # Remove sample coords that are not even on the coords grid (e.g. due to phi) 458 | mask1 = np.all( 459 | sample_coords >= np.array([c.min() for c in coords])[..., np.newaxis], axis=0 460 | ) 461 | mask2 = np.all( 462 | sample_coords <= np.array([c.max() for c in coords])[..., np.newaxis], axis=0 463 | ) 464 | 465 | mask = mask1 & mask2 466 | sample_coords = sample_coords[:, mask] 467 | r_n = r_n[mask] 468 | if len(r_n) == 0: 469 | raise ValueError( 470 | "Generated sample points are outside of the coordinate box provided for the field! Try changing your points generator or field coordinates." 471 | ) 472 | return sample_coords, r_n 473 | 474 | 475 | def _field_average_interpolate(coords, field, bins, weights, sample_coords, r_n): 476 | # Grid is regular + can be ordered only in Cartesian coords. 
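    # (Added note on the strategy: the field is interpolated onto points sampled
    # on spherical shells at each bin radius, and the interpolated values are then
    # averaged per shell; NaNs mark samples outside the grid or with zero weight,
    # and are excluded from the averages below.)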
477 | if isinstance(weights, np.ndarray): 478 | weights = weights.reshape(field.shape) 479 | if not ((weights == 0) | (weights == 1)).all(): 480 | warnings.warn( 481 | "Interpolating with non-binary weights is slow.", 482 | RuntimeWarning, 483 | stacklevel=2, 484 | ) 485 | else: 486 | field = field * weights 487 | else: 488 | weights = np.ones_like(field) * weights 489 | # Set 0 weights to NaNs 490 | field[weights == 0] = np.nan 491 | # Rescale the field (see scipy documentation for RegularGridInterpolator) 492 | mean, std = np.nanmean(field), np.max( 493 | [np.nanstd(field), 1.0] 494 | ) # Avoid division by 0 495 | rescaled_field = (field - mean) / std 496 | fnc = RegularGridInterpolator( 497 | coords, 498 | rescaled_field, # Complex data is accepted. 499 | bounds_error=False, 500 | fill_value=np.nan, 501 | ) # To extrapolate at the edges if needed. 502 | # Evaluate it on points in angular coords that we then convert to Cartesian. 503 | 504 | interped_field = fnc(sample_coords.T) * std + mean 505 | if np.all(np.isnan(interped_field)): 506 | warnings.warn("Interpolator returned all NaNs.", RuntimeWarning, stacklevel=2) 507 | # Average over the spherical shells for each radius / bin value 508 | if not ((weights == 0) | (weights == 1)).all(): 509 | fnc = RegularGridInterpolator( 510 | coords, 511 | weights, # Complex data is accepted. 512 | bounds_error=False, 513 | fill_value=np.nan, 514 | ) 515 | interped_weights = fnc(sample_coords.T) 516 | 517 | avged_field = [] 518 | 519 | final_sumweights = [] 520 | for b in bins: 521 | mbin = np.logical_and(r_n == b, ~np.isnan(interped_field)) 522 | avged_field.append(np.sum(interped_field[mbin] * interped_weights[mbin])) 523 | final_sumweights.append(np.sum(interped_weights[mbin])) 524 | avged_field = np.array(avged_field) / final_sumweights 525 | else: 526 | avged_field = np.array([np.nanmean(interped_field[r_n == b]) for b in bins]) 527 | unique_rn, sumweights = np.unique( 528 | r_n[~np.isnan(interped_field)], 529 | return_counts=True, 530 | ) 531 | final_sumweights = [] 532 | for b in bins: 533 | if b in unique_rn: 534 | final_sumweights.append(sumweights[unique_rn == b][0]) 535 | else: 536 | final_sumweights.append(0) 537 | return avged_field, np.array(final_sumweights) 538 | 539 | 540 | def _field_average(indx, field, weights, sumweights): 541 | if not np.isscalar(weights) and field.shape != weights.shape: 542 | raise ValueError( 543 | "The field and weights must have the same shape!", 544 | field.shape, 545 | weights.shape, 546 | ) 547 | 548 | field = field * weights # Leave like this because field is mutable 549 | 550 | rl = ( 551 | np.bincount( 552 | indx, weights=np.real(field.flatten()), minlength=len(sumweights) + 2 553 | )[1:-1] 554 | / sumweights 555 | ) 556 | if field.dtype.kind == "c": 557 | im = ( 558 | 1j 559 | * np.bincount( 560 | indx, weights=np.imag(field.flatten()), minlength=len(sumweights) + 2 561 | )[1:-1] 562 | / sumweights 563 | ) 564 | else: 565 | im = 0 566 | 567 | return rl + im 568 | 569 | 570 | def _field_variance(indx, field, average, weights, V1): 571 | if field.dtype.kind == "c": 572 | raise NotImplementedError( 573 | "Cannot use a complex field when computing variance, yet." 574 | ) 575 | 576 | # Create a full flattened array of the same shape as field, with the average in that bin. 577 | # We have to pad the average vector with 0s on either side to account for cells outside the bin range. 
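    # (np.digitize yields indices from 0, below the first edge, up to len(bins),
    # above the last edge, while `average` covers only the in-range bins -- hence
    # the zero padding on both ends.)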
578 |     average_field = np.concatenate(([0], average, [0]))[indx]
579 | 
580 |     # Create the V2 array
581 |     if not np.isscalar(weights):
582 |         weights = weights.flatten()
583 |         V2 = np.bincount(indx, weights=weights**2, minlength=len(V1) + 2)[1:-1]
584 |     else:
585 |         V2 = V1
586 | 
587 |     field = (field.flatten() - average_field) ** 2 * weights
588 | 
589 |     # This res is the estimated variance of each cell in the bin
590 |     res = np.bincount(indx, weights=field, minlength=len(V1) + 2)[1:-1] / (V1 - V2 / V1)
591 | 
592 |     # Modify to the estimated variance of the sum of the cells in the bin.
593 |     res *= V2 / V1**2
594 | 
595 |     return res
596 | 
597 | 
598 | def angular_average_nd(  # noqa: C901
599 |     field,
600 |     coords,
601 |     bins,
602 |     n=None,
603 |     weights=1,
604 |     average=True,
605 |     bin_ave=True,
606 |     get_variance=False,
607 |     log_bins=False,
608 |     interpolation_method=None,
609 |     interp_points_generator=None,
610 |     return_sumweights=False,
611 |     bins_upto_boxlen: bool | None = None,
612 | ):
613 |     """
614 |     Average the first n dimensions of a given field within radial bins.
615 | 
616 |     This function can be used to take "hyper-cylindrical" averages of fields. For a 3D field, with `n=2`, this is exactly
617 |     a cylindrical average. This function can be used on fields of arbitrary dimension (memory permitting), and the field
618 |     need not be centred at the origin. The averaging assumes that the grid cells fall completely into the bin which
619 |     encompasses the co-ordinate point for the cell (i.e. there is no weighted splitting of cells if they intersect a bin
620 |     edge).
621 | 
622 |     It is optimized for applying a set of weights, and obtaining the variance of the mean, at the same time as
623 |     averaging.
624 | 
625 |     Parameters
626 |     ----------
627 |     field : nd-array
628 |         An array of arbitrary dimension specifying the field to be angularly averaged.
629 | 
630 |     coords : list of n arrays
631 |         A list of 1D arrays specifying the co-ordinates in each dimension *to be averaged*.
632 | 
633 |     bins : int or array.
634 |         Specifies the radial bins for the averaged dimensions. Can be an int or array specifying radial bin edges.
635 | 
636 |     n : int, optional
637 |         The number of dimensions to be averaged. By default, all dimensions are averaged. Always uses
638 |         the first `n` dimensions.
639 | 
640 |     weights : array, optional
641 |         An array of the same shape as the first `n` dimensions of `field`, giving a weight for each entry.
642 | 
643 |     average : bool, optional
644 |         Whether to take the (weighted) average. If False, returns the (unweighted) sum.
645 | 
646 |     bin_ave : bool, optional
647 |         Whether to return the bin co-ordinates as the (weighted) average of cells within the bin (if True), or
648 |         the linearly spaced edges of the bins.
649 | 
650 |     get_variance : bool, optional
651 |         Whether to also return an estimate of the variance of the power in each bin.
652 | 
653 |     log_bins : bool, optional
654 |         Whether to create bins in log-space.
655 | 
656 |     interpolation_method : str, optional
657 |         If None, does not interpolate. Currently only 'linear' is supported.
658 | 
659 |     interp_points_generator : callable, optional
660 |         A function that generates the sample points for the interpolation.
661 |         If None, defaults to regular_angular_generator with resolution = 0.05.
662 |         If callable, a factory that takes as input an angular resolution for the sampling
663 |         and returns a generator producing a 1D array of radii and a 2D array of angles
664 |         (see the documentation of `_spherical2cartesian` for more details on these outputs).
665 |         This function can be used to obtain an angular average over a certain region of the field by
666 |         limiting where the samples are taken for the interpolation. See function `above_mu_min_angular_generator`
667 |         for an example of such a function.
668 | 
669 |     return_sumweights : bool, optional
670 |         Whether to return the number of modes in each bin.
671 |         Note that for the linear interpolation case,
672 |         this corresponds to the number of samples averaged over
673 |         (which can be adjusted by supplying a different interp_points_generator
674 |         function with a different angular resolution).
675 | 
676 |     bins_upto_boxlen : bool, optional
677 |         If set to True and the bins are determined automatically, calculate bins only
678 |         up to the maximum k along any dimension. Otherwise, calculate bins up to the
679 |         maximum magnitude of k (i.e. a factor of sqrt(ndim) higher). Default is False
680 |         for backwards compatibility.
681 | 
682 |     Returns
683 |     -------
684 |     field : (m-n+1)-array
685 |         The angularly-averaged field. The first dimension corresponds to `bins`, while the rest correspond to the
686 |         unaveraged dimensions.
687 | 
688 |     bins : 1D-array
689 |         The radial co-ordinates of the bins. Either the mean co-ordinate from the input data, or the regularly spaced
690 |         bins, dependent on `bin_ave`.
691 | 
692 |     var : (m-n+1)-array, optional
693 |         The variance of the averaged field (same shape as `field`), estimated from the mean standard error.
694 |         Only returned if `get_variance` is True.
695 | 
696 |     Examples
697 |     --------
698 |     Create a 3D radial function, and average over radial bins. Equivalent to calling :func:`angular_average`:
699 | 
700 |     >>> import numpy as np
701 |     >>> import matplotlib.pyplot as plt
702 |     >>> x = np.linspace(-5,5,128) # Setup a grid
703 |     >>> X,Y,Z = np.meshgrid(x,x,x) # ""
704 |     >>> r = np.sqrt(X**2+Y**2+Z**2) # Get the radial co-ordinate of grid
705 |     >>> field = np.exp(-r**2) # Generate a radial field
706 |     >>> avgfunc, bins = angular_average_nd(field,[x,x,x],bins=100) # Call angular_average
707 |     >>> plt.plot(bins, np.exp(-bins**2), label="Input Function") # Plot input function versus ang. avg.
708 |     >>> plt.plot(bins, avgfunc, label="Averaged Function")
709 | 
710 |     Create a 2D radial function, extended to 3D, and average over first 2 dimensions (cylindrical average):
711 | 
712 |     >>> r = np.sqrt(X**2+Y**2)
713 |     >>> field = np.exp(-r**2) # 2D field
714 |     >>> field = np.repeat(field,len(x)).reshape((len(x),)*3) # Extended to 3D
715 |     >>> avgfunc, avbins = angular_average_nd(field, [x,x,x], bins=50, n=2)
716 |     >>> plt.plot(avbins, np.exp(-avbins**2), label="Input Function")
717 |     >>> plt.plot(avbins, avgfunc[:,0], label="Averaged Function")
718 |     """
719 |     if n is None:
720 |         n = len(coords)
721 | 
722 |     if len(coords) != len(field.shape):
723 |         raise ValueError("coords should be a list of arrays, one for each dimension.")
724 | 
725 |     if interpolation_method is not None and interp_points_generator is None:
726 |         interp_points_generator = regular_angular_generator()
727 | 
728 |     if interpolation_method is not None and interpolation_method != "linear":
729 |         raise ValueError("Only linear interpolation is supported.")
730 | 
731 |     if n == len(coords):
732 |         return angular_average(
733 |             field,
734 |             coords,
735 |             bins,
736 |             weights,
737 |             average,
738 |             bin_ave,
739 |             get_variance,
740 |             log_bins=log_bins,
741 |             interpolation_method=interpolation_method,
742 |             interp_points_generator=interp_points_generator,
743 |             return_sumweights=return_sumweights,
744 |             bins_upto_boxlen=bins_upto_boxlen,
745 |         )
746 | 
747 |     if len(coords) == len(field.shape):
748 |         # coords are a segmented list of dimensional co-ordinates
749 |         coord_mags = _magnitude_grid([c for i, c in enumerate(coords) if i < n])
750 |     elif interpolation_method is not None:
751 |         raise ValueError(
752 |             "Must supply a list of len(field.shape) of 1D coordinate arrays for coords when interpolating!"
753 |         )
754 |     else:
755 |         # coords are the magnitude of the co-ordinates
756 |         # since we are not interpolating, then we can just use the magnitude of the co-ordinates
757 |         coord_mags = coords
758 | 
760 |     n1 = np.prod(field.shape[:n])
761 |     n2 = np.prod(field.shape[n:])
762 |     if interpolation_method is None:
763 |         indx, bins, sumweights = _get_binweights(
764 |             coord_mags,
765 |             weights,
766 |             bins,
767 |             average,
768 |             bin_ave=bin_ave,
769 |             log_bins=log_bins,
770 |             bins_upto_boxlen=bins_upto_boxlen,
771 |         )
772 |         res = np.zeros((len(sumweights), n2), dtype=field.dtype)
773 |     if interpolation_method is not None:
774 |         bins = _getbins(bins, coord_mags, log_bins, bins_upto_boxlen)
775 |         if bin_ave:
776 |             if log_bins:
777 |                 bins = np.exp((np.log(bins[1:]) + np.log(bins[:-1])) / 2)
778 |             else:
779 |                 bins = (bins[1:] + bins[:-1]) / 2
780 |         res = np.zeros((len(bins), n2), dtype=field.dtype)
781 | 
782 |     if get_variance:
783 |         var = np.zeros_like(res)
784 | 
785 |     for i, fld in enumerate(field.reshape((n1, n2)).T):
786 |         try:
787 |             w = weights.flatten()
788 |         except AttributeError:
789 |             w = weights
790 |         if interpolation_method is None:
791 |             res[:, i] = _field_average(indx, fld, w, sumweights)
792 |             if get_variance:
793 |                 var[:, i] = _field_variance(indx, fld, res[:, i], w, sumweights)
794 |         elif interpolation_method == "linear":
795 |             sample_coords, r_n = _sample_coords_interpolate(
796 |                 coords[:n], bins, weights, interp_points_generator
797 |             )
798 |             res[:, i], sumweights = _field_average_interpolate(
799 |                 coords[:n], fld.reshape(field.shape[:n]), bins, w, sample_coords, r_n
800 |             )
801 |             if get_variance:
802 |                 # TODO: Implement variance calculation for interpolation
803 |                 raise NotImplementedError(
804 |                     "Variance calculation not implemented for interpolation"
805 |                 )
806 | 
807 |     if not get_variance:
808 |         if return_sumweights:
809 |             return res.reshape((len(sumweights),) + field.shape[n:]), bins, sumweights
810 |         else:
811 |             return res.reshape((len(sumweights),) + field.shape[n:]), bins
812 |     else:
813 |         if return_sumweights:
814 |             return (
815 |                 res.reshape((len(sumweights),) + field.shape[n:]),
816 |                 bins,
817 |                 var,
818 |                 sumweights,
819 |             )
820 |         else:
821 |             return res.reshape((len(sumweights),) + field.shape[n:]), bins, var
822 | 
823 | 
824 | def power2delta(freq: list):
825 |     r"""
826 |     Convert power P(k) to dimensionless power.
827 | 
828 |     Calculate the multiplicative factor :math:`\Omega_d |k|^d / (2 \pi)^d`,
829 |     where :math:`\Omega_d = \frac{2 \pi^{d/2}}{\Gamma(d/2)}`, needed to convert
830 |     the power P(k) (in 3D, :math:`[\rm{mK}^2 k^{-3}]`) into the "dimensionless" power spectrum
831 |     :math:`\Delta^2_{21}` (in 3D, :math:`[\rm{mK}^2]`).
832 | 
833 |     Parameters
834 |     ----------
835 |     freq : list
836 |         A list containing 1D arrays of wavemodes k1, k2, k3, ...
837 | 
838 |     Returns
839 |     -------
840 |     prefactor : np.ndarray
841 |         An array of shape (len(k1), len(k2), len(k3), ...) containing the values of the prefactor
842 |         :math:`\Omega_d |k|^d / (2 \pi)^d`, where :math:`\Omega_d = \frac{2 \pi^{d/2}}{\Gamma(d/2)}`
843 |         is the solid angle and :math:`\Gamma` is the gamma function. For a 3-D sphere, the prefactor
844 |         is :math:`|k|^3 / (2\pi^2)`.
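
    A 1D sanity check (a small sketch: in one dimension :math:`\Omega_1 = 2`, so
    the prefactor reduces to :math:`|k|/\pi`):

    >>> import numpy as np
    >>> bool(np.allclose(power2delta([np.array([0.0, np.pi])]), [0.0, 1.0]))
    True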
845 | 
846 |     """
847 |     shape = [len(f) for f in freq]
848 |     dim = len(shape)
849 |     coords = np.meshgrid(*freq, sparse=True)
850 |     squares = [c**2 for c in coords]
851 |     absk = np.sqrt(sum(squares))
852 |     solid_angle = 2 * np.pi ** (dim / 2) / gamma(dim / 2)
853 |     prefactor = solid_angle * (absk / (2 * np.pi)) ** dim
854 |     return prefactor
855 | 
856 | 
857 | def ignore_zero_absk(freq: list, kmag: np.ndarray | None):
858 |     r"""
859 |     Returns a mask with zero weights where :math:`|k| = 0`.
860 | 
861 |     Parameters
862 |     ----------
863 |     freq : list
864 |         A list containing 1D arrays of wavemodes k1, k2, k3, ... Not used here;
865 |         accepted for consistency with the ``k_weights`` callable signature.
866 |     kmag : np.ndarray
867 |         The magnitudes of the k-modes at each point of the (possibly averaged) field.
868 | 
869 |     Returns
870 |     -------
871 |     k_weights : np.ndarray
872 |         An array of same shape as the averaged field containing the weights of the k-modes.
873 |         For example, if the field is not averaged (e.g. 3D power), then the shape is
874 |         (len(k1), len(k2), len(k3)).
875 | 
876 |     """
877 |     k_weights = ~np.isclose(kmag, 0)
878 |     return k_weights
879 | 
880 | 
881 | def ignore_zero_ki(freq: list, kmag: np.ndarray = None):
882 |     r"""
883 |     Returns a mask with zero weights where k_i == 0, where i = x, y, z for a 3D field.
884 | 
885 |     Parameters
886 |     ----------
887 |     freq : list
888 |         A list containing 1D arrays of wavemodes k1, k2, k3, ...
889 |     kmag : np.ndarray, optional
890 |         The magnitudes of the k-modes; only its dimensionality is used here, to
891 |         determine how many dimensions are being averaged.
892 | 
893 |     Returns
894 |     -------
895 |     k_weights : np.ndarray
896 |         An array of same shape as the averaged field containing the weights of the k-modes.
897 |         For example, if the field is not averaged (e.g. 3D power), then the shape is
898 |         (len(k1), len(k2), len(k3)).
899 |     """
900 |     res_ndim = len(kmag.shape)
901 | 
902 |     coords = np.array(np.meshgrid(*freq[:res_ndim], sparse=False))
903 |     k_weights = ~np.any(coords == 0, axis=0)
904 |     return k_weights
905 | 
906 | 
907 | def discretize_N(
908 |     deltax,
909 |     boxlength,
910 |     deltax2=None,
911 |     N=None,
912 |     weights=None,
913 |     weights2=None,
914 |     dimensionless=True,
915 | ):
916 |     r"""
917 |     Grid a discrete sampling of particles onto a mesh to obtain a density field.
918 | 
919 |     Parameters
920 |     ----------
921 |     deltax : array-like
922 |         The positions of discrete particles in the field, given as a 2D array whose
923 |         first dimension runs over the particles and whose second dimension runs over
924 |         the spatial dimensions, i.e. ``deltax.shape == (Npart, dim)``. Note that the
925 |         field gridded from such a sampling is the "overdensity" field, i.e. the
926 |         field re-centered about zero and rescaled by the mean (see
927 |         ``dimensionless`` below).
928 |     boxlength : float or list of floats
929 |         The length of the box side(s) in real-space.
930 |     deltax2 : array-like, optional
931 |         If given, a second set of particle positions of the same form as deltax,
932 |         against which deltax will be cross correlated.
933 |     N : int or list of int, optional
934 |         The number of grid cells per side in the box. A scalar is broadcast
935 |         across all dimensions.
941 |     weights, weights2 : array-like, optional
942 |         If deltax is a discrete sample, these are weights for each point.
943 |     dimensionless: bool, optional
944 |         Whether to normalise the cube by its mean prior to taking the power.
945 | 
946 |     Returns
947 |     -------
948 |     deltax : array-like
949 |         The gridded field derived from the input particle positions: the
950 |         overdensity field ``deltax / mean(deltax) - 1`` if ``dimensionless``
951 |         is True, otherwise the mean-subtracted gridded counts.
952 |     deltax2 : array-like or None
953 |         As for deltax, computed from the second set of particle positions,
954 |         if that was given.
955 |     Npart1, Npart2 : int
956 |         The number of particles in deltax and deltax2, respectively (Npart2
957 |         equals Npart1 when deltax2 is not given).
958 |     dim : int
959 |         The number of spatial dimensions of the box.
960 |     N : list of int
961 |         The number of grid cells per side, as a list with one entry per
962 |         dimension.
963 |     boxlength : list of float
964 |         The length of the box side(s) in real-space, as a list with one entry
965 |         per dimension.
966 | 
967 |     """
971 |     if deltax.shape[1] > deltax.shape[0]:
972 |         raise ValueError(
973 |             "It seems that there are more dimensions than particles! "
974 |             "Try transposing deltax."
975 |         )
976 | 
977 |     if deltax2 is not None and deltax2.shape[1] > deltax2.shape[0]:
978 |         raise ValueError(
979 |             "It seems that there are more dimensions than particles! "
980 |             "Try transposing deltax2."
981 |         )
982 | 
983 |     dim = deltax.shape[1]
984 |     if deltax2 is not None and dim != deltax2.shape[1]:
985 |         raise ValueError("deltax and deltax2 must have the same number of dimensions!")
986 | 
987 |     if not np.iterable(N):
988 |         N = [N] * dim
989 | 
990 |     if not np.iterable(boxlength):
991 |         boxlength = [boxlength] * dim
992 | 
993 |     Npart1 = deltax.shape[0]
994 | 
995 |     Npart2 = deltax2.shape[0] if deltax2 is not None else Npart1
996 | 
997 |     # Generate a histogram of the data, with appropriate number of bins.
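    # (Positions are wrapped into the box by the modulo below, so samples on or
    # beyond the right edge re-enter from the left, consistent with the periodic
    # boxes produced elsewhere in powerbox.)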
998 |     edges = [np.linspace(0, L, n + 1) for L, n in zip(boxlength, N)]
999 | 
1000 |     deltax = np.histogramdd(deltax % boxlength, bins=edges, weights=weights)[0].astype(
1001 |         "float"
1002 |     )
1003 | 
1004 |     if deltax2 is not None:
1005 |         deltax2 = np.histogramdd(deltax2 % boxlength, bins=edges, weights=weights2)[
1006 |             0
1007 |         ].astype("float")
1008 | 
1009 |     # Convert sampled data to mean-zero data
1010 |     if dimensionless:
1011 |         deltax = deltax / np.mean(deltax) - 1
1012 |         if deltax2 is not None:
1013 |             deltax2 = deltax2 / np.mean(deltax2) - 1
1014 |     else:
1015 |         deltax -= np.mean(deltax)
1016 |         if deltax2 is not None:
1017 |             deltax2 -= np.mean(deltax2)
1018 |     return deltax, deltax2, Npart1, Npart2, dim, N, boxlength
1019 | 
1020 | 
1021 | def get_power(
1022 |     deltax,
1023 |     boxlength,
1024 |     deltax2=None,
1025 |     N=None,
1026 |     a=1.0,
1027 |     b=1.0,
1028 |     remove_shotnoise=True,
1029 |     vol_normalised_power=True,
1030 |     bins=None,
1031 |     res_ndim=None,
1032 |     weights=None,
1033 |     weights2=None,
1034 |     dimensionless=True,
1035 |     bin_ave=True,
1036 |     get_variance=False,
1037 |     log_bins=False,
1038 |     ignore_zero_mode=False,
1039 |     k_weights=1,
1040 |     nthreads=None,
1041 |     prefactor_fnc=None,
1042 |     interpolation_method=None,
1043 |     interp_points_generator=None,
1044 |     return_sumweights=False,
1045 |     bins_upto_boxlen: bool | None = None,
1046 | ):
1047 |     r"""
1048 |     Calculate isotropic power spectrum of a field, or cross-power of two similar fields.
1049 | 
1050 |     This function, by default, conforms to typical cosmological power spectrum
1051 |     conventions -- normalising by the volume of the box and removing shot noise if
1052 |     applicable. These options are configurable.
1053 | 
1054 |     Parameters
1055 |     ----------
1056 |     deltax : array-like
1057 |         The field on which to calculate the power spectrum. Can either be arbitrarily
1058 |         n-dimensional, or 2-dimensional with the first being the number of spatial
1059 |         dimensions, and the second the positions of discrete particles in the field. The
1060 |         former should represent a density field, while the latter is a discrete sampling
1061 |         of a field. This function chooses which to use by checking the value of ``N``
1062 |         (see below). Note that if a discrete sampling is used, the power spectrum
1063 |         calculated is the "overdensity" power spectrum, i.e. the field re-centered about
1064 |         zero and rescaled by the mean.
1065 |     boxlength : float or list of floats
1066 |         The length of the box side(s) in real-space.
1067 |     deltax2 : array-like
1068 |         If given, a box of the same shape as deltax, against which deltax will be cross
1069 |         correlated.
1070 |     N : int, optional
1071 |         The number of grid cells per side in the box. Only required if deltax is a
1072 |         discrete sample. If given, the function will assume a discrete sample.
1073 |     a,b : float, optional
1074 |         These define the Fourier convention used. See :mod:`powerbox.dft` for details.
1075 |         The defaults define the standard usage in *cosmology* (for example, as defined
1076 |         in Cosmological Physics, Peacock, 1999, pg. 496.). Standard numerical usage
1077 |         (eg. numpy) is ``(a,b) = (0,2pi)``.
1078 |     remove_shotnoise : bool, optional
1079 |         Whether to subtract a shot-noise term after determining the isotropic power.
1080 |         This only affects discrete samples.
1081 |     vol_normalised_power : bool, optional
1082 |         Whether to normalise the output power by the volume of the box. Default True
1083 |         because of standard cosmological usage.
1084 |     bins : int or array, optional
1085 |         Defines the final k-bins output. If None, chooses a number based on the input
1086 |         resolution of the box. Otherwise, if int, this defines the number of kbins, or
1087 |         if an array, it defines the exact bin edges.
1088 |     res_ndim : int, optional
1089 |         Only perform angular averaging over first `res_ndim` dimensions. By default,
1090 |         uses all dimensions.
1091 |     weights, weights2 : array-like, optional
1092 |         If deltax is a discrete sample, these are weights for each point.
1093 |     dimensionless : bool, optional
1094 |         Whether to normalise the cube by its mean prior to taking the power.
1095 |     bin_ave : bool, optional
1096 |         Whether to return the bin co-ordinates as the (weighted) average of cells within
1097 |         the bin (if True), or the linearly spaced edges of the bins.
1098 |     get_variance : bool, optional
1099 |         Whether to also return an estimate of the variance of the power in each bin.
1100 |     log_bins : bool, optional
1101 |         Whether to create bins in log-space.
1102 |     ignore_zero_mode : bool, optional
1103 |         Whether to ignore the k=0 mode (or DC term).
1104 |     k_weights : nd-array or callable, optional
1105 |         The weights of the n-dimensional k modes. This can be used to filter out some
1106 |         modes completely. If callable, a function that takes a list of arrays of
1107 |         wavemodes [k1, k2, k3, ...] as well as (optionally) kmag, and returns an array
1108 |         of weights of shape (len(k1), len(k2), len(k3), ...).
1109 |     nthreads : bool or int, optional
1110 |         If set to False, uses numpy's FFT routine. If set to None, uses pyFFTW with
1111 |         number of threads equal to the number of available CPUs. If int, uses pyFFTW
1112 |         with number of threads equal to the input value.
1113 |     prefactor_fnc : callable, optional
1114 |         A function that takes in a list containing arrays of wavemodes [k1, k2, k3, ...]
1115 |         and returns an array of the same size. This function is applied to the FT before
1116 |         the angular averaging. It can be used, for example, to convert linearly-binned
1117 |         power into power-per-logarithmic k ($\Delta^2$).
1118 |     interpolation_method : str, optional
1119 |         If None, does not interpolate. Currently only 'linear' is supported.
1120 |     interp_points_generator : callable, optional
1121 |         A function that generates the sample points for the interpolation.
1122 |         If None, defaults to regular_angular_generator with resolution = 0.05.
1123 |         If callable, a function that takes as input an angular resolution for the
1124 |         sampling, and returns a 1D array of radii and a 2D array of angles (see the
1125 |         documentation on the inputs of _sphere2cartesian for more details on the
1126 |         outputs). This can be used to obtain an angular average over a certain region
1127 |         of the field, by limiting where the samples are taken for the interpolation.
1128 |         See `above_mu_min_angular_generator` for an example of such a function.
1129 |     return_sumweights : bool, optional
1130 |         Whether to return the number of modes in each bin.
1131 |         Note that for the linear interpolation case,
1132 |         this corresponds to the number of samples averaged over
1133 |         (which can be adjusted with the angular_resolution parameter).
1134 |     bins_upto_boxlen : bool, optional
1135 |         If set to True and the bins are determined automatically, calculate bins only
1136 |         up to the maximum k along any dimension. Otherwise, calculate bins up to the
1137 |         maximum magnitude of k (i.e. a factor of sqrt(ndim) higher). Default is False
1138 |         for backwards compatibility.
1139 | 
1140 |     Returns
1141 |     -------
1142 |     p_k : array
1143 |         The power spectrum averaged over bins of equal :math:`|k|`.
1144 |     meank : array
1145 |         The bin-centres for the p_k array (in k). This is the mean k-value for cells in
1146 |         that bin.
1147 |     var : array
1148 |         The variance of the power spectrum, estimated from the mean standard error. Only
1149 |         returned if `get_variance` is True.
1150 | 
1151 |     Examples
1152 |     --------
1153 |     One can use this function to check whether a box created with :class:`PowerBox`
1154 |     has the correct power spectrum:
1155 | 
1156 |     >>> from powerbox import PowerBox
1157 |     >>> import matplotlib.pyplot as plt
1158 |     >>> pb = PowerBox(250, lambda k: k**-2.0)
1159 |     >>> p, k = get_power(pb.delta_x(), pb.boxlength)
1160 |     >>> plt.plot(k, p)
1161 |     >>> plt.plot(k, k**-2.0)
1162 |     >>> plt.xscale('log')
1163 |     >>> plt.yscale('log')
1164 | 
1165 |     An example of a prefactor_fnc, applied to a 3D version of the above box:
1166 | 
1167 |     >>> import numpy as np
1168 |     >>> pb = PowerBox(50, lambda k: k**-2.0, dim=3)
1169 |     >>> def power2delta(freq):
1170 |     ...     kx = freq[0]
1171 |     ...     ky = freq[1]
1172 |     ...     kz = freq[2]
1173 |     ...     absk = np.sqrt(np.add.outer(np.add.outer(kx**2, ky**2), kz**2))
1174 |     ...     return absk**3 / (2 * np.pi**2)
1175 |     >>> p, k = get_power(pb.delta_x(), pb.boxlength, prefactor_fnc=power2delta)
1176 |     """
1177 |     # Check if the input data is in sampled particle format
1178 |     if N is not None:
1179 |         deltax, deltax2, Npart1, Npart2, dim, N, boxlength = discretize_N(
1180 |             deltax,
1181 |             boxlength,
1182 |             deltax2=deltax2,
1183 |             N=N,
1184 |             weights=weights,
1185 |             weights2=weights2,
1186 |             dimensionless=dimensionless,
1187 |         )
1188 | 
1189 |     else:
1190 |         # If input data is already a density field, just get the dimensions.
1191 |         dim = len(deltax.shape)
1192 | 
1193 |         if not np.iterable(boxlength):
1194 |             boxlength = [float(boxlength)] * dim
1195 |         else:
1196 |             boxlength = [float(val) for val in boxlength]
1197 | 
1198 |         if deltax2 is not None and deltax.shape != deltax2.shape:
1199 |             raise ValueError("deltax and deltax2 must have the same shape!")
1200 | 
1201 |         N = deltax.shape
1202 |         Npart1 = None
1203 | 
1204 |     V = np.prod(boxlength)
1205 | 
1206 |     # Calculate the n-D power spectrum and align it with the k from powerbox.
1207 |     FT, freq, k = dft.fft(
1208 |         deltax, L=boxlength, a=a, b=b, ret_cubegrid=True, nthreads=nthreads
1209 |     )
1210 | 
1211 |     FT2 = (
1212 |         dft.fft(deltax2, L=boxlength, a=a, b=b, nthreads=nthreads)[0]
1213 |         if deltax2 is not None
1214 |         else FT
1215 |     )
1216 | 
1217 |     P = np.real(FT * np.conj(FT2) / V**2)
1218 | 
1219 |     if vol_normalised_power:
1220 |         P *= V
1221 | 
1222 |     if prefactor_fnc is not None:
1223 |         P *= prefactor_fnc(freq)
1224 | 
1225 |     if res_ndim is None:
1226 |         res_ndim = dim
1227 | 
1228 |     # Determine a nice number of bins.
1229 |     if bins is None:
1230 |         bins = int(np.prod(N[:res_ndim]) ** (1.0 / res_ndim) / 2.2)
1231 | 
1232 |     kmag = _magnitude_grid([c for i, c in enumerate(freq) if i < res_ndim])
1233 | 
1234 |     if np.isscalar(k_weights):
1235 |         k_weights = np.ones_like(kmag)
1236 | 
1237 |     if callable(k_weights):
1238 |         k_weights = k_weights(freq, kmag)
1239 | 
1240 |     # Set k_weights so that the k=0 mode is ignored if desired.
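    # (Illustrative sketch, not part of the original comment: for a 1D grid
    # with freq = [[-1, 0, 1]], ignore_zero_absk yields the mask [1, 0, 1], so
    # the logical_and below keeps every mode except the DC cell. Note that
    # np.logical_and binarises any non-binary user-supplied weights.)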
1241 |     if ignore_zero_mode:
1242 |         k_weights = np.logical_and(k_weights, ignore_zero_absk(freq, kmag))
1243 | 
1244 |     # res is (power, k_bins[, variance][, sumweights]), depending on the options above.
1245 |     res = angular_average_nd(
1246 |         P,
1247 |         freq,
1248 |         bins,
1249 |         n=res_ndim,
1250 |         bin_ave=bin_ave,
1251 |         get_variance=get_variance,
1252 |         log_bins=log_bins,
1253 |         weights=k_weights,
1254 |         interpolation_method=interpolation_method,
1255 |         interp_points_generator=interp_points_generator,
1256 |         return_sumweights=return_sumweights,
1257 |         bins_upto_boxlen=bins_upto_boxlen,
1258 |     )
1259 |     res = list(res)
1260 |     # Remove shot-noise
1261 |     if remove_shotnoise and Npart1:
1262 |         res[0] -= np.sqrt(V**2 / Npart1 / Npart2)
1263 | 
1264 |     return res + [freq[res_ndim:]] if res_ndim < dim else res
1265 | 
--------------------------------------------------------------------------------
/tests/test_direct.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | from powerbox import PowerBox
4 | 
5 | N = 5
6 | 
7 | 
8 | def ensure_hermitian(b):
9 |     revidx = (slice(None, None, -1),) * len(b.shape)
10 | 
11 |     if len(b) % 2 == 0:
12 |         cutidx = (slice(1, None, None),) * len(b.shape)
13 |         b = b[cutidx]
14 | 
15 |     print(b - b[revidx])
16 |     assert np.allclose(np.real(b - b[revidx]), 0)
17 |     assert np.allclose(np.imag(b + b[revidx]), 0)
18 | 
19 | 
20 | def ensure_reality_elementwise(x):
21 |     if not np.allclose(np.abs(np.imag(x) / np.real(x)), 0, atol=0.01, rtol=0.01):
22 |         print(
23 |             "Maximum contribution of imaginary part in any element: ",
24 |             np.max(np.abs(np.imag(x) / np.real(x))),
25 |         )
26 |         return False
27 |     else:
28 |         return True
29 | 
30 | 
31 | def ensure_reality(x):
32 |     val = np.sum(np.abs(x)) / np.sum(np.abs(np.real(x)))
33 |     if np.isclose(val, 1, rtol=5e-3):
34 |         return True
35 |     else:
36 |         print("Total fractional contribution of imaginary parts", val - 1)
37 |         return False
38 | 
39 | 
40 | class TestDirect:
41 |     def setup_method(self, test_method):
42 |         self.pb = PowerBox(N, lambda k: k**-2.0, dim=1)
43 | 
44 |     def test_hermitian(self):
45 |         ensure_hermitian(self.pb.delta_k())
46 | 
47 |     def test_reality_elementwise(self):
48 |         assert ensure_reality_elementwise(self.pb.delta_x())
49 | 
50 |     def test_reality(self):
51 |         assert ensure_reality(self.pb.delta_x())
52 | 
53 | 
54 | class TestDirect2(TestDirect):
55 |     def setup_method(self, test_method):
56 |         self.pb = PowerBox(N, lambda k: k**-2.0, dim=2)
57 | 
58 | 
59 | class TestDirect3(TestDirect):
60 |     def setup_method(self, test_method):
61 |         self.pb = PowerBox(N, lambda k: k**-2.0, dim=3)
62 | 
63 | 
64 | class TestDirect4(TestDirect):
65 |     def setup_method(self, test_method):
66 |         self.pb = PowerBox(N, lambda k: k**-2.0, dim=4)
67 | 
68 | 
69 | class TestDirectEven(TestDirect):
70 |     def setup_method(self, test_method):
71 |         self.pb = PowerBox(N - 1, lambda k: k**-2.0, dim=2)
72 | 
73 |     def test_reality_elementwise(self):
74 |         pass  # It won't be element-wise correct for even case.
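        # (Explanatory note: with an even number of cells, the Nyquist modes
        # are their own conjugate partners on the grid, so unless they are
        # drawn exactly real the inverse transform carries a small imaginary
        # residual in individual elements; only the aggregate reality check
        # below remains meaningful in this case.)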
75 | 
76 |     def test_reality(self):
77 |         assert ensure_reality(self.pb.delta_x())
78 | 
--------------------------------------------------------------------------------
/tests/test_discrete.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | import numpy as np
4 | from functools import partial
5 | 
6 | from powerbox import LogNormalPowerBox, PowerBox, get_power
7 | 
8 | get_power = partial(get_power, bins_upto_boxlen=True)
9 | 
10 | 
11 | def test_discrete_power_gaussian():
12 |     pb = PowerBox(
13 |         N=512,
14 |         dim=2,
15 |         boxlength=100.0,
16 |         pk=lambda u: 0.1 * u**-1.5,
17 |         ensure_physical=True,
18 |     )
19 | 
20 |     box = pb.delta_x()
21 | 
22 |     sample = pb.create_discrete_sample(nbar=1000.0, delta_x=box)
23 |     power, bins = get_power(sample, pb.boxlength, N=pb.N)
24 | 
25 |     res = np.mean(np.abs(power[50:-50] / (0.1 * bins[50:-50] ** -1.5) - 1))
26 | 
27 |     assert res < 1e-1
28 | 
29 |     # This re-grids the discrete sample into a box, basically to verify the
30 |     # indexing used by meshgrid within `create_discrete_sample`.
31 |     N = [pb.N] * pb.dim
32 |     L = [pb.boxlength] * pb.dim
33 |     edges = [np.linspace(-_L / 2.0, _L / 2.0, _n + 1) for _L, _n in zip(L, N)]
34 |     delta_samp = np.histogramdd(sample, bins=edges, weights=None)[0].astype("float")
35 | 
36 |     # Check cross spectrum and assert a strong correlation
37 |     cross, bins = get_power(delta_samp, pb.boxlength, deltax2=box)
38 |     p2, bins = get_power(box, pb.boxlength)
39 |     mask = (power > 0) & (p2 > 0)
40 |     corr = cross[mask] / np.sqrt(power[mask]) / np.sqrt(p2[mask])
41 |     corr_bar = np.mean(corr[np.isfinite(corr)])
42 |     assert corr_bar > 10
43 | 
44 | 
45 | def test_discrete_power_lognormal():
46 |     pb = LogNormalPowerBox(
47 |         N=512,
48 |         dim=2,
49 |         boxlength=100.0,
50 |         pk=lambda u: 0.1 * u**-1.5,
51 |         ensure_physical=True,
52 |         seed=1212,
53 |     )
54 | 
55 |     sample = pb.create_discrete_sample(nbar=1000.0)
56 |     power, bins = get_power(sample, pb.boxlength, N=pb.N)
57 | 
58 |     res = np.mean(np.abs(power[50:-50] / (0.1 * bins[50:-50] ** -1.5) - 1))
59 | 
60 |     assert res < 1e-1
61 | 
62 |     with pytest.raises(ValueError):
63 |         power, bins = get_power(sample.T, pb.boxlength, N=pb.N)
64 | 
65 |     with pytest.raises(ValueError):
66 |         power, bins = get_power(sample.T, pb.boxlength, N=pb.N, deltax2=sample.T)
67 | 
68 |     with pytest.raises(ValueError):
69 |         power, bins = get_power(sample, pb.boxlength, N=pb.N, deltax2=sample.T)
70 | 
71 |     get_power(sample, pb.boxlength, N=pb.N, deltax2=sample, dimensionless=False)
72 | 
73 | 
74 | if __name__ == "__main__":
75 |     test_discrete_power_gaussian()
76 |     test_discrete_power_lognormal()
77 | 
--------------------------------------------------------------------------------
/tests/test_fft.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | import contextlib
4 | import numpy as np
5 | 
6 | from powerbox.dft import fft, fftfreq, fftshift, ifft, ifftshift
7 | from powerbox.dft_backend import FFTW, NumpyFFT
8 | 
9 | ABCOMBOS = [
10 |     (0, 2 * np.pi, 0, 1),
11 |     (0, 2 * np.pi, 1, 1),
12 |     (0, 1, 1, 2 * np.pi),
13 |     (0, 1, 1, 1),
14 |     (1, 1, 0, 2 * np.pi),
15 |     (1, 1, 0, 1),
16 | ]
17 | 
18 | BACKENDS = [
19 |     NumpyFFT(),
20 | ]
21 | 
22 | HAVE_FFTW = False
23 | HAVE_FFTW_MULTITHREAD = False
24 | 
25 | with contextlib.suppress(ValueError, ImportError):
26 |     import pyfftw
27 | 
28 |     BACKENDS.append(FFTW(nthreads=1))
29 |     HAVE_FFTW = True
30 |     # Probe whether this pyfftw build supports multithreading; raises if not.
31 |     pyfftw.builders._utils._default_threads(4)
32 | 
33 |     BACKENDS.append(FFTW(nthreads=2))
34 |     HAVE_FFTW_MULTITHREAD = True
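# (Explanatory note: contextlib.suppress exits the block at the first
# exception, so a missing pyfftw leaves only the numpy backend in BACKENDS,
# while a single-threaded pyfftw build raises in the probe above and skips
# the two-thread backend and the HAVE_FFTW_MULTITHREAD flag.)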
35 | 
36 | 
37 | def gauss_ft(k, a, b, n=2):
38 |     return (np.abs(b) / (2 * np.pi) ** (1 - a)) ** (n / 2.0) * np.exp(
39 |         -(b**2) * k**2 / (4 * np.pi)
40 |     )
41 | 
42 | 
43 | def gauss(x):
44 |     return np.exp(-np.pi * x**2)
45 | 
46 | 
47 | @pytest.fixture(scope="module")
48 | def g2d():
49 |     N = 1000
50 |     L = 10.0
51 |     dx = L / N
52 |     x = np.arange(-L / 2, L / 2, dx)[:N]
53 |     xgrid = np.sqrt(np.add.outer(x**2, x**2))
54 |     fx = gauss(xgrid)
55 |     return {"L": L, "fx": fx, "x": x}
56 | 
57 | 
58 | @pytest.fixture(scope="module")
59 | def g1d():
60 |     N = 1000
61 |     L = 10.0
62 |     dx = L / N
63 |     x = np.arange(-L / 2, L / 2, dx)[:N]
64 |     fx = gauss(x)
65 |     return {"L": L, "fx": fx, "x": x}
66 | 
67 | 
68 | @pytest.mark.parametrize("a,b", [(0, 2 * np.pi), (0, 1), (1, 1)])
69 | @pytest.mark.parametrize("backend", BACKENDS)
70 | def test_roundtrip_fb(g2d, a, b, backend):
71 |     Fx, freq = fft(
72 |         g2d["fx"], L=g2d["L"], a=a, b=b, left_edge=-g2d["L"] / 2, backend=backend
73 |     )
74 | 
75 |     Lk = -2 * np.min(freq)
76 |     fx, x = ifft(Fx, Lk=Lk, a=a, b=b, backend=backend)
77 |     assert np.max(np.abs(fx.real - g2d["fx"])) < 1e-10  # Test FT result
78 |     assert np.max(np.abs(x[0] - g2d["x"])) < 1e-10  # Test x-grid
79 | 
80 | 
81 | @pytest.mark.parametrize("a,b", [(0, 2 * np.pi), (0, 1), (1, 1)])
82 | @pytest.mark.parametrize("backend", BACKENDS)
83 | def test_roundtrip_bf(g2d, a, b, backend):
84 |     fx, freq = ifft(g2d["fx"], Lk=g2d["L"], a=a, b=b, backend=backend)
85 | 
86 |     L = -2 * np.min(freq)
87 |     Fk, k = fft(fx, L=L, a=a, b=b, backend=backend)
88 |     assert np.max(np.abs(Fk.real - g2d["fx"])) < 1e-10  # Test FT result
89 |     assert np.max(np.abs(k[0] - g2d["x"])) < 1e-10  # Test x-grid
90 | 
91 | 
92 | @pytest.mark.parametrize("a,b", [(0, 2 * np.pi), (0, 1), (1, 1)])
93 | @pytest.mark.parametrize("backend", BACKENDS)
94 | def test_forward_only(g1d, a, b, backend):
95 |     Fx, freq = fft(
96 |         g1d["fx"], L=g1d["L"], a=a, b=b, left_edge=-g1d["L"] / 2, backend=backend
97 |     )
98 |     assert np.max(np.abs(Fx.real - gauss_ft(freq[0], a, b, n=1))) < 1e-10
99 | 
100 | 
101 | def analytic_mix(x, a, b, ainv, binv, n=2):
102 |     return (binv / (b * (2 * np.pi) ** (ainv - a))) ** (n / 2.0) * gauss(binv * x / b)
103 | 
104 | 
105 | @pytest.mark.parametrize("a,b, ainv, binv", ABCOMBOS)
106 | @pytest.mark.parametrize("backend", BACKENDS)
107 | def test_mixed_1d_fb(g1d, a, b, ainv, binv, backend):
108 |     Fk, freq = fft(
109 |         g1d["fx"], L=g1d["L"], a=a, b=b, left_edge=-g1d["L"] / 2, backend=backend
110 |     )
111 |     Lk = -2 * np.min(freq)
112 |     fx, x = ifft(Fk, Lk=Lk, a=ainv, b=binv, backend=backend)
113 |     assert np.max(np.abs(fx.real - analytic_mix(x[0], a, b, ainv, binv, n=1))) < 1e-10
114 | 
115 | 
116 | @pytest.mark.parametrize("a,b, ainv, binv", ABCOMBOS)
117 | @pytest.mark.parametrize("backend", BACKENDS)
118 | def test_mixed_1d_bf(g1d, a, b, ainv, binv, backend):
119 |     Fk, freq = ifft(g1d["fx"], Lk=g1d["L"], a=ainv, b=binv, backend=backend)
120 |     L = -2 * np.min(freq)
121 |     fx, x = fft(Fk, L=L, a=a, b=b, left_edge=-L / 2, backend=backend)
122 |     assert np.max(np.abs(fx.real - analytic_mix(x[0], a, binv, ainv, b, n=1))) < 1e-10
123 | 
124 | 
125 | @pytest.mark.parametrize("a,b, ainv, binv", ABCOMBOS)
126 | @pytest.mark.parametrize("backend", BACKENDS)
127 | def test_mixed_2d_fb(g2d, a, b, ainv, binv, backend):
128 |     Fk, freq = fft(
129 |         g2d["fx"], L=g2d["L"], a=a, b=b, left_edge=-g2d["L"] / 2, backend=backend
130 |     )
131 |     Lk = -2 * np.min(freq)
132 |     fx, x, xgrid = ifft(Fk, Lk=Lk, a=ainv, b=binv, ret_cubegrid=True, backend=backend)
133 |     assert np.max(np.abs(fx.real - analytic_mix(xgrid, a, b, ainv, binv))) < 1e-10
134 | 
135 | # nthreads=False forces the numpy backend; None and ints exercise pyFFTW (if available).
136 | NTHREADS_TO_CHECK = (None, 1, False)
137 | 
138 | if HAVE_FFTW_MULTITHREAD:
139 |     NTHREADS_TO_CHECK += (2,)
140 | 
141 | 
142 | @pytest.mark.parametrize("a,b, ainv, binv", ABCOMBOS)
143 | @pytest.mark.parametrize("nthreads", NTHREADS_TO_CHECK)
144 | def test_mixed_2d_bf(g2d, a, b, ainv, binv, nthreads):
145 |     Fk, freq = ifft(g2d["fx"], Lk=g2d["L"], a=ainv, b=binv, nthreads=nthreads)
146 |     L = -2 * np.min(freq)
147 |     fx, x, xgrid = fft(
148 |         Fk, L=L, a=a, b=b, left_edge=-L / 2, ret_cubegrid=True, nthreads=nthreads
149 |     )
150 |     assert np.max(np.abs(fx.real - analytic_mix(xgrid, a, binv, ainv, b))) < 1e-10
151 | 
152 | 
153 | @pytest.mark.parametrize("nthreads", NTHREADS_TO_CHECK)
154 | def test_fftshift(nthreads):
155 |     x = np.linspace(0, 1, 11)
156 | 
157 |     y = fftshift(ifftshift(x, nthreads=nthreads), nthreads=nthreads)
158 |     assert np.all(x == y)
159 | 
160 | 
161 | @pytest.mark.parametrize("nthreads", NTHREADS_TO_CHECK)
162 | @pytest.mark.parametrize("n", (10, 11))
163 | def test_fftfreq(nthreads, n):
164 |     freqs = fftfreq(n, nthreads=nthreads)
165 |     assert np.all(np.diff(freqs) > 0)
166 | 
--------------------------------------------------------------------------------
/tests/test_lognormal.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from functools import partial
3 | 
4 | from powerbox import LogNormalPowerBox, PowerBox, get_power
5 | 
6 | get_power = partial(get_power, bins_upto_boxlen=True)
7 | 
8 | 
9 | def test_ln_vs_straight():
10 |     # Set up two boxes with exactly the same parameters
11 |     pb = PowerBox(128, lambda u: 100.0 * u**-2.0, dim=3, seed=1234, boxlength=100.0)
12 |     ln_pb = LogNormalPowerBox(
13 |         128, lambda u: 100.0 * u**-2.0, dim=3, seed=1234, boxlength=100.0
14 |     )
15 | 
16 |     pk = get_power(pb.delta_x(), pb.boxlength)[0]
17 |     ln_pk = get_power(ln_pb.delta_x(), pb.boxlength)[0]
18 | 
19 |     pk = pk[1:-1]
20 |     ln_pk = ln_pk[1:-1]
21 |     print(np.mean(np.abs((pk - ln_pk) / pk)), np.abs((pk - ln_pk) / pk))
22 |     assert np.mean(np.abs((pk - ln_pk) / pk)) < 2e-1  # 20% agreement
23 | 
24 | 
25 | def test_ln_vs_straight_standard_freq():
26 |     # Set up two boxes with exactly the same parameters
27 |     pb = PowerBox(
28 |         128,
29 |         lambda u: 12.0 * u**-2.0,
30 |         dim=3,
31 |         seed=1234,
32 |         boxlength=1200.0,
33 |         a=0,
34 |         b=2 * np.pi,
35 |     )
36 |     ln_pb = LogNormalPowerBox(
37 |         128,
38 |         lambda u: 12.0 * u**-2.0,
39 |         dim=3,
40 |         seed=1234,
41 |         boxlength=1200.0,
42 |         a=0,
43 |         b=2 * np.pi,
44 |     )
45 | 
46 |     pk = get_power(pb.delta_x(), pb.boxlength, a=0, b=2 * np.pi)[0]
47 |     ln_pk = get_power(ln_pb.delta_x(), pb.boxlength, a=0, b=2 * np.pi)[0]
48 | 
49 |     pk = pk[1:-1]
50 |     ln_pk = ln_pk[1:-1]
51 |     print(np.mean(np.abs((pk - ln_pk) / pk)), np.abs((pk - ln_pk) / pk))
52 |     assert np.mean(np.abs((pk - ln_pk) / pk)) < 2e-1  # 20% agreement
53 | 
--------------------------------------------------------------------------------
/tests/test_power.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import warnings
3 | from functools import partial
4 | 
5 | from powerbox import PowerBox, get_power, ignore_zero_absk, ignore_zero_ki, power2delta
6 | 
7 | get_power = partial(get_power, bins_upto_boxlen=True)
8 | 
9 | 
10 | def test_power1d():
11 |     p = [0] * 40
12 |     for i in range(40):
13 |         pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, a=0, b=1)
14 | 
15 |         p[i], k =
get_power(pb.delta_x(), pb.boxlength, a=0, b=1) 16 | 17 | assert np.allclose( 18 | np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -2.0, rtol=2 19 | ) 20 | 21 | 22 | def test_power1d_n3(): 23 | p = [0] * 40 24 | for i in range(40): 25 | pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k**-3.0, boxlength=1.0, b=1) 26 | p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1) 27 | 28 | assert np.allclose( 29 | np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3.0, rtol=2 30 | ) 31 | 32 | 33 | def test_power1d_bigL(): 34 | p = [0] * 40 35 | for i in range(40): 36 | pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k**-3.0, boxlength=10.0, b=1) 37 | p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1) 38 | 39 | assert np.allclose( 40 | np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3.0, rtol=2 41 | ) 42 | 43 | 44 | def test_power1d_ordinary_freq(): 45 | p = [0] * 40 46 | for i in range(40): 47 | pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k**-3.0, boxlength=1.0) 48 | p[i], k = get_power(pb.delta_x(), pb.boxlength) 49 | 50 | assert np.allclose( 51 | np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3.0, rtol=2 52 | ) 53 | 54 | 55 | def test_power1d_halfN(): 56 | p = [0] * 40 57 | for i in range(40): 58 | pb = PowerBox(4001, dim=1, pk=lambda k: 1.0 * k**-3.0, boxlength=1.0, b=1) 59 | p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1) 60 | 61 | assert np.allclose( 62 | np.mean(np.array(p), axis=0)[1000:], 1.0 * k[1000:] ** -3.0, rtol=2 63 | ) 64 | 65 | 66 | def test_power2d(): 67 | p = [0] * 5 68 | for i in range(5): 69 | pb = PowerBox(200, dim=2, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 70 | p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1) 71 | 72 | assert np.allclose( 73 | np.mean(np.array(p), axis=0)[100:], 1.0 * k[100:] ** -2.0, rtol=2 74 | ) 75 | 76 | 77 | def test_power3d(): 78 | pb = PowerBox(50, dim=3, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 79 | p, k = get_power(pb.delta_x(), pb.boxlength, b=1) 80 | 81 | print(p / (1.0 * k**-2.0)) 82 | assert np.allclose(p, 1.0 * k**-2.0, rtol=2) 83 | 84 | 85 | def test_k_zero_ignore(): 86 | pb = PowerBox(50, dim=2, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 87 | 88 | dx = pb.delta_x() 89 | p1, k1 = get_power(dx, pb.boxlength, bin_ave=False) 90 | p0, k0 = get_power(dx, pb.boxlength, ignore_zero_mode=True, bin_ave=False) 91 | 92 | assert np.all(k1 == k0) 93 | 94 | assert np.all(p1[1:] == p0[1:]) 95 | 96 | assert p1[0] != p0[0] 97 | 98 | 99 | def test_k_weights(): 100 | pb = PowerBox(50, dim=2, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 101 | 102 | dx = pb.delta_x() 103 | 104 | k_weights = np.ones_like(dx) 105 | k_weights[:, 25] = 0 106 | 107 | p1, k1 = get_power(dx, pb.boxlength, bin_ave=False) 108 | p0, k0 = get_power(dx, pb.boxlength, bin_ave=False, k_weights=k_weights) 109 | 110 | assert np.all(k1 == k0) 111 | assert not np.allclose(p1, p0) 112 | 113 | 114 | def test_prefactor_fnc(): 115 | pb = PowerBox(50, dim=3, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 116 | pdelta, kdelta = get_power(pb.delta_x(), pb.boxlength, prefactor_fnc=power2delta) 117 | p, k = get_power(pb.delta_x(), pb.boxlength) 118 | 119 | assert np.all(k == kdelta) 120 | assert np.any(p != pdelta) 121 | 122 | 123 | def test_k_weights_fnc(): 124 | pb = PowerBox(50, dim=3, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 125 | with warnings.catch_warnings(): 126 | warnings.filterwarnings("ignore", message="invalid value encountered in divide") 127 | warnings.filterwarnings( 128 | "ignore", message="One or more radial bins had no cells 
within it" 129 | ) 130 | p_ki0, k_ki0 = get_power(pb.delta_x(), pb.boxlength, k_weights=ignore_zero_ki) 131 | p, k = get_power(pb.delta_x(), pb.boxlength, k_weights=ignore_zero_absk) 132 | 133 | assert not np.allclose(p, p_ki0) 134 | -------------------------------------------------------------------------------- /tests/test_stats.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import numpy as np 4 | from scipy.ndimage import gaussian_filter 5 | 6 | from powerbox import PowerBox 7 | 8 | 9 | @pytest.mark.skip( 10 | reason="this is not passing to desired tolerance at this point... not sure if this is a problem. It's not systematic." 11 | ) 12 | def test_resolution(): 13 | var = [0] * 6 14 | for i in range(6): 15 | pb = PowerBox( 16 | 64 * 2**i, 17 | dim=2, 18 | pk=lambda k: 1.0 * k**-2.0, 19 | boxlength=1.0, 20 | angular_freq=True, 21 | ) 22 | var[i] = np.var(gaussian_filter(pb.delta_x(), sigma=2**i, mode="wrap")) 23 | print(var / var[0]) 24 | assert np.allclose(var / var[0], 1, atol=1e-2) 25 | -------------------------------------------------------------------------------- /tests/test_tools.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import numpy as np 4 | import warnings 5 | from functools import partial 6 | 7 | from powerbox.powerbox import PowerBox 8 | from powerbox.tools import ( 9 | _getbins, 10 | _magnitude_grid, 11 | above_mu_min_angular_generator, 12 | angular_average, 13 | angular_average_nd, 14 | get_power, 15 | regular_angular_generator, 16 | ) 17 | 18 | get_power = partial(get_power, bins_upto_boxlen=True) 19 | angular_average = partial(angular_average, bins_upto_boxlen=True) 20 | angular_average_nd = partial(angular_average_nd, bins_upto_boxlen=True) 21 | 22 | 23 | def test_warn_interp_weights(): 24 | x = np.linspace(-3, 3, 400) 25 | X, Y = np.meshgrid(x, x) 26 | r2 = X**2 + Y**2 27 | P = r2**-1.0 28 | P = np.repeat(P, 100).reshape(400, 400, 100) 29 | freq = [x, x, np.linspace(-2, 2, 100)] 30 | weights = np.random.rand(np.prod(P.shape)).reshape(P.shape) 31 | with pytest.warns(RuntimeWarning): 32 | angular_average( 33 | P, 34 | freq, 35 | bins=10, 36 | interpolation_method="linear", 37 | weights=weights, 38 | interp_points_generator=regular_angular_generator(), 39 | ) 40 | 41 | 42 | def test_bins_upto_boxlen_warning(): 43 | with pytest.warns( 44 | FutureWarning, 45 | match="In the future, bins will be generated by default up to the smallest", 46 | ): 47 | _getbins(bins=10, coord_mags=np.ones((10, 10)), log=False) 48 | 49 | 50 | @pytest.mark.parametrize("xmax", [1, 10, np.pi]) 51 | @pytest.mark.parametrize("ndim", [1, 2, 3]) 52 | def test_bins_upto_boxlen(xmax, ndim): 53 | x = np.linspace(-xmax, xmax, 21) 54 | mag = _magnitude_grid([x] * ndim) 55 | 56 | bins = _getbins(bins=12, coord_mags=mag, log=False, bins_upto_boxlen=True) 57 | assert bins.max() == xmax 58 | 59 | 60 | @pytest.mark.parametrize("xmax", [1, 10, np.pi]) 61 | @pytest.mark.parametrize("ndim", [1, 2, 3]) 62 | def test_bins_upto_maxmag(xmax, ndim): 63 | x = np.linspace(-xmax, xmax, 21) 64 | mag = _magnitude_grid([x] * ndim) 65 | 66 | bins = _getbins(bins=12, coord_mags=mag, log=False, bins_upto_boxlen=False) 67 | assert np.isclose(bins.max(), xmax * np.sqrt(ndim)) 68 | 69 | 70 | @pytest.mark.parametrize("interpolation_method", [None, "linear"]) 71 | def test_angular_avg_nd_3(interpolation_method): 72 | x = np.linspace(-3, 3, 400) 73 | X, Y = np.meshgrid(x, x) 74 | r2 = X**2 + Y**2 75 | P = 
r2**-1.0 76 | P = np.repeat(P, 100).reshape(400, 400, 100) 77 | freq = [x, x, np.linspace(-2, 2, 100)] 78 | p_k, k_av_bins, sw = angular_average_nd( 79 | P, 80 | freq, 81 | bins=50, 82 | n=2, 83 | interpolation_method=interpolation_method, 84 | return_sumweights=True, 85 | bins_upto_boxlen=True, 86 | ) 87 | if interpolation_method == "linear": 88 | assert np.max(np.abs((p_k[:, 0] - k_av_bins**-2.0) / k_av_bins**-2.0)) < 0.05 89 | else: 90 | # Without interpolation, the radially-averaged power is not very accurate 91 | # due to the low number of bins at small values of k_av_bins, so we start 92 | # the comparison at the 6th bin. 93 | assert ( 94 | np.max(np.abs((p_k[6:, 0] - k_av_bins[6:] ** -2.0) / k_av_bins[6:] ** -2.0)) 95 | < 0.05 96 | ) 97 | 98 | 99 | def test_weights_shape(): 100 | x = np.linspace(-3, 3, 40) 101 | P = np.ones(3 * [40]) 102 | weights = np.ones(3 * [20]) 103 | freq = [x for _ in range(3)] 104 | 105 | with pytest.raises(ValueError): 106 | p_k_lin, k_av_bins_lin = angular_average( 107 | P, freq, bins=10, weights=weights, bins_upto_boxlen=True 108 | ) 109 | 110 | 111 | @pytest.mark.parametrize("n", range(1, 5)) 112 | def test_interp_w_weights(n): 113 | x = np.linspace(-3, 3, 40) 114 | P = np.ones(n * [40]) 115 | weights = np.ones_like(P) 116 | if n == 1: 117 | P[2:5] = 0 118 | weights[2:5] = 0 119 | elif n == 2: 120 | P[2:5, 2:5] = 0 121 | weights[2:5, 2:5] = 0 122 | elif n == 3: 123 | P[:4, 3:6, 7:10] = 0 124 | weights[:4, :, :] = 0 125 | weights[:, 3:6, :] = 0 126 | weights[:, :, 7:10] = 0 127 | else: 128 | P[:4, 3:6, 7:10, 1:2] = 0 129 | weights[:4, :, :, :] = 0 130 | weights[:, 3:6, :, :] = 0 131 | weights[:, :, 7:10, :] = 0 132 | weights[:, :, :, 1:2] = 0 133 | 134 | # Test 4D avg works 135 | freq = [x for _ in range(n)] 136 | p_k_lin, k_av_bins_lin = angular_average( 137 | P, 138 | freq, 139 | bins=10, 140 | interpolation_method="linear", 141 | weights=weights, 142 | interp_points_generator=regular_angular_generator(), 143 | log_bins=True, 144 | bins_upto_boxlen=True, 145 | ) 146 | 147 | assert np.all(p_k_lin == 1.0) 148 | 149 | 150 | @pytest.mark.parametrize("n", range(1, 3)) 151 | def test_zero_ki(n): 152 | x = np.arange(-100, 100, 1) 153 | from powerbox.tools import ignore_zero_ki 154 | 155 | # needed only for shape 156 | freq = n * [x] 157 | coords = np.array(np.meshgrid(*freq)) 158 | kmag = np.sqrt(np.sum(coords**2, axis=0)) 159 | weights = ignore_zero_ki(freq, kmag) 160 | L = x[-1] - x[0] + 1 161 | masked_points = np.sum(weights == 0) 162 | if n == 1: 163 | assert masked_points == 1 164 | elif n == 2: 165 | assert masked_points == n * L - 1 166 | elif n == 3: 167 | assert masked_points == n * L**2 - n * L + 1 168 | else: 169 | assert masked_points == n * L**3 - n * L**2 + n * L - 1 170 | 171 | 172 | @pytest.mark.parametrize("n", range(2, 3)) 173 | def test_interp_w_mu(n): 174 | x = np.linspace(0.0, 3, 40) 175 | if n == 2: 176 | kpar_mesh, kperp_mesh = np.meshgrid(x, x) 177 | with warnings.catch_warnings(): 178 | warnings.filterwarnings( 179 | "ignore", message="divide by zero encountered in divide" 180 | ) 181 | theta = np.arctan2(kperp_mesh, kpar_mesh) 182 | mu_mesh = np.cos(theta) 183 | else: 184 | kx_mesh, ky_mesh, kz_mesh = np.meshgrid(x, x, x, indexing="ij") 185 | theta = np.arccos(kz_mesh / np.sqrt(kx_mesh**2 + ky_mesh**2 + kz_mesh**2)) 186 | mu_mesh = np.cos(theta) 187 | 188 | # Need a little cushion so we test against data at mu = 0.95 189 | # If we test for mu that is higher (default is mu >= 0.97) 190 | # and put the data also only at mu >= 0.97, then 
the interped average will 191 | # not be 1. at low radii so the test fails. 192 | mask = mu_mesh >= 0.95 193 | P = np.zeros(mask.shape) 194 | P[mask] = 1.0 195 | 196 | p_k_lin, k_av_bins_lin = angular_average( 197 | P, 198 | n * [x], 199 | bins=10, 200 | interpolation_method="linear", 201 | weights=1.0, 202 | interp_points_generator=above_mu_min_angular_generator(), 203 | bins_upto_boxlen=True, 204 | ) 205 | # Start from the 4th bin due to the average being a bit < 1 at low radii 206 | assert np.all(p_k_lin[3:] == 1.0) 207 | 208 | 209 | def test_error_coords_and_mask(): 210 | x = np.linspace(1.0, 3, 40) 211 | kpar_mesh, kperp_mesh = np.meshgrid(x, x) 212 | theta = np.arctan2(kperp_mesh, kpar_mesh) 213 | mu_mesh = np.cos(theta) 214 | 215 | mask = mu_mesh >= 0.97 216 | P = np.zeros(mask.shape) 217 | P[mask] = 1.0 218 | with pytest.raises(ValueError): 219 | p_k_lin, k_av_bins_lin = angular_average( 220 | P, 221 | [x, x], 222 | bins=10, 223 | interpolation_method="linear", 224 | weights=1.0, 225 | interp_points_generator=above_mu_min_angular_generator(mu=0.97), 226 | bins_upto_boxlen=True, 227 | ) 228 | 229 | 230 | def test_interp_method(): 231 | x = np.linspace(-3, 3, 40) 232 | P = np.ones((40, 40, 40)) 233 | freq = [x, x, x] 234 | with pytest.raises(ValueError): 235 | angular_average_nd( 236 | P, 237 | freq, 238 | bins=20, 239 | get_variance=True, 240 | interpolation_method="abc", 241 | bins_upto_boxlen=True, 242 | ) 243 | 244 | with pytest.raises(ValueError): 245 | angular_average( 246 | P, 247 | freq, 248 | bins=20, 249 | get_variance=True, 250 | interpolation_method="abc", 251 | bins_upto_boxlen=True, 252 | ) 253 | 254 | 255 | def test_error_w_kmag_coords(): 256 | x = np.linspace(-3, 3, 40) 257 | P = np.ones((40, 40, 40)) 258 | X, Y = np.meshgrid(x, x) 259 | 260 | with pytest.raises(ValueError): 261 | angular_average_nd(P, X**2 + Y**2, bins=20, interpolation_method="linear") 262 | 263 | x = np.linspace(-3, 3, 40) 264 | P = np.ones((40, 40, 40)) 265 | X, Y = np.meshgrid(x, x) 266 | 267 | with pytest.raises(ValueError): 268 | angular_average(P, X**2 + Y**2, bins=20, interpolation_method="linear") 269 | 270 | 271 | def test_kmag_coords_nointerp(): 272 | x = np.linspace(-3, 3, 40) 273 | P = np.ones((40, 40, 40)) 274 | X, Y = np.meshgrid(x, x) 275 | with pytest.raises(ValueError): 276 | angular_average_nd(P, np.sqrt(X**2 + Y**2), bins=20, interpolation_method=None) 277 | with pytest.raises(ValueError): 278 | angular_average(P, np.sqrt(X**2 + Y**2), bins=20, interpolation_method=None) 279 | 280 | 281 | @pytest.mark.parametrize("n", range(1, 3)) 282 | def test_angular_avg_nd(n): 283 | x = np.linspace(-3, 3, 40) 284 | X, Y, Z = np.meshgrid(x, x, x) 285 | r2 = X**2 + Y**2 + Z**2 286 | P = r2**-1.0 287 | 288 | # Test 4D avg works 289 | P = np.repeat(P, 10).reshape(40, 40, 40, 10) 290 | freq = [x, x, x, np.linspace(-2, 2, 10)] 291 | 292 | p_k_lin, k_av_bins_lin = angular_average_nd( 293 | P, freq, bins=10, n=n, interpolation_method="linear", bins_upto_boxlen=True 294 | ) 295 | 296 | if n == 1: 297 | # Without interpolation, the radially-averaged power is not very accurate 298 | # due to the low number of bins at small values of k_av_bins, so we start 299 | # the comparison at the 6th bin. 
300 | assert ( 301 | np.max( 302 | np.abs( 303 | ( 304 | p_k_lin[6:, len(x) // 2, len(x) // 2, 0] 305 | - k_av_bins_lin[6:] ** -2.0 306 | ) 307 | / k_av_bins_lin[6:] ** -2.0 308 | ) 309 | ) 310 | < 0.05 311 | ) 312 | elif n == 2: 313 | assert ( 314 | np.max( 315 | np.abs( 316 | (p_k_lin[:, len(x) // 2, 0] - k_av_bins_lin**-2.0) 317 | / k_av_bins_lin**-2.0 318 | ) 319 | ) 320 | < 0.05 321 | ) 322 | else: 323 | assert ( 324 | np.max(np.abs((p_k_lin[:, 0] - k_av_bins_lin**-2.0) / k_av_bins_lin**-2.0)) 325 | < 0.05 326 | ) 327 | 328 | 329 | def test_angular_avg_nd_complex_interp(): 330 | x = np.linspace(-3, 3, 400) 331 | X, Y = np.meshgrid(x, x) 332 | r2 = X**2 + Y**2 333 | P = r2**-1.0 + 1j * r2**-1.0 334 | P = np.repeat(P, 100).reshape(400, 400, 100) 335 | freq = [x, x, np.linspace(-2, 2, 100)] 336 | p_k_lin, k_av_bins_lin = angular_average_nd( 337 | P, freq, bins=50, n=2, interpolation_method="linear", bins_upto_boxlen=True 338 | ) 339 | real = np.real(p_k_lin) 340 | imag = np.imag(p_k_lin) 341 | assert ( 342 | np.max(np.abs((real[:, 0] - k_av_bins_lin**-2.0) / k_av_bins_lin**-2.0)) < 0.05 343 | ) 344 | 345 | assert np.isclose(real, imag).all() 346 | 347 | 348 | @pytest.mark.parametrize("interpolation_method", [None, "linear"]) 349 | def test_angular_avg_nd_4_2(interpolation_method): 350 | x = np.linspace(-3, 3, 200) 351 | X, Y = np.meshgrid(x, x) 352 | r2 = X**2 + Y**2 353 | P = r2**-1.0 354 | P = np.repeat(P, 10).reshape(200, 200, 10) 355 | P = np.repeat(P, 10).reshape(200, 200, 10, 10) 356 | 357 | freq = [x, x, np.linspace(-2, 2, 10), np.linspace(-2, 2, 10)] 358 | p_k, k_av_bins = angular_average_nd(P, freq, bins=50, n=2) 359 | p_k_lin, k_av_bins_lin = angular_average_nd( 360 | P, 361 | freq, 362 | bins=50, 363 | n=2, 364 | interpolation_method=interpolation_method, 365 | bins_upto_boxlen=True, 366 | ) 367 | # The radially-averaged power is not very accurate 368 | # due to the low number of bins at small values of k_av_bins, so we start 369 | # the comparison at the 6th bin. 
370 | assert ( 371 | np.max( 372 | np.abs( 373 | (p_k_lin[6:, 0, 0] - k_av_bins_lin[6:] ** -2.0) 374 | / k_av_bins_lin[6:] ** -2.0 375 | ) 376 | ) 377 | < 0.06 378 | ) 379 | 380 | 381 | def test_var_not_impl(): 382 | x = np.linspace(-3, 3, 200) 383 | P = np.ones((200, 10)) 384 | coords = [x, np.linspace(-2, 2, 10)] 385 | with pytest.raises(NotImplementedError): 386 | ave, coord, var = angular_average( 387 | P, coords, bins=20, get_variance=True, interpolation_method="linear" 388 | ) 389 | with pytest.raises(NotImplementedError): 390 | ave, coord, var = angular_average_nd( 391 | P, coords, bins=20, get_variance=True, interpolation_method="linear" 392 | ) 393 | 394 | 395 | def test_angular_avg_nd_2_1_varnull(): 396 | x = np.linspace(-3, 3, 200) 397 | 398 | P = np.ones((200, 10)) 399 | 400 | coords = [x, np.linspace(-2, 2, 10)] 401 | p_k, k_av_bins, var, sw = angular_average_nd( 402 | P, 403 | coords, 404 | bins=20, 405 | n=1, 406 | get_variance=True, 407 | return_sumweights=True, 408 | bins_upto_boxlen=True, 409 | ) 410 | 411 | assert np.all(var == 0) 412 | 413 | 414 | def test_null_variance_2d(): 415 | x = np.linspace(-3, 3, 400) 416 | X, Y = np.meshgrid(x, x) 417 | r2 = X**2 + Y**2 418 | P = np.ones_like(r2) 419 | ave, coord, var = angular_average( 420 | P, 421 | np.sqrt(r2), 422 | bins=np.linspace(0, x.max(), 20), 423 | get_variance=True, 424 | bins_upto_boxlen=True, 425 | ) 426 | assert np.all(var == 0) 427 | 428 | 429 | def test_variance_2d(): 430 | x = np.linspace(-3, 3, 400) 431 | X, Y = np.meshgrid(x, x) 432 | r2 = X**2 + Y**2 433 | P = np.ones_like(r2) 434 | P += np.random.normal(scale=1, size=(len(x), len(x))) 435 | ave, coord, var = angular_average( 436 | P, 437 | np.sqrt(r2), 438 | bins=np.linspace(0, x.max(), 20), 439 | get_variance=True, 440 | bins_upto_boxlen=True, 441 | ) 442 | assert np.all(np.diff(var) <= 0) 443 | 444 | 445 | def test_complex_variance(): 446 | x = np.linspace(-3, 3, 400) 447 | X, Y = np.meshgrid(x, x) 448 | r2 = X**2 + Y**2 449 | P = np.ones_like(r2) + np.ones_like(r2) * 1j 450 | with pytest.raises(NotImplementedError): 451 | ave, coord, var = angular_average( 452 | P, 453 | np.sqrt(r2), 454 | bins=np.linspace(0, x.max(), 20), 455 | get_variance=True, 456 | bins_upto_boxlen=True, 457 | ) 458 | 459 | 460 | def test_bin_edges(): 461 | x = np.linspace(-3, 3, 200) 462 | X, Y = np.meshgrid(x, x) 463 | r2 = X**2 + Y**2 464 | P = r2**-1.0 465 | bins = np.linspace(0, x.max(), 20) 466 | ave, coord = angular_average( 467 | P, np.sqrt(r2), bins=bins, bin_ave=False, bins_upto_boxlen=True 468 | ) 469 | assert np.all(coord == bins) 470 | 471 | 472 | def test_sum(): 473 | x = np.linspace(-3, 3, 200) 474 | X, Y = np.meshgrid(x, x) 475 | r2 = X**2 + Y**2 476 | P = r2**-1.0 477 | ave, coord = angular_average( 478 | P, np.sqrt(r2), bins=20, bin_ave=False, average=False, bins_upto_boxlen=True 479 | ) 480 | assert np.sum(P[r2 < 9.0]) == np.sum(ave) 481 | 482 | ave, coord = angular_average( 483 | P, np.sqrt(r2), bins=20, bin_ave=True, average=False, bins_upto_boxlen=True 484 | ) 485 | assert np.sum(P[r2 < 9.0]) == np.sum(ave) 486 | 487 | 488 | def test_var_trivial_weights(): 489 | x = np.linspace(-3, 3, 400) 490 | X, Y = np.meshgrid(x, x) 491 | r2 = X**2 + Y**2 492 | P = np.ones_like(r2) 493 | P += np.random.normal(scale=1, size=(len(x), len(x))) 494 | ave, coord, var = angular_average( 495 | P, 496 | np.sqrt(r2), 497 | bins=np.linspace(0, x.max(), 20), 498 | get_variance=True, 499 | weights=np.ones_like(r2), 500 | bins_upto_boxlen=True, 501 | ) 502 | print(np.diff(var)) 503 | 
assert np.all(np.diff(var) <= 1e-6) 504 | 505 | 506 | def test_logbins(): 507 | x = np.linspace(-3, 3, 400) 508 | X, Y = np.meshgrid(x, x) 509 | r2 = X**2 + Y**2 510 | P = np.ones_like(r2) 511 | ave, coord = angular_average( 512 | P, np.sqrt(r2), bins=10, bin_ave=False, log_bins=True, bins_upto_boxlen=True 513 | ) 514 | 515 | assert np.all(np.isclose(np.diff(coord[1:] / coord[:-1]), 0)) 516 | 517 | 518 | def test_cross_power_identity(): 519 | pb = PowerBox(200, dim=2, pk=lambda k: 1.0 * k**-2.0, boxlength=1.0, b=1) 520 | dx = pb.delta_x() 521 | p, k = get_power(dx, pb.boxlength, b=1) 522 | p_cross, k = get_power(dx, pb.boxlength, b=1, deltax2=dx) 523 | assert np.all(np.isclose(p, p_cross)) 524 | p, k = get_power(dx, [1, 1], b=1) 525 | p_cross, k = get_power(dx, [1, 1], b=1, deltax2=dx) 526 | assert np.all(np.isclose(p, p_cross)) 527 | 528 | 529 | @pytest.mark.skip() 530 | def test_against_multirealisation(): 531 | x = np.linspace(-3, 3, 1000) 532 | X, Y = np.meshgrid(x, x) 533 | r2 = X**2 + Y**2 534 | bins = np.linspace(0, x.max(), 20) 535 | 536 | # Get the variance from several realisations 537 | ave = [0] * 50 538 | for j in range(50): 539 | P = np.ones_like(r2) + np.random.normal(scale=1, size=(len(x), len(x))) 540 | ave[j], coord = angular_average(P, np.sqrt(r2), bins=bins) 541 | 542 | var = np.var(np.array(ave), axis=0) 543 | 544 | # Get the variance from a single realisation 545 | ave, coord, var2 = angular_average(P, np.sqrt(r2), bins=bins, get_variance=True) 546 | 547 | print(var) 548 | print(var2) 549 | assert np.all(np.isclose(var, var2, 1e-2)) 550 | --------------------------------------------------------------------------------