├── .git-blame-ignore-revs ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── custom-issue-template.md │ └── feature_request.md ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── release.yml │ └── unit_tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── .yamllint.yml ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── changelog.md ├── docs ├── Makefile ├── make.bat └── source │ ├── _static │ ├── css │ │ └── custom.css │ ├── logo.gif │ └── versions.json │ ├── _templates │ ├── custom-class-template.rst │ └── custom-module-template.rst │ ├── api.rst │ ├── cfc.rst │ ├── conf.py │ ├── development.rst │ ├── examples.rst │ ├── general.rst │ ├── index.rst │ ├── installation.rst │ ├── motivation.rst │ ├── refs.bib │ ├── tde.rst │ ├── utils.rst │ └── waveshape.rst ├── environment.yml ├── examples ├── README.rst ├── compute_aac.py ├── compute_pac_control_harmonics.py ├── compute_ppc.py ├── plot_compute_general.py ├── plot_compute_pac.py ├── plot_compute_tde.py ├── plot_compute_tde_fbands.py ├── plot_compute_waveshape.py └── plot_compute_waveshape_noisy_data.py ├── pyproject.toml ├── src └── pybispectra │ ├── __init__.py │ ├── cfc │ ├── __init__.py │ ├── aac.py │ ├── pac.py │ └── ppc.py │ ├── data │ ├── __init__.py │ ├── example_data.py │ └── example_data │ │ ├── sim_data_aac.npy │ │ ├── sim_data_pac_bivariate.npy │ │ ├── sim_data_pac_univariate.npy │ │ ├── sim_data_ppc.npy │ │ ├── sim_data_tde_correlated_noise.npy │ │ ├── sim_data_tde_fbands.npy │ │ ├── sim_data_tde_independent_noise.npy │ │ ├── sim_data_waveshape_noisy.npy │ │ ├── sim_data_waveshape_peaks_troughs.npy │ │ └── sim_data_waveshape_sawtooths.npy │ ├── general │ ├── __init__.py │ └── general.py │ ├── tde │ ├── __init__.py │ └── tde.py │ ├── utils │ ├── __init__.py │ ├── _defaults.py │ ├── _docs.py │ ├── _plot.py │ ├── _process.py │ ├── _utils.py │ ├── ged.py │ ├── results.py │ └── utils.py │ └── waveshape 
│ ├── __init__.py │ └── waveshape.py └── tests ├── test_cfc.py ├── test_ged.py ├── test_general.py ├── test_plotting.py ├── test_results.py ├── test_tde.py ├── test_util_funcs.py └── test_waveshape.py /.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | cb8dc22fa55a5aa28d33efd5eeb413400c8371e3 # linting and line length -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Tell us about broken, incorrect, or inconsistent behavior 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | #### Describe the bug 11 | 〈Replace this text with a clear and concise description of the bug.〉 12 | 13 | 14 | #### Steps to reproduce 15 | 〈Replace this text with a code snippet or minimal working example [MWE] to 16 | replicate your problem. This example should involve some simulated data (e.g., 17 | one of those shipped with PyBispectra `pybispectra.data.example_data.DATASETS` 18 | that can be loaded with `pybispectra.get_example_data_paths()`), or one of the 19 | [datasets shipped with MNE-Python] (e.g., the [sample] dataset). 
If you cannot 20 | replicate the error on a built-in dataset, provide a link to a small, 21 | anonymised portion of your data that yields the error.〉 22 | 23 | [MWE]: https://en.wikipedia.org/wiki/Minimal_Working_Example 24 | [datasets shipped with MNE-Python]: https://mne.tools/stable/documentation/datasets.html 25 | [sample]: https://mne.tools/stable/documentation/datasets.html#sample 26 | 27 | 28 | #### Expected results 29 | 〈Replace this text with a clear and concise description of what you expected to 30 | happen.〉 31 | 32 | 33 | #### Actual results 34 | 〈Replace this text with the actual output, traceback, screenshot, or other 35 | description of the results.〉 36 | 37 | 38 | #### Additional information 39 | 〈Replace this text with information about your system. This should include 40 | information about the PyBispectra version (`pybispectra.__version__`) as well 41 | as other information that can be found with MNE by running `import mne; 42 | mne.sys_info()`.〉 43 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom-issue-template.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Create an issue without a template 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[ENH]" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | #### Describe what is missing 11 | 〈Replace this text with a clear and concise description of the missing 12 | feature.〉 13 | 14 | 15 | #### Describe your solution 16 | 〈Replace this text with a clear and concise description of how the missing 17 | feature could be 
implemented.〉 18 | 19 | #### Describe possible alternatives 20 | 〈Replace this text with a clear and concise description of possible alternative 21 | features or implementations that could help to address the problem.〉 22 | 23 | 24 | #### Additional information 25 | 〈Replace this text with additional information that is relevant to the feature 26 | and proposed implementations.〉 27 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Thanks for contributing to PyBispectra! Before submitting your pull request, 2 | please read the [contribution guide](https://pybispectra.readthedocs.io/en/main/development.html#contributing-to-pybispectra). 3 | 4 | #### Reference issue 5 | Addresses #ISSUE_NUMBER. 6 | 7 | 8 | #### What does this implement/fix? 
9 | 〈Replace this text with a clear and concise description of this pull request's 10 | content.〉 11 | 12 | 13 | #### Additional information 14 | 〈Replace this text with additional information that is relevant to the pull 15 | request.〉 16 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # Upload a Python Package using Twine when a release is created 2 | 3 | name: Build 4 | on: 5 | release: 6 | types: [published] 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | branches: 12 | - main 13 | 14 | permissions: 15 | contents: read 16 | 17 | jobs: 18 | package: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Set up Python 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: '3.10' 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install build twine 30 | - name: Build package 31 | run: python -m build --sdist --wheel 32 | - name: Check package 33 | run: twine check --strict dist/* 34 | - name: Check env vars 35 | run: | 36 | echo "Triggered by: ${{ github.event_name }}" 37 | - uses: actions/upload-artifact@v4 38 | with: 39 | name: dist 40 | path: dist 41 | 42 | # PyPI on release 43 | pypi: 44 | needs: package 45 | environment: release 46 | runs-on: ubuntu-latest 47 | if: github.event_name == 'release' 48 | permissions: 49 | id-token: write 50 | steps: 51 | - uses: actions/download-artifact@v4 52 | with: 53 | name: dist 54 | path: dist 55 | - name: Publish to PyPI 56 | uses: pypa/gh-action-pypi-publish@release/v1 57 | -------------------------------------------------------------------------------- /.github/workflows/unit_tests.yml: -------------------------------------------------------------------------------- 1 | name: "unit_tests" 2 | concurrency: 3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ 
github.event.type }} 4 | cancel-in-progress: true 5 | on: 6 | push: 7 | branches: 8 | - "main" 9 | pull_request: 10 | branches: 11 | - "*" 12 | 13 | jobs: 14 | # Run unit tests 15 | test_pip: 16 | runs-on: ${{ matrix.os }} 17 | defaults: 18 | run: 19 | shell: bash -e {0} 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | include: 24 | - os: ubuntu-latest 25 | python-version: "3.10" 26 | - os: ubuntu-latest 27 | python-version: "3.12" 28 | - os: macos-13 # Intel 29 | python-version: "3.12" 30 | - os: macos-14 # arm64 31 | python-version: "3.12" 32 | - os: windows-latest 33 | python-version: "3.12" 34 | env: 35 | TZ: Europe/Berlin 36 | FORCE_COLOR: true 37 | DISPLAY: ":99.0" 38 | OPENBLAS_NUM_THREADS: "1" 39 | PYTHONUNBUFFERED: "1" 40 | steps: 41 | - uses: actions/setup-python@v5 42 | with: 43 | python-version: ${{ matrix.python-version }} 44 | - uses: actions/checkout@v4 45 | - uses: pyvista/setup-headless-display-action@main 46 | with: 47 | qt: true 48 | pyvista: false 49 | - name: Install dependencies 50 | run: | 51 | python -m pip install --upgrade pip setuptools wheel 52 | pip install --upgrade --upgrade-strategy eager .[test] 53 | - name: Display versions and environment information 54 | run: | 55 | echo $TZ 56 | date 57 | python --version 58 | which python 59 | - run: pip install -e . 
60 | - run: python -c "import pybispectra; import mne; print(f'PyBispectra {pybispectra.__version__}\n'); mne.sys_info()" 61 | - name: Run pytest 62 | run: python -m coverage run && coverage report 63 | 64 | test_conda: 65 | timeout-minutes: 90 66 | runs-on: ubuntu-latest 67 | defaults: 68 | run: 69 | shell: bash -el {0} 70 | env: 71 | MKL_NUM_THREADS: '1' 72 | PYTHONUNBUFFERED: '1' 73 | PYTHON_VERSION: '3.12' 74 | steps: 75 | - uses: actions/checkout@v4 76 | - uses: pyvista/setup-headless-display-action@main 77 | with: 78 | qt: true 79 | pyvista: false 80 | - uses: mamba-org/setup-micromamba@v2 81 | with: 82 | environment-file: environment.yml 83 | create-args: >- # beware the >- instead of |, we don't split on newlines but on spaces 84 | python=${{ env.PYTHON_VERSION }} 85 | - name: Install package and test dependencies 86 | run: | 87 | pip install -e . 88 | pip install .[test] 89 | - name: Display versions and environment information 90 | run: python -c "import pybispectra; import mne; print(f'PyBispectra {pybispectra.__version__}\n'); mne.sys_info()" 91 | - name: Run pytest 92 | run: python -m coverage run && coverage report 93 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | docs/source/auto_examples 74 | docs/source/generated 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # poetry 100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 104 | #poetry.lock 105 | 106 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 107 | __pypackages__/ 108 | 109 | # Celery stuff 110 | celerybeat-schedule 111 | celerybeat.pid 112 | 113 | # SageMath parsed files 114 | *.sage.py 115 | 116 | # Environments 117 | .env 118 | .venv 119 | env/ 120 | venv/ 121 | ENV/ 122 | env.bak/ 123 | venv.bak/ 124 | 125 | # Spyder project settings 126 | .spyderproject 127 | .spyproject 128 | 129 | # Rope project settings 130 | .ropeproject 131 | 132 | # mkdocs documentation 133 | /site 134 | 135 | # mypy 136 | .mypy_cache/ 137 | .dmypy.json 138 | dmypy.json 139 | 140 | # Pyre type checker 141 | .pyre/ 142 | 143 | # pytype static type analyzer 144 | .pytype/ 145 | 146 | # Cython debug symbols 147 | cython_debug/ 148 | 149 | # PyCharm 150 | # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can 151 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 152 | # and can be added to the global gitignore or merged into this file. For a more nuclear 153 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
154 | #.idea/ 155 | 156 | # VS Code 157 | .vscode -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | # ruff PyBispectra 3 | - repo: https://github.com/astral-sh/ruff-pre-commit 4 | rev: v0.11.13 5 | hooks: 6 | - id: ruff 7 | name: ruff lint pybispectra 8 | args: ["--fix"] 9 | files: ^src/pybispectra/ 10 | 11 | # ruff examples 12 | - repo: https://github.com/astral-sh/ruff-pre-commit 13 | rev: v0.11.13 14 | hooks: 15 | - id: ruff 16 | name: ruff lint examples 17 | args: ["--fix"] 18 | files: ^examples/ 19 | 20 | # codespell 21 | - repo: https://github.com/codespell-project/codespell 22 | rev: v2.4.1 23 | hooks: 24 | - id: codespell 25 | additional_dependencies: 26 | - tomli 27 | files: ^pybispectra/|^docs/|^examples/ 28 | types_or: [python, rst, inc] 29 | 30 | # yamllint 31 | - repo: https://github.com/adrienverge/yamllint.git 32 | rev: v1.37.1 33 | hooks: 34 | - id: yamllint 35 | args: [--strict, -c, .yamllint.yml] 36 | 37 | # rstcheck 38 | - repo: https://github.com/rstcheck/rstcheck.git 39 | rev: v6.2.5 40 | hooks: 41 | - id: rstcheck 42 | additional_dependencies: 43 | - tomli 44 | files: ^docs/.*\.(rst|inc)$ 45 | 46 | # toml-sort 47 | - repo: https://github.com/pappasam/toml-sort.git 48 | rev: v0.24.2 49 | hooks: 50 | - id: toml-sort-fix 51 | 52 | ci: 53 | autofix_prs: true 54 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | 14 | # Build documentation in the docs/ 
directory with Sphinx 15 | sphinx: 16 | configuration: docs/source/conf.py 17 | builder: html 18 | fail_on_warning: false 19 | 20 | # Optionally declare the Python requirements required to build your docs 21 | python: 22 | install: 23 | - method: pip 24 | path: . 25 | extra_requirements: 26 | - doc 27 | -------------------------------------------------------------------------------- /.yamllint.yml: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | line-length: disable 5 | document-start: disable 6 | truthy: disable 7 | new-lines: 8 | type: platform 9 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - family-names: "Binns" 5 | given-names: "Thomas Samuel" 6 | orcid: "https://orcid.org/0000-0003-0657-0891" 7 | - family-names: "Pellegrini" 8 | given-names: "Franziska" 9 | orcid: "https://orcid.org/0000-0001-9769-1597" 10 | - family-names: "Jurhar" 11 | given-names: "Tin" 12 | orcid: "https://orcid.org/0000-0002-8804-2349" 13 | - family-names: "Haufe" 14 | given-names: "Stefan" 15 | orcid: "https://orcid.org/0000-0003-1470-9195" 16 | title: "PyBispectra" 17 | doi: 10.5281/zenodo.8377820 18 | date-released: 2023-09-23 19 | url: "https://github.com/neuromodulation/PyBispectra/tree/main" 20 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct - PyBispectra 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, 
disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behaviour that contributes to a positive environment for our community include: 10 | 11 | * Demonstrating empathy and kindness toward other people 12 | * Being respectful of differing opinions, viewpoints, and experiences 13 | * Giving and gracefully accepting constructive feedback 14 | * Accepting responsibility and apologising to those affected by our mistakes, and learning from the experience 15 | * Focusing on what is best not just for us as individuals, but for the overall community 16 | 17 | Examples of unacceptable behaviour include: 18 | 19 | * The use of sexualised language or imagery, and sexual attention or advances 20 | * Trolling, insulting or derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or email address, without their explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying and enforcing our standards of acceptable behaviour and will take appropriate and fair corrective action in response to any instances of unacceptable behaviour. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviours that they deem inappropriate, threatening, offensive, or harmful. 
30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behaviour may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. 38 | 39 | All community leaders are obligated to respect the privacy and security of the reporter of any incident. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org/), version [1.4](https://www.contributor-covenant.org/version/1/4/code-of-conduct/code_of_conduct.md) and [2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct/code_of_conduct.md), and was generated by [contributing.md](https://contributing.md/generator). -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to PyBispectra 2 | 3 | All contributions are welcomed, whether you wish to report issues you have encountered, have suggestions for improvements, or have made changes which you would like to see officially added to the package. For more information, see the [development](https://pybispectra.readthedocs.io/en/main/development.html) page. 4 | 5 | Users and contributors to PyBispectra are expected to follow our [Code of Conduct](CODE_OF_CONDUCT.md). 
-------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Thomas Samuel Binns 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![PyBispectra logo](docs/source/_static/logo.gif) 2 | 3 | A Python signal processing package for computing spectral- and time-domain interactions using the bispectrum. 4 | 5 | This package provides the tools for computing phase-amplitude coupling, time delay estimation, and wave shape features using the bispectrum and bicoherence. Additional tools for computing amplitude-amplitude coupling, phase-phase coupling, and spatio-spectral filters are also provided. 
6 | 7 | Parallel processing and [Numba](https://numba.pydata.org/) optimisation are implemented to reduce computation times. 8 | 9 | ## Installation & Requirements: 10 | Install the package into the desired environment using pip `pip install pybispectra`
11 | More information on the [installation](https://pybispectra.readthedocs.io/en/main/installation.html) page. 12 | 13 | ## Use: 14 | To get started with the toolbox, check out the [documentation](https://pybispectra.readthedocs.io/en/main/) and [examples](https://pybispectra.readthedocs.io/en/main/examples.html). 15 | 16 | ## Contributing & Development: 17 | If you encounter issues with the package, want to suggest improvements, or have made any changes which you would like to see officially supported, please refer to the [development](https://pybispectra.readthedocs.io/en/main/development.html) page. A unit test suite is included and must be expanded where necessary to validate any changes. 18 | 19 | ## Citing: 20 | If you use this toolbox in your work, please include the following citation:
21 | Binns, T. S., Pellegrini, F., Jurhar, T., & Haufe, S. (2023). PyBispectra. DOI: [10.5281/zenodo.8377820](https://doi.org/10.5281/zenodo.8377820) 22 | -------------------------------------------------------------------------------- /changelog.md: -------------------------------------------------------------------------------- 1 | # PyBispectra Changelog 2 | 3 | ## [Version 1.3dev](https://pybispectra.readthedocs.io/en/main/) 4 | 5 | ##### Bug Fixes 6 | - Fixed error where `indices` in `ResultsCFC`, `ResultsTDE`, and `ResultsGeneral` classes were not being mapped to results correctly. 7 | 8 | ## [Version 1.2](https://pybispectra.readthedocs.io/en/1.2.1/) 9 | 10 | ##### Enhancements 11 | - Added general `Bispectrum` and `Threenorm` classes for computing with flexible kmn channel combinations. 12 | 13 | ##### Bug Fixes 14 | - Fixed error where the number of subplots exceeding the number of nodes would cause plotting to fail. 15 | - Fixed error where bandpass filter settings for the SSD method in `SpatioSpectralFilter` were not being applied correctly. 16 | 17 | ##### API 18 | - Changed the default value of `min_ratio` in `SpatioSpectralFilter.get_transformed_data()` from `1.0` to `-inf`. 19 | - Added the option to control whether a copy is returned from the `get_results()` method of all `Results...` classes and from `SpatioSpectralFilter.get_transformed_data()` (default behaviour returns a copy, like in previous versions). 20 | - Added new `fit_ssd()`, `fit_hpmax()`, and `transform()` methods to the `SpatioSpectralFilter` class to bring it more in line with `scikit-learn` fit-transform classes. 21 | 22 | ##### Documentation 23 | - Added a new example for computing the bispectrum and threenorm using the general classes. 24 | 25 | ## [Version 1.1](https://pybispectra.readthedocs.io/en/1.1.0/) 26 | 27 | ##### Enhancements 28 | - Reduced the memory requirement of bispectrum computations. 
29 | - Added support for computing & storing time delays of multiple frequency bands simultaneously. 30 | - Added a new option for controlling the colour bar of waveshape plots. 31 | - Added an option for controlling the precision of computations. 32 | 33 | ##### Bug Fixes 34 | - Fixed incorrect channel indexing for time delay antisymmetrisation. 35 | 36 | ##### API 37 | - Changed how operations on specific frequency/time ranges are specified to be more flexible. 38 | 39 | ##### Documentation 40 | - Added a new example for computing time delays on specific frequency bands. 41 | 42 | 43 | ## [Version 1.0](https://pybispectra.readthedocs.io/en/1.0.0/) 44 | 45 | - Initial release. -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set AUTOEXDIR=%SOURCEDIR%\auto_examples 12 | set GENDIR=%SOURCEDIR%\generated 13 | set BUILDDIR=build 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | if "%1" == "" goto help 29 | if "%1" == "clean" goto clean 30 | 31 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 32 | goto end 33 | 34 | :help 35 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 36 | goto end 37 | 38 | :clean 39 | echo Removing everything under '%AUTOEXDIR%' and '%GENDIR%'... 
40 | del /S /Q %AUTOEXDIR%\* >nul 2>&1 41 | del /S /Q %GENDIR%\* >nul 2>&1 42 | %SPHINXBUILD% -M clean %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 43 | 44 | :end 45 | popd 46 | -------------------------------------------------------------------------------- /docs/source/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | div { 2 | text-align: justify; 3 | } 4 | 5 | ul { 6 | text-align: left 7 | } -------------------------------------------------------------------------------- /docs/source/_static/logo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/braindatalab/PyBispectra/1944a74ad24fd1aef82fc0e94752f6f1150f1a9a/docs/source/_static/logo.gif -------------------------------------------------------------------------------- /docs/source/_static/versions.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "1.3.0dev", 4 | "version": "1.3.0dev", 5 | "url": "https://pybispectra.readthedocs.io/en/main/" 6 | }, 7 | { 8 | "name": "1.2.1", 9 | "version": "1.2.1", 10 | "url": "https://pybispectra.readthedocs.io/en/1.2.1/" 11 | }, 12 | { 13 | "name": "1.1.0", 14 | "version": "1.1.0", 15 | "url": "https://pybispectra.readthedocs.io/en/1.1.0/" 16 | }, 17 | { 18 | "name": "1.0.0", 19 | "version": "1.0.0", 20 | "url": "https://pybispectra.readthedocs.io/en/1.0.0/" 21 | } 22 | ] -------------------------------------------------------------------------------- /docs/source/_templates/custom-class-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. 
autoclass:: {{ objname }} 6 | :members: 7 | :inherited-members: 8 | -------------------------------------------------------------------------------- /docs/source/_templates/custom-module-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: Module Attributes 8 | 9 | .. autosummary:: 10 | :toctree: 11 | {% for item in attributes %} 12 | {{ item }} 13 | {%- endfor %} 14 | {% endif %} 15 | {% endblock %} 16 | 17 | {% block functions %} 18 | {% if functions %} 19 | .. rubric:: {{ _('Functions') }} 20 | 21 | .. autosummary:: 22 | :toctree: 23 | :nosignatures: 24 | {% for item in functions %} 25 | {{ item }} 26 | {%- endfor %} 27 | {% endif %} 28 | {% endblock %} 29 | 30 | {% block classes %} 31 | {% if classes %} 32 | .. rubric:: {{ _('Classes') }} 33 | 34 | .. autosummary:: 35 | :toctree: 36 | :template: custom-class-template.rst 37 | {% for item in classes %} 38 | {{ item }} 39 | {%- endfor %} 40 | {% endif %} 41 | {% endblock %} 42 | 43 | {% block exceptions %} 44 | {% if exceptions %} 45 | .. rubric:: {{ _('Exceptions') }} 46 | 47 | .. autosummary:: 48 | :toctree: 49 | {% for item in exceptions %} 50 | {{ item }} 51 | {%- endfor %} 52 | {% endif %} 53 | {% endblock %} 54 | 55 | {% block modules %} 56 | {% if modules %} 57 | .. rubric:: Modules 58 | 59 | .. autosummary:: 60 | :toctree: 61 | :template: custom-module-template.rst 62 | :recursive: 63 | {% for item in modules %} 64 | {{ item }} 65 | {%- endfor %} 66 | {% endif %} 67 | {% endblock %} -------------------------------------------------------------------------------- /docs/source/api.rst: -------------------------------------------------------------------------------- 1 | API 2 | === 3 | 4 | Here are the API references for the classes and functions of PyBispectra. 5 | 6 | .. 
container:: d-none 7 | 8 | :py:mod:`pybispectra`: 9 | 10 | .. automodule:: pybispectra 11 | :no-members: 12 | :no-inherited-members: 13 | 14 | .. toctree:: 15 | :maxdepth: 2 16 | 17 | cfc 18 | tde 19 | waveshape 20 | general 21 | utils 22 | -------------------------------------------------------------------------------- /docs/source/cfc.rst: -------------------------------------------------------------------------------- 1 | .. _api_cfc: 2 | 3 | Cross-Frequency Coupling 4 | ======================== 5 | 6 | :py:mod:`pybispectra.cfc`: 7 | 8 | .. automodule:: pybispectra.cfc 9 | :no-members: 10 | :no-inherited-members: 11 | 12 | .. autosummary:: 13 | :template: custom-class-template.rst 14 | :toctree: generated/ 15 | 16 | AAC 17 | PAC 18 | PPC -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | 9 | import os 10 | import sys 11 | 12 | import pybispectra 13 | from pybispectra.utils._docs import linkcode_resolve 14 | 15 | project = "PyBispectra" 16 | copyright = "2023, Thomas S. Binns" 17 | author = "Thomas S. 
Binns" 18 | release = "1.3.0dev" 19 | 20 | # -- General configuration --------------------------------------------------- 21 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 22 | 23 | sys.path.insert(0, os.path.abspath("../../")) 24 | 25 | extensions = [ 26 | "sphinx.ext.mathjax", 27 | "sphinx.ext.autosectionlabel", 28 | "sphinx.ext.autodoc", 29 | "sphinx.ext.autosummary", 30 | "sphinx.ext.linkcode", 31 | "sphinx.ext.intersphinx", 32 | "numpydoc", 33 | "sphinxcontrib.bibtex", 34 | "sphinx_gallery.gen_gallery", 35 | "sphinx_copybutton", 36 | ] 37 | 38 | source_suffix = [".rst", ".md"] 39 | 40 | bibtex_bibfiles = ["refs.bib"] 41 | 42 | sphinx_gallery_conf = { 43 | "examples_dirs": "../../examples", 44 | "gallery_dirs": "auto_examples", 45 | } 46 | 47 | templates_path = ["_templates"] 48 | exclude_patterns = ["_build"] 49 | 50 | 51 | # -- Options for HTML output ------------------------------------------------- 52 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 53 | 54 | html_theme = "pydata_sphinx_theme" 55 | html_static_path = ["_static"] 56 | 57 | html_css_files = [ 58 | "css/custom.css", 59 | ] 60 | 61 | html_theme_options = { 62 | "icon_links": [ 63 | dict( 64 | name="GitHub", 65 | url="https://github.com/braindatalab/PyBispectra", 66 | icon="fa-brands fa-square-github", 67 | ) 68 | ], 69 | "icon_links_label": "External Links", # for screen reader 70 | "use_edit_page_button": False, 71 | "navigation_with_keys": False, 72 | "show_toc_level": 1, 73 | "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], 74 | "footer_start": ["copyright"], 75 | "switcher": { 76 | "json_url": "https://pybispectra.readthedocs.io/en/main/_static/versions.json", # noqa E501 77 | "version_match": pybispectra.__version__, 78 | }, 79 | "pygment_light_style": "default", 80 | "pygment_dark_style": "github-dark", 81 | } 82 | 83 | 84 | # -- Intersphinx configuration 
----------------------------------------------- 85 | 86 | intersphinx_mapping = { 87 | "python": ("https://docs.python.org/3", None), 88 | "numpy": ("https://numpy.org/doc/stable", None), 89 | "scipy": ("https://docs.scipy.org/doc/scipy", None), 90 | "matplotlib": ("https://matplotlib.org/stable", None), 91 | "numba": ("https://numba.readthedocs.io/en/latest", None), 92 | "mne": ("https://mne.tools/stable", None), 93 | } 94 | 95 | 96 | # NumPyDoc configuration ----------------------------------------------------- 97 | 98 | # Define what extra methods numpydoc will document 99 | numpydoc_class_members_toctree = True 100 | numpydoc_attributes_as_param_list = True 101 | numpydoc_xref_param_type = True 102 | numpydoc_xref_aliases = { 103 | # Python 104 | "bool": ":class:`python:bool`", 105 | # Matplotlib 106 | "Axes": "matplotlib.axes.Axes", 107 | "Figure": "matplotlib.figure.Figure", 108 | } 109 | numpydoc_xref_ignore = { 110 | # words 111 | "instance", 112 | "instances", 113 | "of", 114 | "default", 115 | "shape", 116 | "or", 117 | "with", 118 | "length", 119 | "pair", 120 | "pyplot", 121 | "matplotlib", 122 | "optional", 123 | "kwargs", 124 | "in", 125 | "dtype", 126 | "object", 127 | "low", 128 | "high", 129 | # shapes 130 | "epochs", 131 | "channels", 132 | "nodes", 133 | "rank", 134 | "times", 135 | "components", 136 | "frequencies", 137 | "frequency_bands", 138 | "x", 139 | "n_vertices", 140 | "n_faces", 141 | "n_channels", 142 | "m", 143 | "n", 144 | "n_events", 145 | "n_colors", 146 | "n_times", 147 | "obj", 148 | "n_chan", 149 | "n_epochs", 150 | "n_picks", 151 | "n_ch_groups", 152 | "n_dipoles", 153 | "n_ica_components", 154 | "n_pos", 155 | "n_node_names", 156 | "n_tapers", 157 | "n_signals", 158 | "n_step", 159 | "n_freqs", 160 | "wsize", 161 | "Tx", 162 | "M", 163 | "N", 164 | "p", 165 | "q", 166 | "r", 167 | "n_observations", 168 | "n_regressors", 169 | "n_cols", 170 | "n_frequencies", 171 | "n_tests", 172 | "n_samples", 173 | "n_permutations", 174 | 
"nchan", 175 | "n_points", 176 | "n_features", 177 | "n_parts", 178 | "n_features_new", 179 | "n_components", 180 | "n_labels", 181 | "n_events_in", 182 | "n_splits", 183 | "n_scores", 184 | "n_outputs", 185 | "n_trials", 186 | "n_estimators", 187 | "n_tasks", 188 | "nd_features", 189 | "n_classes", 190 | "n_targets", 191 | "n_slices", 192 | "n_hpi", 193 | "n_fids", 194 | "n_elp", 195 | "n_pts", 196 | "n_tris", 197 | "n_nodes", 198 | "n_nonzero", 199 | "n_events_out", 200 | "n_segments", 201 | "n_orient_inv", 202 | "n_orient_fwd", 203 | "n_orient", 204 | "n_dipoles_lcmv", 205 | "n_dipoles_fwd", 206 | "n_picks_ref", 207 | "n_coords", 208 | "n_meg", 209 | "n_good_meg", 210 | "n_moments", 211 | "n_patterns", 212 | "n_new_events", 213 | } 214 | -------------------------------------------------------------------------------- /docs/source/development.rst: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | Changelog 5 | --------- 6 | View the changelog for each PyBispectra version here: `version changelog 7 | `_ 8 | 9 | 10 | Installing PyBispectra in editable mode 11 | --------------------------------------- 12 | 13 | If you want to make changes to PyBispectra, you may wish to install it in editable mode. 14 | To do so, first clone the `GitHub repository 15 | `_ to your desired location. Once 16 | cloned, navigate to this location and install the package alongside its development 17 | requirements using pip: 18 | 19 | .. code-block:: 20 | 21 | pip install -e . 22 | pip install .[dev] 23 | 24 | 25 | Contributing to PyBispectra 26 | --------------------------- 27 | 28 | This project and everyone participating in it is governed by our `Code of Conduct 29 | `_. By 30 | participating, you are expected to uphold this code. 31 | 32 | If you encounter any issues with the package or wish to suggest improvements, please 33 | submit a report on the `issues page 34 | `_. 
35 | 36 | If you have made any changes which you would like to see officially added to the 37 | package, consider submitting a `pull request 38 | `_. A unit test suite is included. 39 | Tests must be added for any new features, and adaptations to the existing tests must be 40 | made where necessary. Checks for these tests are run when a pull request is submitted, 41 | however these tests can also be run locally by calling `coverage 42 | `_ with `pytest `_ in 43 | the base directory: 44 | 45 | .. code-block:: 46 | 47 | coverage run && coverage report 48 | 49 | Please also check that the documentation can be built following any changes. The 50 | documentation is built when a pull request is submitted, however the documentation can 51 | also be built locally using `Sphinx `_ in the 52 | ``/docs`` directory (outputs are in the ``/docs/build/html`` directory): 53 | 54 | .. code-block:: 55 | 56 | make html 57 | 58 | Finally, features of the code such as compliance with established styles and spelling 59 | errors in the documentation are also checked. These checks are run when a pull request 60 | is submitted, however they can also be run locally using `pre-commit 61 | `_. To have these checks run automatically whenever you commit 62 | changes, install ``pre-commit`` with the following command in the base directory: 63 | 64 | .. code-block:: 65 | 66 | pre-commit install 67 | -------------------------------------------------------------------------------- /docs/source/examples.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | Find out how to get started with PyBispectra using the provided examples. 5 | 6 | .. 
toctree:: 7 | :maxdepth: 1 8 | :caption: Contents: 9 | 10 | auto_examples/plot_compute_pac 11 | auto_examples/plot_compute_tde 12 | auto_examples/plot_compute_tde_fbands 13 | auto_examples/plot_compute_waveshape 14 | auto_examples/plot_compute_waveshape_noisy_data 15 | auto_examples/plot_compute_general 16 | -------------------------------------------------------------------------------- /docs/source/general.rst: -------------------------------------------------------------------------------- 1 | .. _api_general: 2 | 3 | General 4 | ======= 5 | 6 | :py:mod:`pybispectra.general`: 7 | 8 | .. automodule:: pybispectra.general 9 | :no-members: 10 | :no-inherited-members: 11 | 12 | .. autosummary:: 13 | :template: custom-class-template.rst 14 | :toctree: generated/ 15 | 16 | Bispectrum 17 | Threenorm -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. PyBispectra documentation master file 2 | 3 | .. title:: Home 4 | 5 | .. define new line for html 6 | .. |br| raw:: html 7 | 8 |
9 | 10 | .. image:: _static/logo.gif 11 | :alt: PyBispectra 12 | 13 | |br| 14 | A Python signal processing package for computing spectral- and time-domain interactions 15 | using the bispectrum. 16 | 17 | This package provides the tools for computing phase-amplitude coupling, time delay 18 | estimation, and waveshape features using the bispectrum and bicoherence. Additional 19 | tools for computing amplitude-amplitude coupling, phase-phase coupling, and 20 | spatio-spectral filters are also provided. 21 | 22 | Parallel processing and `Numba `_ optimisation are 23 | implemented to reduce computation times. 24 | 25 | If you use this toolbox in your work, please include the following citation:|br| 26 | Binns, T. S., Pellegrini, F., Jurhar, T., & Haufe, S. (2023). PyBispectra. DOI: 27 | `10.5281/zenodo.8377820 `_ 28 | 29 | .. toctree:: 30 | :maxdepth: 2 31 | :titlesonly: 32 | :caption: Contents: 33 | 34 | motivation 35 | installation 36 | examples 37 | api 38 | development 39 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | To install PyBispectra, activate the desired environment in which you want the package, 5 | then install it using pip: 6 | 7 | .. code-block:: 8 | 9 | pip install pybispectra 10 | 11 | PyBispectra requires Python >= 3.10. 12 | 13 | If you need to create an environment in which to install PyBispectra, you can do so 14 | using `conda `_, after which you can install the 15 | package: 16 | 17 | .. 
code-block:: 18 | 19 | conda create -n pybispectra_env 20 | conda activate pybispectra_env 21 | conda install -c conda-forge pip 22 | pip install pybispectra 23 | -------------------------------------------------------------------------------- /docs/source/motivation.rst: -------------------------------------------------------------------------------- 1 | Motivation 2 | ========== 3 | 4 | What is the bispectrum? 5 | ----------------------- 6 | The bispectrum is a higher-order statistic, based on the Fourier transform of 7 | the third order moment :footcite:`Nikias1987`. Two forms of computing the 8 | bispectrum exist: the direct approach, in which the Fourier coefficients of the 9 | data are computed, which in turn are used to compute the bispectrum; or the 10 | indirect approach, in which the third order moments of the data are computed 11 | first before the Fourier transform is taken. PyBispectra uses the direct 12 | method. The bispectrum, :math:`\textbf{B}`, has the form 13 | 14 | :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2)\textbf{n}^*(f_2+f_1)>` , 15 | 16 | where :math:`kmn` is a combination of signals with Fourier coefficients 17 | :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively; 18 | :math:`f_1` and :math:`f_2` correspond to a lower and higher frequency, 19 | respectively; and :math:`<>` represents the average value over epochs. 20 | 21 | A normalised form of the bispectrum also exists, termed bicoherence. Several 22 | forms of normalisation exist, however a common form is the threenorm: a 23 | univariate normalisation whereby the values of the bicoherence will be bound in 24 | the range :math:`[-1, 1]` in a manner that is independent of the coupling 25 | properties within or between signals :footcite:`Shahbazi2014`. The threenorm, 26 | :math:`\textbf{N}`, has the form 27 | 28 | :math:`\textbf{N}_{kmn}(f_1,f_2)=(<|\textbf{k}(f_1)|^3><|\textbf{m}(f_2)|^3><|\textbf{n}(f_2+f_1)|^3>)^{\frac{1}{3}}` .
29 | 30 | The bicoherence, :math:`\boldsymbol{\mathcal{B}}`, is then computed as 31 | 32 | :math:`\boldsymbol{\mathcal{B}}_{kmn}(f_1,f_2)=\Large\frac{\textbf{B}_{kmn}(f_1,f_2)}{\textbf{N}_{kmn}(f_1,f_2)}` . 33 | 34 | There are several possible uses of the bispectrum and bicoherence for signal 35 | analyses, including for phase-amplitude coupling (a form of cross-frequency 36 | coupling), the analysis of non-sinusoidal waveform features, and time delay 37 | estimation. 38 | 39 | 40 | Why analyse cross-frequency coupling, waveshape, and time delays? 41 | ----------------------------------------------------------------- 42 | Cross-frequency coupling, waveshape analysis, and time delay estimation are 43 | relevant in a range of disciplines. 44 | 45 | Cross-frequency coupling methods allow us to analyse interactions within and 46 | across signals between a lower frequency, :math:`f_1`, and a higher frequency, 47 | :math:`f_2`. Different forms of coupling exist, such as phase-phase coupling, 48 | amplitude-amplitude coupling, and phase-amplitude coupling. In phase-amplitude 49 | coupling, we examine the relationship between the phase of a signal at 50 | :math:`f_1` and the amplitude of a signal at :math:`f_2`. Cross-frequency 51 | interactions have been posited as fundamental aspects of neuronal communication 52 | in the brain :footcite:`Canolty2010`, with alterations in these relationships 53 | implicated in diseases such as Parkinson's :footcite:`deHemptinne2013` and 54 | Alzheimer's :footcite:`Bazzigaluppi2018`. 55 | 56 | Additionally, a signal's shape can contain information of interest. For 57 | example, non-sinusoidal features of signals may reflect particular forms of 58 | interneuronal communication :footcite:`Sherman2016`, and have been shown to be 59 | correlated with symptoms of neurological diseases and altered by their 60 | treatments :footcite:`Cole2017`. 
61 | 62 | Finally, time delays, :math:`\tau`, between signals can also provide useful 63 | insights into systems. Such estimates are crucial for radar and sonar 64 | technologies :footcite:`Chen2004`, but also in neuroscience, where time delays 65 | can be used to infer features of the physical relationships between interacting 66 | brain regions :footcite:`Silchenko2010`. 67 | 68 | 69 | Why use the bispectrum for these analyses? 70 | ------------------------------------------ 71 | The bispectrum offers several advantages over other methods for analysing 72 | phase-amplitude coupling, waveform shape, and time delay estimates. 73 | 74 | For phase-amplitude coupling, common methods such as the modulation index can 75 | be practically challenging, requiring a precise set of filters to be applied to 76 | the data to extract the true underlying interactions (which are not readily 77 | apparent) as well as being computationally expensive (due to the requirement of 78 | Hilbert transforming the data) :footcite:`Zandvoort2021`. Furthermore, when 79 | analysing coupling between separate signals, the modulation index performs 80 | poorly at distinguishing genuine across-site coupling from within-site coupling 81 | in the presence of source mixing :footcite:`Chella2014`. The bispectrum 82 | overcomes these issues, being computationally cheaper, lacking the 83 | need to precisely filter the data :footcite:`Zandvoort2021`, and being more 84 | robust to spurious across-site coupling estimates 85 | :footcite:`PellegriniPreprint`. 86 | 87 | Additionally, analyses of waveshape often rely on searching through the 88 | time-series signal :footcite:`Cole2017`, a computationally expensive procedure 89 | when handling long periods of high sampling-rate data. Furthermore, if 90 | information at particular frequencies is desired, the time-series must be 91 | bandpass filtered, distorting the shape of non-sinusoidal aspects of the 92 | underlying signal :footcite:`Bartz2019`. 
With the bispectrum, non-sinusoidal 93 | waveshape information can be extracted in a computationally cheap, 94 | frequency-resolved manner, without the need to bandpass filter. 95 | 96 | Finally, traditional forms of time delay estimation often rely on 97 | cross-correlation. This method is perfectly adequate in noiseless situations or 98 | those where the noise of the signals are uncorrelated with one another as well 99 | as with the sources of interest :footcite:`Nikias1988,JurharPreprint`. This, 100 | however, is often not the case in many real-world contexts, leading to spurious 101 | time delay estimates. In contrast, the bispectrum is able to suppress the 102 | contribution of Gaussian noise sources to time delay estimates 103 | :footcite:`Nikias1988`, and additional steps can be taken to minimise the 104 | effects of non-Gaussian noise sources, such as those associated with volume 105 | conduction :footcite:`JurharPreprint`. 106 | 107 | 108 | What is available in PyBispectra? 109 | --------------------------------- 110 | PyBispectra offers tools for computing phase-amplitude coupling, time delay 111 | estimation, and waveshape feature analysis using the bispectrum and 112 | bicoherence. Additional tools are included for computing phase-phase coupling, 113 | amplitude-amplitude coupling, Fourier coefficients, time-frequency 114 | representations of data, spatio-spectral filters, as well as tools for plotting 115 | results. 116 | 117 | You can find the installation instructions :doc:`here <installation>`, as well 118 | as examples of how the package can be used :doc:`here <examples>`. 119 | 120 | 121 | References 122 | ---------- 123 | ..
footbibliography:: 124 | -------------------------------------------------------------------------------- /docs/source/refs.bib: -------------------------------------------------------------------------------- 1 | @article{Bartz2019, 2 | title={Analyzing the waveshape of brain oscillations with bicoherence}, 3 | author={Bartz, Sarah and Avarvand, Forooz Shahbazi and Leicht, Gregor and Nolte, Guido}, 4 | journal={NeuroImage}, 5 | volume={188}, 6 | pages={145--160}, 7 | year={2019}, 8 | publisher={Elsevier}, 9 | doi={10.1016/j.neuroimage.2018.11.045} 10 | } 11 | 12 | @article{Bazzigaluppi2018, 13 | title={Early-stage attenuation of phase-amplitude coupling in the hippocampus and medial prefrontal cortex in a transgenic rat model of {A}lzheimer's disease}, 14 | author={Bazzigaluppi, Paolo and Beckett, Tina L. and Koletar, Margaret M. and Lai, Aaron Y. and Joo, Illsung L. and Brown, Mary E. and Carlen, Peter L. and McLaurin, JoAnne and Stefanovic, Bojana}, 15 | journal={Journal of Neurochemistry}, 16 | volume={144}, 17 | number={5}, 18 | pages={669--679}, 19 | year={2018}, 20 | publisher={Wiley Online Library}, 21 | doi={10.1111/jnc.14136} 22 | } 23 | 24 | @article{Canolty2010, 25 | title={The functional role of cross-frequency coupling}, 26 | author={Canolty, Ryan T. 
and Knight, Robert T.}, 27 | journal={Trends in Cognitive Sciences}, 28 | volume={14}, 29 | number={11}, 30 | pages={506--515}, 31 | year={2010}, 32 | publisher={Elsevier}, 33 | doi={10.1016/j.tics.2010.09.001} 34 | } 35 | 36 | @article{Chella2014, 37 | title={Third order spectral analysis robust to mixing artifacts for mapping cross-frequency interactions in {EEG}/{MEG}}, 38 | author={Chella, Federico and Marzetti, Laura and Pizzella, Vittorio and Zappasodi, Filippo and Nolte, Guido}, 39 | journal={NeuroImage}, 40 | volume={91}, 41 | pages={146--161}, 42 | year={2014}, 43 | publisher={Elsevier}, 44 | doi={10.1016/j.neuroimage.2013.12.064} 45 | } 46 | 47 | @inbook{Chen2004, 48 | title={Time {D}elay {E}stimation}, 49 | author={Chen, Jingdong and Huang, Yiteng and Benesty, Jacob}, 50 | editor={Huang, Yiteng and Benesty, Jacob}, 51 | chapter={8}, 52 | series={Audio Signal Processing for Next-Generation Multimedia Communication Systems}, 53 | pages={197--227}, 54 | year={2004}, 55 | publisher={Springer}, 56 | doi={10.1007/1-4020-7769-6_8} 57 | } 58 | 59 | @article{Cohen2022, 60 | title={A tutorial on generalized eigendecomposition for denoising, contrast enhancement, and dimension reduction in multichannel electrophysiology}, 61 | author={Cohen, Michael X.}, 62 | journal={Neuroimage}, 63 | volume={247}, 64 | pages={118809}, 65 | year={2022}, 66 | publisher={Elsevier}, 67 | doi={10.1016/j.neuroimage.2021.118809} 68 | } 69 | 70 | @article{Cole2017, 71 | title={Nonsinusoidal beta oscillations reflect cortical pathophysiology in {P}arkinson's disease}, 72 | author={Cole, Scott R. and van der Meij, Roemer and Peterson, Erik J. and de Hemptinne, Coralie and Starr, Philip A. 
and Voytek, Bradley}, 73 | journal={Journal of Neuroscience}, 74 | volume={37}, 75 | number={18}, 76 | pages={4830--4840}, 77 | year={2017}, 78 | publisher={Soc Neuroscience}, 79 | doi={10.1523/JNEUROSCI.2208-16.2017} 80 | } 81 | 82 | @article{Giehl2021, 83 | title={Dissociating harmonic and non-harmonic phase-amplitude coupling in the human brain}, 84 | author={Giehl, Janet and Noury, Nima and Siegel, Markus}, 85 | journal={NeuroImage}, 86 | volume={227}, 87 | pages={117648}, 88 | year={2021}, 89 | publisher={Elsevier}, 90 | doi={10.1016/j.neuroimage.2020.117648} 91 | } 92 | 93 | @article{deHemptinne2013, 94 | title={Exaggerated phase-amplitude coupling in the primary motor cortex in {P}arkinson disease}, 95 | author={De Hemptinne, Coralie and Ryapolova-Webb, Elena S. and Air, Ellen L. and Garcia, Paul A. and Miller, Kai J. and Ojemann, Jeffrey G. and Ostrem, Jill L. and Galifianakis, Nicholas B. and Starr, Philip A.}, 96 | journal={Proceedings of the National Academy of Sciences}, 97 | volume={110}, 98 | number={12}, 99 | pages={4780--4785}, 100 | year={2013}, 101 | publisher={National Acad Sciences}, 102 | doi={10.1073/pnas.1214546110} 103 | } 104 | 105 | @article{JurharPreprint, 106 | title={{Estimating Time Delays between Signals under Mixed Noise Influence with Novel Cross- and Bispectral Methods}}, 107 | author={Tin Jurhar and Franziska Pellegrini and Ana I. Nuñes del Toro and Tilman Stephani and Guido Nolte and Stefan Haufe}, 108 | year={2025}, 109 | journal={arXiv}, 110 | doi={10.48550/arXiv.2502.17474} 111 | } 112 | 113 | @article{Kovach2018, 114 | title={The bispectrum and its relationship to phase-amplitude coupling}, 115 | author={Kovach, Christopher K. 
and Oya, Hiroyuki and Kawasaki, Hiroto}, 116 | journal={NeuroImage}, 117 | volume={173}, 118 | pages={518--539}, 119 | year={2018}, 120 | publisher={Elsevier}, 121 | doi={10.1016/j.neuroimage.2018.02.033} 122 | } 123 | 124 | @article{Nikias1987, 125 | title={Bispectrum estimation: {A} digital signal processing framework}, 126 | author={Nikias, Chrysostomos L. and Raghuveer, Mysore R.}, 127 | journal={Proceedings of the IEEE}, 128 | volume={75}, 129 | number={7}, 130 | pages={869--891}, 131 | year={1987}, 132 | publisher={IEEE}, 133 | doi={10.1109/PROC.1987.13824} 134 | } 135 | 136 | @article{Nikias1988, 137 | title={Time delay estimation in unknown {G}aussian spatially correlated noise}, 138 | author={Nikias, Chrysostomos L. and Pan, Renlong}, 139 | journal={IEEE Transactions on Acoustics, Speech, and Signal Processing}, 140 | volume={36}, 141 | number={11}, 142 | pages={1706--1714}, 143 | year={1988}, 144 | publisher={IEEE}, 145 | doi={10.1109/29.9008} 146 | } 147 | 148 | @article{Nikulin2011, 149 | title={A novel method for reliable and fast extraction of neuronal {EEG}/{MEG} oscillations on the basis of spatio-spectral decomposition}, 150 | author={Nikulin, Vadim V. 
and Nolte, Guido and Curio, Gabriel}, 151 | journal={NeuroImage}, 152 | volume={55}, 153 | number={4}, 154 | pages={1528--1535}, 155 | year={2011}, 156 | publisher={Elsevier}, 157 | doi={10.1016/j.neuroimage.2011.01.057} 158 | } 159 | 160 | @article{PellegriniPreprint, 161 | title={Distinguishing between-from within-site phase-amplitude coupling using antisymmetrized bispectra}, 162 | author={Pellegrini, Franziska and Nguyen, Tien Dung and Herrera, Taliana and Nikulin, Vadim and Nolte, Guido and Haufe, Stefan}, 163 | journal={bioRxiv}, 164 | year={2023}, 165 | publisher={Cold Spring Harbor Laboratory}, 166 | doi={10.1101/2023.10.26.564193} 167 | } 168 | 169 | @article{Polyakova2020, 170 | title={Cortical control of subthalamic neuronal activity through the hyperdirect and indirect pathways in monkeys}, 171 | author={Polyakova, Zlata and Chiken, Satomi and Hatanaka, Nobuhiko and Nambu, Atsushi}, 172 | journal={Journal of Neuroscience}, 173 | volume={40}, 174 | number={39}, 175 | pages={7451--7463}, 176 | year={2020}, 177 | publisher={Soc Neuroscience}, 178 | doi={10.1523/JNEUROSCI.0772-20.2020} 179 | } 180 | 181 | @article{Shahbazi2014, 182 | title={Univariate normalization of bispectrum using {H}{\"o}lder's inequality}, 183 | author={Shahbazi, Forooz and Ewald, Arne and Nolte, Guido}, 184 | journal={Journal of Neuroscience Methods}, 185 | volume={233}, 186 | pages={177--186}, 187 | year={2014}, 188 | publisher={Elsevier}, 189 | doi={10.1016/j.jneumeth.2014.05.030} 190 | } 191 | 192 | @article{Sherman2016, 193 | title={Neural mechanisms of transient neocortical beta rhythms: {C}onverging evidence from humans, computational modeling, monkeys, and mice}, 194 | author={Sherman, Maxwell A. and Lee, Shane and Law, Robert and Haegens, Saskia and Thorn, Catherine A. and H{\"a}m{\"a}l{\"a}inen, Matti S. and Moore, Christopher I. 
and Jones, Stephanie R.}, 195 | journal={Proceedings of the National Academy of Sciences}, 196 | volume={113}, 197 | number={33}, 198 | pages={E4885--E4894}, 199 | year={2016}, 200 | publisher={National Acad Sciences}, 201 | doi={10.1073/pnas.1604135113} 202 | } 203 | 204 | @article{Silchenko2010, 205 | title={Data-driven approach to the estimation of connectivity and time delays in the coupling of interacting neuronal subsystems}, 206 | author={Silchenko, Alexander N. and Adamchic, Ilya and Pawelczyk, Norbert and Hauptmann, Christian and Maarouf, Mohammad and Sturm, Volker and Tass, Peter A.}, 207 | journal={Journal of Neuroscience Methods}, 208 | volume={191}, 209 | number={1}, 210 | pages={32--44}, 211 | year={2010}, 212 | publisher={Elsevier}, 213 | doi={10.1016/j.jneumeth.2010.06.004} 214 | } 215 | 216 | @article{Zandvoort2021, 217 | title={Defining the filter parameters for phase-amplitude coupling from a bispectral point of view}, 218 | author={Zandvoort, Coen S. and Nolte, Guido}, 219 | journal={Journal of Neuroscience Methods}, 220 | volume={350}, 221 | pages={109032}, 222 | year={2021}, 223 | publisher={Elsevier}, 224 | doi={10.1016/j.jneumeth.2020.109032} 225 | } -------------------------------------------------------------------------------- /docs/source/tde.rst: -------------------------------------------------------------------------------- 1 | .. _api_tde: 2 | 3 | Time Delay Estimation 4 | ===================== 5 | 6 | :py:mod:`pybispectra.tde`: 7 | 8 | .. automodule:: pybispectra.tde 9 | :no-members: 10 | :no-inherited-members: 11 | 12 | .. autosummary:: 13 | :template: custom-class-template.rst 14 | :toctree: generated/ 15 | 16 | TDE -------------------------------------------------------------------------------- /docs/source/utils.rst: -------------------------------------------------------------------------------- 1 | .. _api_utils: 2 | 3 | Utilities 4 | ========= 5 | 6 | :py:mod:`pybispectra.utils`: 7 | 8 | .. 
automodule:: pybispectra.utils 9 | :no-members: 10 | :no-inherited-members: 11 | 12 | .. autosummary:: 13 | :template: custom-class-template.rst 14 | :toctree: generated/ 15 | 16 | ResultsCFC 17 | ResultsTDE 18 | ResultsWaveShape 19 | ResultsGeneral 20 | SpatioSpectralFilter 21 | 22 | .. autosummary:: 23 | :toctree: generated/ 24 | 25 | compute_fft 26 | compute_tfr 27 | compute_rank 28 | set_precision -------------------------------------------------------------------------------- /docs/source/waveshape.rst: -------------------------------------------------------------------------------- 1 | .. _api_waveshape: 2 | 3 | Waveshape 4 | ========= 5 | 6 | :py:mod:`pybispectra.waveshape`: 7 | 8 | .. automodule:: pybispectra.waveshape 9 | :no-members: 10 | :no-inherited-members: 11 | 12 | .. autosummary:: 13 | :template: custom-class-template.rst 14 | :toctree: generated/ 15 | 16 | WaveShape -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: pybispectra 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - python>=3.10 6 | - joblib 7 | - matplotlib 8 | - mne>1.6 9 | - numba 10 | - numpy 11 | - scikit-learn 12 | - scipy 13 | -------------------------------------------------------------------------------- /examples/README.rst: -------------------------------------------------------------------------------- 1 | PyBispectra Examples 2 | ==================== 3 | 4 | Here you will find a set of examples on how PyBispectra can be used to compute 5 | cross-frequency coupling, time delay estimation, and waveshape features. 
-------------------------------------------------------------------------------- /examples/compute_aac.py: -------------------------------------------------------------------------------- 1 | """ 2 | ==================================== 3 | Compute amplitude-amplitude coupling 4 | ==================================== 5 | 6 | This example demonstrates how amplitude-amplitude coupling (AAC) can be 7 | computed with PyBispectra. 8 | """ 9 | 10 | # %% 11 | 12 | import numpy as np 13 | 14 | from pybispectra import AAC, compute_tfr, get_example_data_paths 15 | 16 | ############################################################################### 17 | # Background 18 | # ---------- 19 | # AAC quantifies the relationship between the amplitudes of a lower frequency 20 | # :math:`f_1` and a higher frequency :math:`f_2` within a single signal, or 21 | # across different signals. This is computed as the Pearson correlation 22 | # coefficient between the power of the time-frequency representation (TFR) of 23 | # the signals at :math:`f_1` and :math:`f_2` across time, averaged over epochs 24 | # :footcite:`Giehl2021` (i.e. it is not based on the bispectrum). 25 | 26 | ############################################################################### 27 | # Generating data and computing the amplitude TFR 28 | # -------------------------------------------------- 29 | # We will start by loading some data that we can compute AAC on, then compute 30 | # the amplitude TFR of the data (using the multitaper method in this example).
31 | 32 | # %% 33 | 34 | # load example data 35 | data = np.load(get_example_data_paths("sim_data_aac")) 36 | sampling_freq = 200 # Hz 37 | freqs = np.arange(5, 101, 1) 38 | 39 | # compute amplitude TFR 40 | tfr, freqs = compute_tfr( 41 | data=data, 42 | sampling_freq=sampling_freq, 43 | freqs=freqs, 44 | tfr_mode="multitaper", 45 | n_cycles=7, 46 | verbose=False, 47 | ) 48 | 49 | print( 50 | f"TFR of data: [{tfr.shape[0]} epochs x {tfr.shape[1]} channels x " 51 | f"{tfr.shape[2]} frequencies x {tfr.shape[3]} times]\nFreq. range: " 52 | f"{freqs[0]} - {freqs[-1]} Hz" 53 | ) 54 | 55 | ############################################################################### 56 | # As you can see, we have the amplitude TFR for 2 channels across 30 epochs, with 57 | # frequencies ranging from 5 to 100 Hz (1 Hz resolution), and 400 timepoints 58 | # per epoch. The amplitude TFR of the data will be used to compute AAC. 59 | # 60 | # Computing AAC 61 | # ------------- 62 | # To compute AAC, we start by initialising the :class:`~pybispectra.cfc.AAC` 63 | # class object with the amplitude TFR and the frequency information. To compute 64 | # AAC, we call the :meth:`~pybispectra.cfc.AAC.compute` method. By default, AAC 65 | # is computed between all channel and frequency combinations, however we can 66 | # also specify particular combinations of interest. 67 | # 68 | # Here, we specify the :attr:`~pybispectra.cfc.AAC.indices` to compute AAC on. 69 | # :attr:`~pybispectra.cfc.AAC.indices` is expected to be a tuple containing two 70 | # NumPy arrays for the indices of the seed and target channels, respectively. 71 | # The indices specified below mean that AAC will only be computed across 72 | # frequencies between each channel (i.e. 1 -> 0). By leaving the frequency 73 | # arguments :attr:`~pybispectra.cfc.AAC.f1s` and 74 | # :attr:`~pybispectra.cfc.AAC.f2s` blank, we will look at all possible 75 | # frequency combinations.
76 | 77 | # %% 78 | 79 | aac = AAC( 80 | data=tfr, freqs=freqs, sampling_freq=sampling_freq, verbose=False 81 | ) # initialise object 82 | aac.compute(indices=([1], [0])) # compute AAC 83 | aac_results = aac.results.get_results() # return results as array 84 | 85 | print( 86 | f"AAC results: [{aac_results.shape[0]} connection(s) x " 87 | f"{aac_results.shape[1]} f1s x {aac_results.shape[2]} f2s]" 88 | ) 89 | 90 | ############################################################################### 91 | # We can see that AAC has been computed for 1 connection (1 -> 0), and all 92 | # possible frequency combinations, averaged across our 30 epochs. Whilst there 93 | # are 10,000 such frequency combinations in our [100 x 100] matrix, AAC for the 94 | # lower triangular matrix is naturally a mirror of the upper triangular matrix. 95 | # Accordingly, the values for these redundant entries are left as ``numpy.nan`` 96 | # (see the plotted results below for a visual demonstration of this). 97 | 98 | ############################################################################### 99 | # Plotting AAC 100 | # ------------ 101 | # Let us now inspect the results. For this, we will plot the results for all 102 | # frequencies, although we could specify a subset of frequencies to inspect. 103 | 104 | # %% 105 | 106 | fig, axes = aac.results.plot() # f1s=np.arange(5, 16), f2s=np.arange(55, 66)) 107 | 108 | ############################################################################### 109 | # As you can see, values for the lower right triangle of each plot are missing, 110 | # corresponding to the frequency combinations where :math:`f_1` is greater than 111 | # :math:`f_2`, and hence where AAC is not computed. Note that the ``Figure`` 112 | # and ``Axes`` objects can also be returned for any desired manual adjustments 113 | # of the plots.
114 | 115 | # %% 116 | -------------------------------------------------------------------------------- /examples/compute_pac_control_harmonics.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================================================== 3 | Distinguishing harmonic from non-harmonic phase-amplitude coupling 4 | ================================================================== 5 | 6 | This example demonstrates how the tools of PyBispectra (phase-phase coupling 7 | (PPC), amplitude-amplitude coupling (AAC) and wave shape features) can be used 8 | to identify phase-amplitude coupling (PAC) resulting from frequency harmonics, 9 | as opposed to an interaction between distinct oscillations. 10 | """ 11 | 12 | # %% 13 | 14 | import numpy as np 15 | 16 | from pybispectra import PAC, PPC, SpatioSpectralFilter, WaveShape, compute_fft 17 | 18 | ############################################################################### 19 | # Background 20 | # ---------- 21 | # Typically, PAC is interpreted as an interaction between distinct oscillatory 22 | # signals, whereby the phase of a slower oscillation modulates the amplitude of 23 | # a faster oscillation, so-called non-harmonic PAC. However, PAC can also 24 | # occur as a result of higher-frequency harmonics of a lower frequency 25 | # oscillation of interest, termed harmonic PAC, which may be linked to how 26 | # non-sinusoidal this lower frequency oscillation is :footcite:`Giehl2021`. 27 | # 28 | # Crucially, the mechanisms by which harmonic and non-harmonic PAC occur are 29 | # thought to differ, which can have serious implications for how one interprets 30 | # the results of any PAC analysis. Therefore, it is important to determine 31 | # whether PAC is of the harmonic or non-harmonic variety. This can be done by 32 | # comparing PAC with PPC, AAC, as well as wave shape features 33 | # :footcite:`Giehl2021`. 
34 | 35 | ############################################################################### 36 | # Computing the various metrics 37 | # ----------------------------- 38 | # We start by generating some data, computing its Fourier coefficients, and 39 | # then computing PAC, PPC, AAC, and wave shape features (see the respective 40 | # examples for detailed information on how to do this). 41 | -------------------------------------------------------------------------------- /examples/compute_ppc.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================ 3 | Compute phase-phase coupling 4 | ============================ 5 | 6 | This example demonstrates how phase-phase coupling (PPC) can be computed with 7 | PyBispectra. 8 | """ 9 | 10 | # %% 11 | 12 | import numpy as np 13 | 14 | from pybispectra import PPC, compute_fft, get_example_data_paths 15 | 16 | ############################################################################### 17 | # Background 18 | # ---------- 19 | # PPC quantifies the relationship between the phases of a lower frequency 20 | # :math:`f_1` and a higher frequency :math:`f_2` within a single signal, or 21 | # across different signals. 22 | # 23 | # The method available in PyBispectra can be thought of as a measure of 24 | # coherence between frequencies :footcite:`Giehl2021` (note that it is not 25 | # based on the bispectrum): 26 | # 27 | # :math:`\large PPC(\vec{x}_{f_1},\vec{y}_{f_2})=\LARGE \frac{|\langle 28 | # \vec{a}_x(f_1)\vec{a}_y(f_2) e^{i(\vec{\varphi}_x(f_1)\frac{f_2}{f_1}- 29 | # \vec{\varphi}_y(f_2))} \rangle|}{\langle \vec{a}_x(f_1)\vec{a}_y(f_2) 30 | # \rangle}`, 31 | # 32 | # where :math:`\vec{a}(f)` and :math:`\vec{\varphi}(f)` are the amplitude and 33 | # phase of a signal at a given frequency, respectively, and the angled brackets 34 | # represent the average over epochs. 
The phase of :math:`f_1` is accelerated to 35 | # match that of :math:`f_2` by scaling the phase by a factor of 36 | # :math:`\frac{f_2}{f_1}`. PPC values for this measure lie in the range 37 | # :math:`[0, 1]`, with 0 representing a random phase relationship, and 1 38 | # representing perfect phase coupling. 39 | 40 | ############################################################################### 41 | # Generating data and computing Fourier coefficients 42 | # -------------------------------------------------- 43 | # We will start by loading some data that we can compute PPC on, then compute 44 | # the Fourier coefficients of the data. 45 | 46 | # %% 47 | 48 | data_dir = "data" 49 | 50 | # generate data 51 | data = np.load(get_example_data_paths("sim_data_ppc")) 52 | sampling_freq = 500 # Hz 53 | 54 | # compute Fourier coeffs. 55 | fft_coeffs, freqs = compute_fft( 56 | data=data, 57 | sampling_freq=sampling_freq, 58 | n_points=sampling_freq, 59 | verbose=False, 60 | ) 61 | 62 | print( 63 | f"FFT coeffs.: [{fft_coeffs.shape[0]} epochs x {fft_coeffs.shape[1]} " 64 | f"channels x {fft_coeffs.shape[2]} frequencies]\n" 65 | f"Freq. range: {freqs[0]} - {freqs[-1]} Hz" 66 | ) 67 | 68 | ############################################################################### 69 | # As you can see, we have FFT coefficients for 2 channels across 30 epochs, 70 | # with 101 frequencies ranging from 0 to 50 Hz with a frequency resolution of 71 | # 0.5 Hz. We will use these coefficients to compute PPC. 72 | # 73 | # Computing PPC 74 | # ------------- 75 | # To compute PPC, we start by initialising the :class:`~pybispectra.cfc.PPC` 76 | # class object with the FFT coefficients and the frequency information. To 77 | # compute PPC, we call the :meth:`~pybispectra.cfc.PPC.compute` method. By 78 | # default, PPC is computed between all channel and frequency combinations, 79 | # however we can also specify particular combinations of interest. 
80 | # 81 | # Here, we specify the :attr:`~pybispectra.cfc.PPC.indices` to compute PPC on. 82 | # :attr:`~pybispectra.cfc.PPC.indices` is expected to be a tuple containing two 83 | # NumPy arrays for the indices of the seed and target channels, respectively. 84 | # The indices specified below mean that PPC will only be computed across 85 | # frequencies between channels (i.e. 0 -> 1). By leaving the frequency 86 | # arguments :attr:`~pybispectra.cfc.PPC.f1s` and 87 | # :attr:`~pybispectra.cfc.PPC.f2s` blank, we will look at all possible 88 | # frequency combinations. 89 | 90 | # %% 91 | 92 | ppc = PPC( 93 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 94 | ) # initialise object 95 | ppc.compute(indices=([0], [1])) # compute PPC 96 | ppc_results = ppc.results.get_results() # return results as array 97 | 98 | print( 99 | f"PPC results: [{ppc_results.shape[0]} connection(s) x " 100 | f"{ppc_results.shape[1]} f1 x {ppc_results.shape[2]} f2]" 101 | ) 102 | 103 | ############################################################################### 104 | # We can see that PPC has been computed for one connection (0 -> 1), and all 105 | # possible frequency combinations, averaged across our 30 epochs. Whilst there 106 | # are 10,000 such frequency combinations in our [100 x 100] matrices, PPC for 107 | # those entries where :math:`f_1` would be higher than :math:`f_2` is not 108 | # computed, in which case the values are ``numpy.nan``. 109 | 110 | ############################################################################### 111 | # Plotting PPC 112 | # ------------ 113 | # Let us now inspect the results. Here, we plot the results for a subset of 114 | # frequencies, although we could also plot all frequencies.
115 | 116 | # %% 117 | 118 | ppc.results.plot(f1s=np.arange(5, 16), f2s=np.arange(55, 66)) 119 | 120 | ############################################################################### 121 | # As you can see, values for the lower right triangle of each plot are missing, 122 | # corresponding to the frequency combinations where :math:`f_1` is greater than 123 | # :math:`f_2`, and hence where PPC is not computed. Note that the ``Figure`` 124 | # and ``Axes`` objects can also be returned for any desired manual adjustments 125 | # of the plots. 126 | 127 | ############################################################################### 128 | # References 129 | # ----------------------------------------------------------------------------- 130 | # .. footbibliography:: 131 | 132 | # %% 133 | -------------------------------------------------------------------------------- /examples/plot_compute_general.py: -------------------------------------------------------------------------------- 1 | """ 2 | ==================================== 3 | Compute the bispectrum and threenorm 4 | ==================================== 5 | 6 | This example demonstrates how the bispectrum and threenorm can be computed. 7 | """ 8 | 9 | # Author(s): 10 | # Thomas S. Binns | github.com/tsbinns 11 | 12 | # sphinx_gallery_multi_image = "single" 13 | 14 | # %% 15 | 16 | import numpy as np 17 | 18 | from pybispectra import ( 19 | PAC, 20 | Bispectrum, 21 | ResultsCFC, 22 | Threenorm, 23 | compute_fft, 24 | get_example_data_paths, 25 | ) 26 | 27 | ######################################################################################## 28 | # Background 29 | # ---------- 30 | # The bispectrum can be used for various types of signal analysis, including 31 | # phase-amplitude coupling :footcite:`Kovach2018`, non-sinusoidal waveshape 32 | # :footcite:`Bartz2019`, and time delay estimation :footcite:`Nikias1988`. 
33 | # 34 | # Although PyBispectra offers dedicated tools for computing these metrics, this involves 35 | # taking information from specific combinations of channels (see: 36 | # :doc:`plot_compute_pac`; :doc:`plot_compute_waveshape`; and :doc:`plot_compute_tde`). 37 | # 38 | # For your analyses, you may wish to specify the combination of channels freely, a 39 | # feature offered by :class:`~pybispectra.general.Bispectrum` (and the equivalent 40 | # :class:`~pybispectra.general.Threenorm` for normalisation :footcite:`Shahbazi2014`). 41 | # 42 | # In this example, we will demonstrate how these classes can be used to freely compute 43 | # the bispectrum and threenorm, and show by comparing to the dedicated classes that the 44 | # same information is captured in these general tools. 45 | # 46 | # Here, we focus on phase-amplitude coupling (PAC). The bispectrum has the general form 47 | # 48 | # :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2)\textbf{n}^* 49 | # (f_2+f_1)>` , 50 | # 51 | # where :math:`kmn` is a combination of signals with Fourier coefficients 52 | # :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively; and 53 | # :math:`<>` represents the average value over epochs. The computation of PAC follows 54 | # from this :footcite:`Kovach2018` 55 | # 56 | # :math:`\textbf{B}_{xyy}(f_1,f_2)=<\textbf{x}(f_1)\textbf{y}(f_2)\textbf{y}^* 57 | # (f_2+f_1)>` , 58 | # 59 | # :math:`\textrm{PAC}(\textbf{x}_{f_1},\textbf{y}_{f_2})=|\textbf{B}_{xyy}(f_1,f_2)|` . 60 | 61 | ######################################################################################## 62 | # Computing PAC with the dedicated class 63 | # --------------------------------------- 64 | # We start by computing PAC using the dedicated :class:`~pybispectra.cfc.PAC` class, 65 | # which we will take as our reference for results. 
66 | # 67 | # The data we load here is simulated data containing coupling between the 10 Hz phase of 68 | # one signal (index 0) and the 60 Hz amplitude of another (index 1). 69 | 70 | # %% 71 | 72 | # load simulated data 73 | data = np.load(get_example_data_paths("sim_data_pac_bivariate")) 74 | sampling_freq = 200 # sampling frequency in Hz 75 | 76 | # compute Fourier coeffs. 77 | fft_coeffs, freqs = compute_fft( 78 | data=data, sampling_freq=sampling_freq, n_points=sampling_freq, verbose=False 79 | ) 80 | 81 | # compute & plot PAC 82 | pac = PAC( 83 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 84 | ) # initialise object 85 | pac.compute(indices=((0,), (1,))) # compute PAC 86 | pac_results = pac.results.get_results(copy=False) # extract results array 87 | pac.results.plot(f1s=(5, 15), f2s=(55, 65)) # plot PAC 88 | 89 | ######################################################################################## 90 | # As expected, we observe 10-60 Hz PAC with channel index 0 as our seed (:math:`x`; 10 91 | # Hz phase) and channel index 1 as our target (:math:`y`; 60 Hz amplitude). 92 | # 93 | # With the dedicated :class:`~pybispectra.cfc.PAC` class, the seeds and targets 94 | # are automatically assigned to the appropriate :math:`kmn` combination when 95 | # computing the bispectrum, in this case :math:`xyy`. 96 | 97 | ######################################################################################## 98 | # Computing PAC with the general class 99 | # ------------------------------------- 100 | # However, an equivalent result can be obtained using the 101 | # :class:`~pybispectra.general.Bispectrum` class and specifying the combination of 102 | # :math:`kmn=xyy` manually. 
103 | 104 | # %% 105 | 106 | # compute the bispectrum where kmn = xyy & plot results 107 | bs = Bispectrum( 108 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 109 | ) # initialise object 110 | bs.compute(indices=((0,), (1,), (1,))) # kmn = xyy 111 | bs.results.plot(f1s=(5, 15), f2s=(55, 65)) # plot bispectrum 112 | 113 | ######################################################################################## 114 | # Since the bispectrum is complex-valued, we must take the absolute value to compare to 115 | # PAC. Additionally, we can package the results into the dedicated 116 | # :class:`~pybispectra.utils.ResultsCFC` class for cross-frequency coupling results. 117 | # 118 | # Plotting the results alongside each other shows they are identical. 119 | 120 | # %% 121 | 122 | # package general class results 123 | bs_pac = ResultsCFC( 124 | data=np.abs(bs.results.get_results()), 125 | indices=((0,), (1,)), 126 | f1s=bs.results.f1s, 127 | f2s=bs.results.f2s, 128 | name="PAC | Bispectrum (manual)", 129 | ) 130 | bs_pac_results = bs_pac.get_results(copy=False) 131 | 132 | # compare general and dedicated class results 133 | if np.all( 134 | bs_pac_results[~np.isnan(bs_pac_results)] == pac_results[~np.isnan(pac_results)] 135 | ): 136 | print("Results are identical!") 137 | else: 138 | raise ValueError("Results are not identical!") 139 | 140 | pac.results.plot(f1s=(5, 15), f2s=(55, 65)) # dedicated class 141 | bs_pac.plot(f1s=(5, 15), f2s=(55, 65)) # general class 142 | 143 | ######################################################################################## 144 | # Bispectrum normalisation 145 | # ------------------------ 146 | # The bispectrum can also be normalised to the bicoherence, 147 | # :math:`\boldsymbol{\mathcal{B}}`, using the threenorm, :math:`\textbf{N}`, 148 | # :footcite:`Shahbazi2014` 149 | # 150 | # :math:`\textbf{N}_{xyy}(f_1,f_2)=(<|\textbf{x}(f_1)|^3><|\textbf{y}(f_2)|^3> 151 | # <|\textbf{y}(f_2+f_1)|^3>)^{\frac{1}{3}}` , 
152 | # 153 | # :math:`\boldsymbol{\mathcal{B}}_{xyy}(f_1,f_2)=\Large\frac{\textbf{B}_{xyy}(f_1,f_2)} 154 | # {\textbf{N}_{xyy}(f_1,f_2)}` , 155 | # 156 | # where the resulting values lie in the range :math:`[-1, 1]`, controlling for the 157 | # amplitude of the signals. 158 | # 159 | # While the dedicated :class:`~pybispectra.cfc.PAC` class has an option for performing 160 | # this normalisation, we can also compute the threenorm separately using the 161 | # :class:`~pybispectra.general.Threenorm` class and apply the normalisation manually. 162 | # 163 | # Again, we specify the :math:`kmn` channel combination as :math:`xyy` for our seed 164 | # (:math:`x`) and target (:math:`y`). 165 | 166 | # %% 167 | 168 | # compute the threenorm 169 | norm = Threenorm( 170 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 171 | ) # initialise object 172 | norm.compute(indices=((0,), (1,), (1,))) # kmn = xyy 173 | 174 | # normalise the bispectrum 175 | bicoh = np.abs( 176 | bs.results.get_results(copy=False) / norm.results.get_results(copy=False) 177 | ) 178 | 179 | # package bicoherence results 180 | bicoh_pac = ResultsCFC( 181 | data=bicoh, 182 | indices=((0,), (1,)), 183 | f1s=bs.results.f1s, 184 | f2s=bs.results.f2s, 185 | name="PAC | Bicoherence (manual)", 186 | ) 187 | bicoh_pac_results = bicoh_pac.get_results(copy=False) 188 | 189 | ######################################################################################## 190 | # Comparing these bicoherence values with those obtained from the dedicated 191 | # :class:`~pybispectra.cfc.PAC` class, we see that both approaches produce identical 192 | # results. 
193 | 194 | # %% 195 | 196 | # compute bicoherence PAC with dedicated class 197 | pac_norm = PAC( 198 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 199 | ) # initialise object 200 | pac_norm.compute(indices=((0,), (1,)), norm=True) # compute PAC 201 | pac_norm_results = pac_norm.results.get_results(copy=False) # extract results array 202 | 203 | # compare general and dedicated class results 204 | if np.all( 205 | bicoh_pac_results[~np.isnan(bicoh_pac_results)] 206 | == pac_norm_results[~np.isnan(pac_norm_results)] 207 | ): 208 | print("Results are identical!") 209 | else: 210 | raise ValueError("Results are not identical!") 211 | 212 | pac_norm.results.plot(f1s=(5, 15), f2s=(55, 65)) # dedicated class 213 | bicoh_pac.plot(f1s=(5, 15), f2s=(55, 65)) # general class 214 | 215 | ######################################################################################## 216 | # Manual computation of waveshape results and antisymmetrisation 217 | # -------------------------------------------------------------- 218 | # The :class:`~pybispectra.general.Bispectrum` and 219 | # :class:`~pybispectra.general.Threenorm` classes can also be used to compute 220 | # non-sinusoidal waveshape results (equivalent to 221 | # :class:`~pybispectra.waveshape.WaveShape`) and antisymmetrised bispectra (e.g. as in 222 | # :class:`~pybispectra.cfc.PAC`) by following the equations listed in the respective 223 | # documentation and publications. 224 | 225 | ######################################################################################## 226 | # Conclusion 227 | # ---------- 228 | # Altogether, the :class:`~pybispectra.general.Bispectrum` and 229 | # :class:`~pybispectra.general.Threenorm` classes provide a flexible way to compute 230 | # bispectra and normalisation terms with custom :math:`kmn` channel combinations. 
231 | 232 | ######################################################################################## 233 | # References 234 | # ---------- 235 | # .. footbibliography:: 236 | 237 | # %% 238 | -------------------------------------------------------------------------------- /examples/plot_compute_pac.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================ 3 | Compute phase-amplitude coupling 4 | ================================ 5 | 6 | This example demonstrates how phase-amplitude coupling (PAC) can be computed with 7 | PyBispectra. 8 | """ 9 | 10 | # Author(s): 11 | # Thomas S. Binns | github.com/tsbinns 12 | 13 | # %% 14 | 15 | import numpy as np 16 | 17 | from pybispectra import PAC, compute_fft, get_example_data_paths 18 | 19 | ######################################################################################## 20 | # Background 21 | # ---------- 22 | # PAC quantifies the relationship between the phases of a lower frequency :math:`f_1` 23 | # and the amplitude of a higher frequency :math:`f_2` within a single signal, 24 | # :math:`\textbf{x}`, or across different signals, :math:`\textbf{x}` and 25 | # :math:`\textbf{y}`. 26 | # 27 | # The method available in PyBispectra is based on the bispectrum, :math:`\textbf{B}`. 28 | # The bispectrum has the general form 29 | # 30 | # :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2)\textbf{n}^* 31 | # (f_2+f_1)>` , 32 | # 33 | # where :math:`kmn` is a combination of signals with Fourier coefficients 34 | # :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively; and 35 | # :math:`<>` represents the average value over epochs. The computation of PAC follows 36 | # from this :footcite:`Kovach2018` 37 | # 38 | # :math:`\textbf{B}_{xyy}(f_1,f_2)=<\textbf{x}(f_1)\textbf{y}(f_2)\textbf{y}^* 39 | # (f_2+f_1)>` , 40 | # 41 | # :math:`\textrm{PAC}(\textbf{x}_{f_1},\textbf{y}_{f_2})=|\textbf{B}_{xyy}(f_1,f_2)|` . 
42 | # 43 | # The bispectrum can be normalised to the bicoherence, :math:`\boldsymbol{\mathcal{B}}`, 44 | # using the threenorm, :math:`\textbf{N}`, :footcite:`Shahbazi2014` 45 | # 46 | # :math:`\textbf{N}_{xyy}(f_1,f_2)=(<|\textbf{x}(f_1)|^3><|\textbf{y}(f_2)|^3> 47 | # <|\textbf{y}(f_2+f_1)|^3>)^{\frac{1}{3}}` , 48 | # 49 | # :math:`\boldsymbol{\mathcal{B}}_{xyy}(f_1,f_2)=\Large\frac{\textbf{B}_{xyy}(f_1,f_2)} 50 | # {\textbf{N}_{xyy}(f_1,f_2)}` , 51 | # 52 | # :math:`\textrm{PAC}_{\textrm{norm}}(\textbf{x}_{f_1},\textbf{y}_{f_2})=| 53 | # \boldsymbol{\mathcal{B}}_{xyy}(f_1,f_2)|` , 54 | # 55 | # where the resulting values lie in the range :math:`[0, 1]`. Furthermore, PAC can be 56 | # antisymmetrised by subtracting the results from those found using the transposed 57 | # bispectrum, :math:`\textbf{B}_{xyx}`, :footcite:`Chella2014` 58 | # 59 | # :math:`\textrm{PAC}_{\textrm{antisym}}(\textbf{x}_{f_1},\textbf{y}_{f_2})=| 60 | # \textbf{B}_{xyy}-\textbf{B}_{xyx}|` . 61 | # 62 | # In the context of analysing PAC between two signals, antisymmetrisation allows you to 63 | # correct for spurious estimates of coupling arising from interactions within the 64 | # signals themselves in instances of source mixing, providing a more robust connectivity 65 | # metric :footcite:`PellegriniPreprint`. The same principle applies for the 66 | # antisymmetrisation of the bicoherence. 67 | 68 | ######################################################################################## 69 | # Loading data and computing Fourier coefficients 70 | # ----------------------------------------------- 71 | # We will start by loading some simulated data containing coupling between the 10 Hz 72 | # phase of one signal and the 60 Hz amplitude of another. We will then compute the 73 | # Fourier coefficients of the data, which will be used to compute PAC. 
74 | 75 | # %% 76 | 77 | # load simulated data 78 | data = np.load(get_example_data_paths("sim_data_pac_bivariate")) 79 | sampling_freq = 200 # sampling frequency in Hz 80 | 81 | # compute Fourier coeffs. 82 | fft_coeffs, freqs = compute_fft( 83 | data=data, sampling_freq=sampling_freq, n_points=sampling_freq, verbose=False 84 | ) 85 | 86 | print( 87 | f"FFT coeffs.: [{fft_coeffs.shape[0]} epochs x {fft_coeffs.shape[1]} channels x " 88 | f"{fft_coeffs.shape[2]} frequencies]\nFreq. range: {freqs[0]} - {freqs[-1]} Hz" 89 | ) 90 | 91 | ######################################################################################## 92 | # As you can see, we have Fourier coefficients for 2 channels across 30 epochs, with 101 93 | # frequencies ranging from 0 to 100 Hz with a frequency resolution of 1 Hz. We will use 94 | # these coefficients to compute PAC. 95 | # 96 | # Computing PAC 97 | # ------------- 98 | # To compute PAC, we start by initialising the :class:`~pybispectra.cfc.PAC` class 99 | # object with the Fourier coefficients and the frequency information. To compute PAC, we 100 | # call the :meth:`~pybispectra.cfc.PAC.compute` method. By default, PAC is computed 101 | # between all channel and frequency combinations, however we can also specify particular 102 | # combinations of interest. 103 | # 104 | # Here, we specify the ``indices`` to compute PAC on. ``indices`` is expected to be a 105 | # tuple containing two lists for the indices of the seed and target channels, 106 | # respectively. The indices specified below mean that PAC will only be computed across 107 | # frequencies between the channels (i.e. 0 → 1). By leaving the frequency arguments 108 | # ``f1s`` and ``f2s`` blank, we will look at all possible frequency combinations. 
109 | 110 | # %% 111 | 112 | pac = PAC( 113 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 114 | ) # initialise object 115 | pac.compute(indices=((0,), (1,))) # compute PAC 116 | 117 | pac_results = pac.results.get_results(copy=False) # return results as array 118 | 119 | print( 120 | f"PAC results: [{pac_results.shape[0]} connection x {pac_results.shape[1]} f1s x " 121 | f"{pac_results.shape[2]} f2s]" 122 | ) 123 | 124 | ######################################################################################## 125 | # We can see that PAC has been computed for one connection (0 → 1), and all possible 126 | # frequency combinations, averaged across our 30 epochs. Whilst there are > 10,000 such 127 | # frequency combinations in our [101 x 101] matrix, PAC for those entries where 128 | # :math:`f_1` would be higher than :math:`f_2`, as well as where :math:`f_2 + f_1` 129 | # exceeds the frequency bounds of our data, cannot be computed. In such cases, the 130 | # values are :obj:`numpy.nan`. 131 | 132 | ######################################################################################## 133 | # Plotting PAC 134 | # ------------ 135 | # Let us now inspect the results. Here, we specify a subset of frequencies to inspect 136 | # around the simulated interaction. If we wished, we could also plot all frequencies. 137 | # Note that the :class:`~matplotlib.figure.Figure` and :class:`~matplotlib.axes.Axes` 138 | # objects can also be returned for any desired manual adjustments of the plots. In this 139 | # simulated data example, we can see that the bispectrum indeed identifies the 140 | # occurrence of 10-60 Hz PAC between our seed and target channel. 
141 | 142 | # %% 143 | 144 | fig, axes = pac.results.plot(f1s=(5, 15), f2s=(55, 65)) 145 | 146 | ######################################################################################## 147 | # Antisymmetrisation for across-signal PAC 148 | # ---------------------------------------- 149 | # In this simulated data example, interactions are only present between the signals, and 150 | # not within the signals themselves. This, however, is not always the case, and 151 | # estimates of across-site PAC can be corrupted by coupling interactions within each 152 | # signal in the presence of source mixing. To combat this, we can employ 153 | # antisymmetrisation :footcite:`Chella2014`. The example below shows some such simulated 154 | # data consisting of two independent sources, with 10-60 Hz PAC within each source (top 155 | # two plots), as well as a mixing of the underlying sources to produce 10-60 Hz PAC 156 | # between the two signals (bottom left plot). When applying antisymmetrisation, however, 157 | # we see that the spurious across-signal PAC arising from the source mixing is 158 | # suppressed (bottom right plot). Antisymmetrisation is therefore a useful technique to 159 | # differentiate genuine across-site coupling from spurious coupling arising from the 160 | # within-site interactions of source-mixed signals. 161 | 162 | # %% 163 | 164 | # load simulated data 165 | data = np.load(get_example_data_paths("sim_data_pac_univariate")) 166 | sampling_freq = 200 167 | 168 | # compute Fourier coeffs.
169 | fft_coeffs, freqs = compute_fft( 170 | data=data, sampling_freq=sampling_freq, n_points=sampling_freq, verbose=False 171 | ) 172 | 173 | # compute PAC 174 | pac = PAC(data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False) 175 | pac.compute( 176 | indices=((0, 1, 0), (0, 1, 1)), f1s=(5, 15), f2s=(55, 65), antisym=(False, True) 177 | ) 178 | pac_standard, pac_antisym = pac.results 179 | 180 | pac_standard_array = pac_standard.get_results(copy=False) 181 | pac_antisym_array = pac_antisym.get_results(copy=False) 182 | vmin = np.min((np.nanmin(pac_standard_array), np.nanmin(pac_antisym_array))) 183 | vmax = np.max((np.nanmax(pac_standard_array), np.nanmax(pac_antisym_array))) 184 | 185 | # plot unsymmetrised PAC within & between signals 186 | fig_standard, axes_standard = pac_standard.plot( 187 | f1s=(5, 15), f2s=(55, 65), cbar_range=(vmin, vmax) 188 | ) 189 | 190 | # plot antisymmetrised PAC between signals 191 | fig_antisym, axes_antisym = pac_antisym.plot( 192 | nodes=(2,), f1s=(5, 15), f2s=(55, 65), cbar_range=(vmin, vmax) 193 | ) 194 | 195 | ######################################################################################## 196 | # References 197 | # ---------- 198 | # .. footbibliography:: 199 | 200 | # %% 201 | -------------------------------------------------------------------------------- /examples/plot_compute_tde.py: -------------------------------------------------------------------------------- 1 | """ 2 | ============================ 3 | Compute time delay estimates 4 | ============================ 5 | 6 | This example demonstrates how time delay estimation (TDE) can be computed with 7 | PyBispectra. 8 | """ 9 | 10 | # Author(s): 11 | # Thomas S. 
Binns | github.com/tsbinns 12 | 13 | # sphinx_gallery_multi_image = "single" 14 | 15 | # %% 16 | 17 | import numpy as np 18 | 19 | from pybispectra import TDE, compute_fft, get_example_data_paths 20 | 21 | ############################################################################### 22 | # Background 23 | # ---------- 24 | # A common feature of interest in signal analyses is determining the flow of information 25 | # between two signals, in terms of both the direction and the particular time lag 26 | # between them. 27 | # 28 | # The method available in PyBispectra is based on the bispectrum, :math:`\textbf{B}`. 29 | # The bispectrum has the general form 30 | # 31 | # :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2)\textbf{n}^* 32 | # (f_2+f_1)>` , 33 | # 34 | # where :math:`kmn` is a combination of signals with Fourier coefficients 35 | # :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively; 36 | # :math:`f_1` and :math:`f_2` correspond to a lower and higher frequency, respectively; 37 | # and :math:`<>` represents the average value over epochs. When computing time delays, 38 | # information from :math:`\textbf{n}` is taken not only from the positive frequencies, 39 | # but also the negative frequencies. 40 | # 41 | # Four methods exist for computing TDE based on the bispectrum :footcite:`Nikias1988`. 42 | # The fundamental equation is as follows 43 | # 44 | # :math:`\textrm{TDE}_{xy}(\tau)=\int_{-\pi}^{+\pi}\int_{-\pi}^{+\pi}\textbf{I} 45 | # (\textbf{x}_{f_1},\textbf{y}_{f_2})e^{-if_1\tau}df_1df_2` , 46 | # 47 | # where :math:`\textbf{I}` varies depending on the method; and :math:`\tau` is a given 48 | # time delay. 
Phase information of the signals is extracted from the bispectrum in two 49 | # variants used by the different methods: 50 | # 51 | # :math:`\boldsymbol{\phi}(\textbf{x}_{f_1},\textbf{y}_{f_2})=\boldsymbol{\varphi}_ 52 | # {\textbf{B}_{xyx}} (f_1,f_2)-\boldsymbol{\varphi}_{\textbf{B}_{xxx}}(f_1,f_2)` ; 53 | # 54 | # :math:`\boldsymbol{\phi}'(\textbf{x}_{f_1},\textbf{y}_{f_2})=\boldsymbol{\varphi}_ 55 | # {\textbf{B}_{xyx}}(f_1,f_2)-\frac{1}{2}(\boldsymbol{\varphi}_{\textbf{B}_{xxx}} 56 | # (f_1,f_2) + \boldsymbol{\varphi}_{\textbf{B}_{yyy}}(f_1,f_2))` . 57 | # 58 | # **Method I**: 59 | # :math:`\textbf{I}(\textbf{x}_{f_1},\textbf{y}_{f_2})=e^{i\boldsymbol{\phi}(\textbf{x} 60 | # _{f_1},\textbf{y}_{f_2})}` 61 | # 62 | # **Method II**: 63 | # :math:`\textbf{I}(\textbf{x}_{f_1},\textbf{y}_{f_2})=e^{i\boldsymbol{\phi}' 64 | # (\textbf{x}_{f_1},\textbf{y}_{f_2})}` 65 | # 66 | # **Method III**: 67 | # :math:`\textbf{I}(\textbf{x}_{f_1},\textbf{y}_{f_2})=\Large \frac{\textbf{B}_{xyx} 68 | # (f_1,f_2)}{\textbf{B}_{xxx}(f_1,f_2)}` 69 | # 70 | # **Method IV**: 71 | # :math:`\textbf{I}(\textbf{x}_{f_1},\textbf{y}_{f_2})=\Large \frac{|\textbf{B}_{xyx} 72 | # (f_1,f_2)|e^{i\boldsymbol{\phi}'(\textbf{x}_{f_1},\textbf{y}_{f_2})}}{\sqrt{ 73 | # |\textbf{B}_{xxx}(f_1,f_2)||\textbf{B}_{yyy}(f_1,f_2)|}}` 74 | # 75 | # where :math:`\boldsymbol{\varphi}_{\textbf{B}}` is the phase of the bispectrum. All 76 | # four methods aim to capture the phase difference between :math:`\textbf{x}` and 77 | # :math:`\textbf{y}`. Method I involves the extraction of phase spectrum periodicity and 78 | # monotony, with method III involving an additional amplitude weighting from the 79 | # bispectrum of :math:`\textbf{x}`. Method II instead relies on a combination of phase 80 | # spectra of the different frequency components, with method IV containing an additional 81 | # amplitude weighting from the bispectrum of :math:`\textbf{x}` and :math:`\textbf{y}`. 82 | # No single method is superior to another. 
If time delay estimates for only certain 83 | # frequencies are desired, this information can be extracted from the matrix 84 | # :math:`\textbf{I}`. 85 | # 86 | # As a result of volume conduction artefacts (i.e. a common underlying signal that 87 | # propagates instantaneously to :math:`\textbf{x}` and :math:`\textbf{y}`), time delay 88 | # estimates can become contaminated, resulting in spurious estimates of :math:`\tau=0`. 89 | # Thankfully, antisymmetrisation of the bispectrum can be used to address these mixing 90 | # artefacts :footcite:`Chella2014`, which is implemented here as the replacement of 91 | # :math:`\textbf{B}_{xyx}` with :math:`(\textbf{B}_{xyx} - \textbf{B}_{yxx})` in the 92 | # above equations :footcite:`JurharPreprint`. 93 | 94 | ######################################################################################## 95 | # Loading data and computing Fourier coefficients 96 | # ----------------------------------------------- 97 | # We will start by loading some simulated data containing a time delay of 250 ms between 98 | # two signals, where :math:`\textbf{y}` is a delayed version of :math:`\textbf{x}`. We 99 | # will then compute the Fourier coefficients of the data, which will be used to compute 100 | # the time delay. 101 | # 102 | # We specify ``n_points`` to be twice the number of time points in the data, plus one. 103 | # This ensures that the time delay estimate spectrum is returned for the whole epoch 104 | # length (in both positive and negative delay directions, i.e. where :math:`\textbf{x}` 105 | # drives :math:`\textbf{y}`, and :math:`\textbf{y}` drives :math:`\textbf{x}`) with the 106 | # same temporal resolution as the original data. Using a number of points smaller than 107 | # this will reduce the window in which time delay estimates can be computed below the 108 | # epoch length, whereas using a higher number of points will only artificially increase 109 | # this window length. 
Accordingly ``n_points=2 * n_times + 1`` is recommended. 110 | # 111 | # In this example, our data consists of 30 epochs of 200 timepoints each, which with a 112 | # 200 Hz sampling frequency corresponds to 1 second of data per epoch (one timepoint 113 | # every 5 ms). Note that the temporal resolution of the time delay estimates can be 114 | # increased by increasing the sampling rate of the data. 115 | 116 | # %% 117 | 118 | # load simulated data 119 | data = np.load(get_example_data_paths("sim_data_tde_independent_noise")) 120 | sampling_freq = 200 # sampling frequency in Hz 121 | n_times = data.shape[2] # number of timepoints in the data 122 | 123 | # compute Fourier coeffs. 124 | fft_coeffs, freqs = compute_fft( 125 | data=data, 126 | sampling_freq=sampling_freq, 127 | n_points=2 * n_times + 1, # recommended for time delay estimation 128 | window="hamming", 129 | verbose=False, 130 | ) 131 | 132 | print( 133 | f"FFT coeffs.: [{fft_coeffs.shape[0]} epochs x {fft_coeffs.shape[1]} channels x " 134 | f"{fft_coeffs.shape[2]} frequencies]\nFreq. range: {freqs[0]:.0f} - " 135 | f"{freqs[1]:.0f} Hz" 136 | ) 137 | 138 | ######################################################################################## 139 | # Computing time delays 140 | # --------------------- 141 | # To compute time delays, we start by initialising the :class:`~pybispectra.tde.TDE` 142 | # class object with the Fourier coefficients and the frequency information and call the 143 | # :meth:`~pybispectra.tde.TDE.compute` method. For simplicity, we will focus on TDE 144 | # using method I. To demonstrate that TDE can show the directionality of information 145 | # flow as well as the particular time lag, we will compute TDE from signals 0 → 1 (the 146 | # genuine direction of information flow where the time delay should have a positive 147 | # value) and from signals 1 → 0 (the reverse direction of information flow where the 148 | # time delay should have a negative value). 
149 | # 150 | # Using the ``fmin`` and ``fmax`` arguments, time delay information for frequency bands 151 | # of interest can be isolated by specifying the lower and higher frequencies of 152 | # interest. Here, we will compute time delays for all frequencies. Performing time delay 153 | # estimation on frequency bands is discussed in the following example: 154 | # :doc:`plot_compute_tde_fbands`. 155 | 156 | # %% 157 | 158 | tde = TDE( 159 | data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False 160 | ) # initialise object 161 | tde.compute(indices=((0, 1), (1, 0)), method=1) # compute TDE 162 | tde_times = tde.results.times 163 | 164 | tde_results = tde.results.get_results(copy=False) # return results as array 165 | 166 | print( 167 | f"TDE results: [{tde_results.shape[0]} connections x {tde_results.shape[1]} " 168 | f"frequency bands x {tde_results.shape[2]} times]" 169 | ) 170 | 171 | ######################################################################################## 172 | # We can see that time delays have been computed for two connections (0 → 1 and 1 → 0) 173 | # and one frequency band (0-100 Hz), with 401 timepoints, and averaged across our 30 174 | # epochs. The timepoints correspond to time delay estimates for every 5 ms (i.e. the 175 | # sampling rate of the data), ranging from -1000 ms to +1000 ms. 176 | 177 | ######################################################################################## 178 | # Plotting time delays 179 | # -------------------- 180 | # Let us now inspect the results. Note that the :class:`~matplotlib.figure.Figure` and 181 | # :class:`~matplotlib.axes.Axes` objects can be returned for any desired manual 182 | # adjustments of the plots. When handling TDE results, we take the time at which the 183 | # strength of the estimate is maximal as our :math:`\tau`. Doing so, we indeed see that 184 | # the time delay is identified as 250 ms. 
Furthermore, comparing the two connections, we 185 | # see that the direction of information flow is also correctly identified, with the 186 | # result for connection 0 → 1 being positive and the result for connection 1 → 0 being 187 | # negative (i.e. information flow from signal 0 to signal 1). Here, we manually find 188 | # :math:`\tau` based on the maximal value of the TDE results, however this information 189 | # is also precomputed and can be accessed via the ``tau`` attribute. 190 | # 191 | # Taking the time at which the estimate is maximal as our :math:`\tau` is one approach 192 | # to use when estimating time delays. For interest, however, we can also plot the full 193 | # time course of the TDE results. In this low noise example, we see that there is a 194 | # clear peak in time delay estimates at 250 ms. Depending on the nature and degree of 195 | # noise in the data, the time delay spectra may be less clear, and you may find 196 | # advantages using the other TDE method variants. 197 | 198 | # %% 199 | 200 | print( 201 | "The estimated time delay between signals 0 and 1 is " 202 | f"{tde_times[tde_results[0].argmax()]:.0f} ms.\n" 203 | "The estimated time delay between signals 1 and 0 is " 204 | f"{tde_times[tde_results[1].argmax()]:.0f} ms." 205 | ) 206 | 207 | for con_i in range(tde_results.shape[0]): 208 | assert tde_times[tde_results[con_i].argmax()] == tde.results.tau[con_i] 209 | 210 | fig, axes = tde.results.plot() 211 | 212 | ######################################################################################## 213 | # Handling artefacts from volume conduction 214 | # ----------------------------------------- 215 | # In the example above, we looked at simulated data from two signals with independent 216 | # noise sources, giving a clean TDE result at the true delay. 
In real data, however, 217 | # sources of noise in the data are often correlated across signals, such as due to 218 | # volume conduction, resulting in a bias of TDE methods towards zero time delay. To 219 | # mitigate such bias, we can employ antisymmetrisation of the bispectrum 220 | # :footcite:`JurharPreprint`. To demonstrate this, we will now look at simulated data 221 | # (still with a 250 ms delay) with the addition of a common underlying noise source 222 | # between the signals. 223 | # 224 | # As you can see, the TDE result without antisymmetrisation consists of two distinct 225 | # peaks: a larger one at time zero; and a smaller one at the genuine time delay (250 226 | # ms). As the estimate at time zero is largest, :math:`\tau` is therefore incorrectly 227 | # determined to be 0 ms. In contrast, antisymmetrisation suppresses the spurious peak at 228 | # time zero, leaving only a clear peak at the genuine time delay and the correct 229 | # estimation of :math:`\tau`. Accordingly, in instances where there is a risk of 230 | # correlated noise sources between the signals (e.g. with volume conduction), applying 231 | # antisymmetrisation when estimating time delays is recommended. 232 | 233 | # %% 234 | 235 | # load simulated data 236 | data = np.load(get_example_data_paths("sim_data_tde_correlated_noise")) 237 | sampling_freq = 200 # sampling frequency in Hz 238 | n_times = data.shape[2] # number of timepoints in the data 239 | 240 | # compute Fourier coeffs. 
241 | fft_coeffs, freqs = compute_fft( 242 | data=data, 243 | sampling_freq=sampling_freq, 244 | n_points=2 * n_times + 1, 245 | window="hamming", 246 | verbose=False, 247 | ) 248 | 249 | # compute TDE 250 | tde = TDE(data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False) 251 | tde.compute(indices=((0,), (1,)), antisym=(False, True), method=1) 252 | tde_standard, tde_antisym = tde.results 253 | 254 | print( 255 | "The estimated time delay without antisymmetrisation is " 256 | f"{tde_standard.tau[0, 0]:.0f} ms.\n" 257 | "The estimated time delay with antisymmetrisation is " 258 | f"{tde_antisym.tau[0, 0]:.0f} ms." 259 | ) 260 | 261 | # plot results 262 | tde_standard.plot() 263 | tde_antisym.plot() 264 | 265 | ######################################################################################## 266 | # References 267 | # ---------- 268 | # .. footbibliography:: 269 | 270 | # %% 271 | -------------------------------------------------------------------------------- /examples/plot_compute_tde_fbands.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================================ 3 | Compute time delay estimates for frequency bands 4 | ================================================ 5 | 6 | This example demonstrates how time delay estimation (TDE) for different frequency bands 7 | can be computed with PyBispectra. 8 | """ 9 | 10 | # Author(s): 11 | # Thomas S. Binns | github.com/tsbinns 12 | 13 | # sphinx_gallery_multi_image = "single" 14 | 15 | # %% 16 | 17 | import numpy as np 18 | 19 | from pybispectra import TDE, compute_fft, get_example_data_paths 20 | 21 | ######################################################################################## 22 | # Background 23 | # ---------- 24 | # In the previous example, we looked at how the bispectrum can be used to compute time 25 | # delays. 
In this example, we will take things further to look at how time delays can be 26 | # computed for particular frequency bands. This can be of interest when two signals 27 | # consist of multiple interacting sources at distinct frequency bands. 28 | # 29 | # For example, in the brain, information flows from the motor cortex to the subthalamic 30 | # nucleus of the subcortical basal ganglia via two distinct pathways: the monosynaptic 31 | # hyperdirect pathway; and the polysynaptic indirect pathway. As such, 32 | # cortico-subthalamic communication is faster via the hyperdirect pathway than via the 33 | # indirect pathway :footcite:`Polyakova2020`. Furthermore, hyperdirect and indirect 34 | # pathway information flow is thought to be characterised by activity in higher 35 | # (~20-30 Hz) and lower (~10-20 Hz) frequency bands, respectively. Accordingly, 36 | # estimating time delays for these frequency bands could be used as a proxy for 37 | # investigating information flow in these different pathways. 38 | # 39 | # One approach for isolating frequency band information is to bandpass filter the data 40 | # before computing time delays. However, this approach can fail to reveal the true 41 | # underlying time delay, even if the signals have a fairly high signal-to-noise ratio. 42 | # In contrast, as the bispectrum is frequency-resolved, we can extract information for 43 | # particular frequencies directly, with improved performance for revealing the true time 44 | # delay. 45 | 46 | ######################################################################################## 47 | # Computing frequency band-resolved time delays 48 | # --------------------------------------------- 49 | # We will start by loading some simulated data consisting of two signals, 50 | # :math:`\textbf{x}` and :math:`\textbf{y}`.
In these signals, there is delay of 100 ms 51 | # from :math:`\textbf{x}` to :math:`\textbf{y}` in the 20-30 Hz range, and a delay of 52 | # 200 ms from :math:`\textbf{y}` to :math:`\textbf{x}` in the 30-40 Hz range. As before, 53 | # we compute the Fourier coefficients of the data, setting ``n_points`` to be twice the 54 | # number of time points in each epoch of the data, plus one. 55 | # 56 | # When computing time delay estimation, we extract information for the broadband 57 | # spectrum, 20-30 Hz band, and 30-40 Hz band, using the ``fmin`` and ``fmax`` arguments. 58 | 59 | # %% 60 | 61 | # load simulated data 62 | data = np.load(get_example_data_paths("sim_data_tde_fbands")) 63 | sampling_freq = 200 # sampling frequency in Hz 64 | n_times = data.shape[2] # number of timepoints in the data 65 | 66 | # compute Fourier coeffs. 67 | fft_coeffs, freqs = compute_fft( 68 | data=data, 69 | sampling_freq=sampling_freq, 70 | n_points=2 * n_times + 1, # recommended for time delay estimation 71 | window="hamming", 72 | verbose=False, 73 | ) 74 | 75 | tde = TDE(data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq, verbose=False) 76 | tde.compute(indices=((0,), (1,)), fmin=(0, 20, 30), fmax=(100, 30, 40)) 77 | 78 | print( 79 | f"TDE results: [{tde.results.shape[0]} connections x {tde.results.shape[1]} " 80 | f"frequency bands x {tde.results.shape[2]} times]" 81 | ) 82 | 83 | ######################################################################################## 84 | # We can see that time delays have been computed for one connection (0 → 1) and three 85 | # frequency bands (0-100 Hz; 20-30 Hz; and 30-40 Hz), with 401 timepoints. The 86 | # timepoints correspond to time delay estimates for every 5 ms (i.e. the sampling rate 87 | # of the data), ranging from -1000 ms to +1000 ms. 
88 | # 89 | # Inspecting the results, we see that: the 20-30 Hz bispectrum entries capture the 90 | # corresponding delay around 100 ms from :math:`\textbf{x}` to :math:`\textbf{y}`; the 91 | # 30-40 Hz bispectrum entries capture the delay around 200 ms from :math:`\textbf{y}` to 92 | # :math:`\textbf{x}` (represented as a negative time delay from :math:`\textbf{x}` to 93 | # :math:`\textbf{y}`); and the broadband 0-100 Hz bispectrum captures both frequency 94 | # band interactions. As an additional note, you can see that computing time delays on 95 | # smaller frequency bands (i.e. fewer Fourier coefficients) increases the temporal 96 | # smoothing of results, something you must keep in mind if you expect your data to 97 | # contain distinct interactions which are temporally proximal to one another. 98 | 99 | # %% 100 | 101 | fig, axes = tde.results.plot(freq_bands=(1, 2, 0)) 102 | 103 | ######################################################################################## 104 | # Altogether, estimating time delays for particular frequency bands is a useful approach 105 | # to discriminate interactions between signals at distinct frequencies, whether these 106 | # frequency bands come from an *a priori* knowledge of the system being studied (e.g. as 107 | # for cortico-subthalamic interactions), or after observing multiple peaks in the 108 | # broadband time delay spectrum. 109 | 110 | ######################################################################################## 111 | # References 112 | # ---------- 113 | # .. footbibliography:: 114 | -------------------------------------------------------------------------------- /examples/plot_compute_waveshape.py: -------------------------------------------------------------------------------- 1 | """ 2 | ========================== 3 | Compute waveshape features 4 | ========================== 5 | 6 | This example demonstrates how waveshape features can be computed with PyBispectra. 
7 | """ 8 | 9 | # Author(s): 10 | # Thomas S. Binns | github.com/tsbinns 11 | 12 | # sphinx_gallery_multi_image = "single" 13 | 14 | # %% 15 | 16 | import numpy as np 17 | from matplotlib import pyplot as plt 18 | from numpy.random import RandomState 19 | 20 | from pybispectra import WaveShape, compute_fft, get_example_data_paths 21 | 22 | ######################################################################################## 23 | # Background 24 | # ---------- 25 | # When analysing signals, important information may be gleaned from a variety of 26 | # features, including their shape. For example, in neuroscience it has been suggested 27 | # that non-sinusoidal waves may play important roles in physiology and pathology, such 28 | # as waveform sharpness reflecting synchrony of synaptic inputs :footcite:`Sherman2016` 29 | # and correlating with symptoms of Parkinson's disease :footcite:`Cole2017`. Two aspects 30 | # of waveshape described in recent literature include: rise-decay asymmetry - how much 31 | # the wave resembles a sawtooth pattern (also called waveform sharpness or derivative 32 | # skewness); and peak-trough asymmetry - whether peaks (events with a positive-valued 33 | # amplitude) or troughs (events with a negative-valued amplitude) are more dominant in 34 | # the signal (also called signal/value skewness). 35 | # 36 | # A common strategy for waveshape analysis involves identifying and characterising the 37 | # features of waves in time-series data - see Cole *et al.* (2017) :footcite:`Cole2017` 38 | # for an example. Naturally, it can be of interest to explore the shapes of signals at 39 | # particular frequencies, in which case the time-series data can be bandpass filtered. 40 | # There is, however, a major limitation to this approach: applying a bandpass filter to 41 | # data can seriously alter non-sinusoidal signal shape, compromising any analysis of 42 | # waveshape before it has begun. 
43 | # 44 | # Thankfully, the bispectrum captures information about non-sinusoidal waveshape, 45 | # enabling spectrally-resolved analyses at a fine frequency resolution without the need 46 | # for bandpass filtering. In particular, the bispectrum contains information about 47 | # rise-decay asymmetry (encoded in the imaginary part of the bispectrum) and peak-trough 48 | # asymmetry (encoded in the real part of the bispectrum) :footcite:`Bartz2019`. 49 | # 50 | # The bispectrum has the general form 51 | # 52 | # :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2)\textbf{n}^* 53 | # (f_2+f_1)>` , 54 | # 55 | # where :math:`kmn` is a combination of signals with Fourier coefficients 56 | # :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively; 57 | # :math:`f_1` and :math:`f_2` correspond to a lower and higher frequency, respectively; 58 | # and :math:`<>` represents the average value over epochs. When analysing waveshape, we 59 | # are interested in only a single signal, and as such :math:`k=m=n`. 60 | # 61 | # Furthermore, we can normalise the bispectrum to the bicoherence, 62 | # :math:`\boldsymbol{\mathcal{B}}`, using the threenorm, :math:`\textbf{N}`, 63 | # :footcite:`Shahbazi2014` 64 | # 65 | # :math:`\textbf{N}_{xxx}(f_1,f_2)=(<|\textbf{x}(f_1)|^3><|\textbf{x}(f_2)|^3> 66 | # <|\textbf{x}(f_2+f_1)|^3>)^{\frac{1}{3}}` , 67 | # 68 | # :math:`\boldsymbol{\mathcal{B}}_{xxx}(f_1,f_2)=\Large\frac{\textbf{B}_{xxx} 69 | # (f_1,f_2)}{\textbf{N}_{xxx}(f_1,f_2)}` , 70 | # 71 | # where the resulting values lie in the range :math:`[-1, 1]`. 72 | 73 | ######################################################################################## 74 | # Loading data and computing Fourier coefficients 75 | # ----------------------------------------------- 76 | # We will start by loading some example data and computing the Fourier coefficients 77 | # using the :func:`~pybispectra.utils.compute_fft` function.
This data consists of 78 | # sawtooth waves (information will be captured in the rise-decay asymmetry) and waves 79 | # with a dominance of peaks or troughs (information will be captured in the peak-trough 80 | # asymmetry), all simulated as bursting oscillators at 10 Hz. 81 | 82 | # %% 83 | 84 | # load example data 85 | data_sawtooths = np.load(get_example_data_paths("sim_data_waveshape_sawtooths")) 86 | data_peaks_troughs = np.load(get_example_data_paths("sim_data_waveshape_peaks_troughs")) 87 | sampling_freq = 1000 # Hz 88 | 89 | # plot timeseries data 90 | times = np.linspace( 91 | 0, (data_sawtooths.shape[2] / sampling_freq), data_sawtooths.shape[2] 92 | ) 93 | fig, axes = plt.subplots(2, 2) 94 | axes[0, 0].plot(times, data_sawtooths[15, 0]) 95 | axes[0, 1].plot(times, data_sawtooths[15, 1]) 96 | axes[1, 0].plot(times, data_peaks_troughs[15, 0]) 97 | axes[1, 1].plot(times, data_peaks_troughs[15, 1]) 98 | titles = [ 99 | "Ramp up sawtooth", 100 | "Ramp down sawtooth", 101 | "Peak dominance", 102 | "Trough dominance", 103 | ] 104 | for ax, title in zip(axes.flatten(), titles): 105 | ax.set_title(title) 106 | ax.set_xlabel("Time (s)") 107 | ax.set_ylabel("Amplitude (A.U.)") 108 | fig.tight_layout() 109 | 110 | # add noise for numerical stability 111 | random = RandomState(44) 112 | snr = 0.25 113 | datasets = [data_sawtooths, data_peaks_troughs] 114 | for data_idx, data in enumerate(datasets): 115 | datasets[data_idx] = snr * data + (1 - snr) * random.rand(*data.shape) 116 | data_sawtooths = datasets[0] 117 | data_peaks_troughs = datasets[1] 118 | 119 | # compute Fourier coeffs. 
120 | fft_coeffs_sawtooths, freqs = compute_fft( 121 | data=data_sawtooths, 122 | sampling_freq=sampling_freq, 123 | n_points=sampling_freq, 124 | verbose=False, 125 | ) 126 | fft_coeffs_peaks_troughs, _ = compute_fft( 127 | data=data_peaks_troughs, 128 | sampling_freq=sampling_freq, 129 | n_points=sampling_freq, 130 | verbose=False, 131 | ) 132 | 133 | ######################################################################################## 134 | # Plotting the data, we see that the sawtooth waves consist of a signal where the decay 135 | # steepness is greater than the rise steepness (ramp up sawtooth) and a signal where the 136 | # rise steepness is greater than the decay steepness (ramp down sawtooth). Additionally, 137 | # the peak and trough waves consist of a signal where peaks are most dominant, and a 138 | # signal where troughs are most dominant. After loading the data, we add some noise for 139 | # numerical stability. 140 | # 141 | # Computing waveshape features 142 | # ---------------------------- 143 | # To compute waveshape, we start by initialising the 144 | # :class:`~pybispectra.waveshape.WaveShape` class object with the Fourier coefficients 145 | # and the frequency information. To compute waveshape, we call the 146 | # :meth:`~pybispectra.waveshape.WaveShape.compute` method. By default, waveshape is 147 | # computed for all channels and all frequency combinations, however we can also specify 148 | # particular channels and combinations of interest. 149 | # 150 | # Here, we specify the frequency arguments ``f1s`` and ``f2s`` to compute waveshape on 151 | # in the range 5-35 Hz (around the frequency at which the signal features were 152 | # simulated). By leaving the indices argument blank, we will look at all channels in the 153 | # data. 
154 | 155 | # %% 156 | 157 | # sawtooth waves 158 | waveshape_sawtooths = WaveShape( 159 | data=fft_coeffs_sawtooths, 160 | freqs=freqs, 161 | sampling_freq=sampling_freq, 162 | verbose=False, 163 | ) # initialise object 164 | waveshape_sawtooths.compute(f1s=(5, 35), f2s=(5, 35)) # compute waveshape 165 | 166 | # peaks and troughs 167 | waveshape_peaks_troughs = WaveShape( 168 | data=fft_coeffs_peaks_troughs, 169 | freqs=freqs, 170 | sampling_freq=sampling_freq, 171 | verbose=False, 172 | ) 173 | waveshape_peaks_troughs.compute(f1s=(5, 35), f2s=(5, 35)) # compute waveshape 174 | 175 | # return results as an array 176 | waveshape_results = waveshape_sawtooths.results.get_results(copy=False) 177 | 178 | print( 179 | f"Waveshape results: [{waveshape_results.shape[0]} channels x " 180 | f"{waveshape_results.shape[1]} f1s x {waveshape_results.shape[2]} f2s]" 181 | ) 182 | 183 | ######################################################################################## 184 | # We can see that waveshape features have been computed for both channels and the 185 | # specified frequency combinations, averaged across our epochs. Given the nature of the 186 | # bispectrum, entries where :math:`f_1` would be higher than :math:`f_2`, as well as 187 | # where :math:`f_2 + f_1` exceeds the frequency bounds of our data, cannot be computed. 188 | # In such cases, the values corresponding to those 'bad' frequency combinations are 189 | # :obj:`numpy.nan`. 190 | 191 | ######################################################################################## 192 | # Plotting waveshape features 193 | # --------------------------- 194 | # Let us now inspect the results. Information about the different waveshape features are 195 | # encoded in different aspects of the complex-valued bicoherence, with peak-trough 196 | # asymmetry encoded in the real part, and rise-decay asymmetry encoded in the imaginary 197 | # part. 
We can therefore additionally examine the absolute value of the bicoherence 198 | # (i.e. the magnitude) as well as the phase angle to get an overall picture of the 199 | # combination of peak-trough and rise-decay asymmetries. 200 | # 201 | # For the sawtooth waves, we expect the real part of bicoherence to be ~0 and the 202 | # imaginary part to be non-zero at the simulated 10 Hz frequency. From the plots, we see 203 | # that this is indeed the case. However, we also see that the imaginary values at the 10 204 | # Hz higher harmonics (i.e. 20 and 30 Hz) are also non-zero, a product of the Fourier 205 | # transform's application to non-sinusoidal signals. It is also worth noting that the 206 | # sign of the imaginary values varies for the particular sawtooth type, with a ramp up 207 | # sawtooth resulting in positive values, and a ramp down sawtooth resulting in negative 208 | # values. 209 | # 210 | # Information about the direction of the asymmetry is encoded not only in the sign of 211 | # the bicoherence values, but also in its phase. As in Bartz *et al.* 212 | # :footcite:`Bartz2019`, we represent phase in the range :math:`(0, 2\pi]` (travelling 213 | # counter-clockwise from the positive real axis). Accordingly, a phase of 214 | # :math:`\frac{1}{2}\pi` is seen at 10 Hz and its higher harmonics for the ramp up 215 | # sawtooth, with a phase of :math:`\frac{3}{2}\pi` for the ramp down sawtooth. The 216 | # phases and absolute values (i.e. the magnitude) therefore combine information from 217 | # both the real and imaginary components. 218 | # 219 | # In contrast, we expect the real part of the bicoherence to be non-zero for signals 220 | # with peak-trough asymmetry, and the imaginary part to be ~0. Again, this is what we 221 | # observe. 
Similarly to before, the signs of the real values are positive for the 222 | # peak-dominant signal, and negative for the trough-dominant signal, which is also 223 | # reflected in the phases (~0 or 2 :math:`\pi` for the peak-dominant signal, and 224 | # :math:`\pi` for the trough-dominant signal). 225 | # 226 | # Here, we plotted the real and imaginary parts of the bicoherence without taking the 227 | # absolute value. If the particular direction of asymmetry is not of interest, the 228 | # absolute values can be plotted instead (by setting ``plot_absolute=True``) to show the 229 | # overall degree of asymmetry. In any case, the direction of asymmetry can be inferred 230 | # from the phases. 231 | # 232 | # Finally, note that the :class:`~matplotlib.figure.Figure` and 233 | # :class:`~matplotlib.axes.Axes` objects can also be returned for any desired manual 234 | # adjustments of the plots. 235 | 236 | # %% 237 | 238 | figs, axes = waveshape_sawtooths.results.plot( 239 | major_tick_intervals=10, 240 | minor_tick_intervals=2, 241 | cbar_range_abs=(0, 1), 242 | cbar_range_real=(-1, 1), 243 | cbar_range_imag=(-1, 1), 244 | cbar_range_phase=(0, 2), 245 | plot_absolute=False, 246 | show=False, 247 | ) 248 | titles = ["Ramp up", "Ramp down"] 249 | for fig, title in zip(figs, titles): 250 | fig.suptitle(f"{title} sawtooth") 251 | fig.set_size_inches(6, 6) 252 | fig.show() 253 | 254 | figs, axes = waveshape_peaks_troughs.results.plot( 255 | major_tick_intervals=10, 256 | minor_tick_intervals=2, 257 | cbar_range_abs=(0, 1), 258 | cbar_range_real=(-1, 1), 259 | cbar_range_imag=(-1, 1), 260 | cbar_range_phase=(0, 2), 261 | plot_absolute=False, 262 | show=False, 263 | ) 264 | titles = ["Peak", "Trough"] 265 | for fig, title in zip(figs, titles): 266 | fig.suptitle(f"{title} dominance") 267 | fig.set_size_inches(6, 6) 268 | fig.show() 269 | 270 | ######################################################################################## 271 | # Analysing waveshape in low 
signal-to-noise ratio data 272 | # ----------------------------------------------------- 273 | # Depending on the degree of signal-to-noise ratio as well as the colour of the noise, 274 | # the ability of the bispectrum to extract information about the underlying waveshape 275 | # features can vary. To alleviate this, Bartz *et al.* :footcite:`Bartz2019` propose 276 | # utilising spatio-spectral filtering to enhance the signal-to-noise ratio of the data 277 | # at a frequency band of interest (which has the added benefit of enabling multivariate 278 | # signal analysis). Details of how spatio-spectral filtering can be incorporated into 279 | # waveshape analysis are presented in the following example: 280 | # :doc:`plot_compute_waveshape_noisy_data`. 281 | 282 | ######################################################################################## 283 | # References 284 | # ---------- 285 | # .. footbibliography:: 286 | 287 | # %% 288 | -------------------------------------------------------------------------------- /examples/plot_compute_waveshape_noisy_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | ================================================ 3 | Spatio-spectral filtering for waveshape analysis 4 | ================================================ 5 | 6 | This example demonstrates how spatio-spectral filtering can be incorporated into 7 | waveshape analysis with PyBispectra. 8 | """ 9 | 10 | # Author(s): 11 | # Thomas S. 
Binns | github.com/tsbinns 12 | 13 | # sphinx_gallery_multi_image = "single" 14 | 15 | # %% 16 | 17 | import numpy as np 18 | 19 | from pybispectra import ( 20 | ResultsWaveShape, 21 | SpatioSpectralFilter, 22 | WaveShape, 23 | compute_fft, 24 | compute_rank, 25 | get_example_data_paths, 26 | ) 27 | 28 | ######################################################################################## 29 | # Background 30 | # ---------- 31 | # When analysing signals, important information may be gleaned from a variety of 32 | # features, including their shape. One tool for extracting this information is the 33 | # bicoherence, capturing information about rise-decay asymmetry (i.e. how much a signal 34 | # resembles a sawtooth wave) and peak-trough asymmetry (i.e. how 'spiky' a signal is) 35 | # :footcite:`Bartz2019`. The use of the bicoherence for waveshape analysis was discussed 36 | # in detail in this example: :doc:`plot_compute_waveshape`. In particular, it was 37 | # mentioned how the signal-to-noise ratio of the signal can affect the ability of the 38 | # bicoherence to extract information about waveshape. In this example, we will look at 39 | # how spatio-spectral filtering can be used to improve the SNR of signals and enhance 40 | # waveshape analyses. 41 | 42 | ######################################################################################## 43 | # Loading data and performing spatio-spectral filtering 44 | # ----------------------------------------------------- 45 | # We will start by loading some example data. This data consists of a peak-dominant, 46 | # ramp down sawtooth bursting oscillator simulated at 20 Hz, and combined with a mixture 47 | # of 20 independent pink noise sources for a signal-to-noise ratio of 0.1. This setup 48 | # therefore reflects a typical electrophysiological recording scenario where the brain 49 | # source of interest projects to multiple sensors and is combined with multiple noise 50 | # sources. 
51 | # 52 | # Two forms of spatio-spectral filtering are available in PyBispectra: spatio-spectral 53 | # decomposition (SSD) :footcite:`Nikulin2011`; and harmonic power maximisation (HPMax) 54 | # :footcite:`Bartz2019`. Both methods are based on generalised eigendecompositions 55 | # :footcite:`Cohen2022`. In SSD, a frequency band of interest is chosen as the signal, 56 | # and the flanking frequencies are designated as the noise. The data is filtered in 57 | # these signal and noise ranges, and the covariance matrices of the corresponding data 58 | # are used to generate a set of spatial filters that, when applied to the data, maximise 59 | # the SNR (i.e. maximise signal power at the desired frequency band). HPMax can be 60 | # thought of as an extension of SSD, in which the base frequency band and its higher 61 | # harmonics are designated as the signal, based on the principle that non-sinusoidal 62 | # signal information is encoded at its base frequency and higher harmonics. 63 | # 64 | # The ability of SSD and HPMax to recover information about the underlying wave shape of 65 | # a signal of interest varies depending on the SNR of the data and the colour of the 66 | # noise :footcite:`Bartz2019`. At an SNR of 0.1 with pink noise, both SSD and HPMax are 67 | # able to recover waveshape information well, however for simplicity we will focus on only 68 | # one: HPMax. 69 | # 70 | # To perform spatio-spectral filtering, we start by initialising the 71 | # :class:`~pybispectra.utils.SpatioSpectralFilter` class object with the data and the 72 | # sampling frequency. To compute HPMax, we call the 73 | # :meth:`~pybispectra.utils.SpatioSpectralFilter.fit_transform_hpmax` method (SSD can be 74 | # computed using the :meth:`~pybispectra.utils.SpatioSpectralFilter.fit_transform_ssd` 75 | # method). We specify the 'signal' frequency band of interest to be 18-22 Hz and the 76 | # flanking 'noise' frequencies to be 15-18 Hz and 22-25 Hz. 
Additionally, we take 77 | # information from the two higher harmonics of the 'signal' frequencies (i.e. 36-44 Hz 78 | # and 54-66 Hz). 79 | # 80 | # After finding the filters, we use them to transform the data. By default, only those 81 | # filters whose signal vs. noise contributions are greater than 1 are returned, however 82 | # for simplicity we will take only the first component (i.e. the component with the 83 | # greatest signal-to-noise ratio). 84 | 85 | # %% 86 | 87 | # load example data 88 | data = np.load(get_example_data_paths("sim_data_waveshape_noisy")) 89 | sampling_freq = 1000 # Hz 90 | 91 | # perform spatio-spectral filtering 92 | ssf = SpatioSpectralFilter(data=data, sampling_freq=sampling_freq, verbose=False) 93 | transformed_data = ssf.fit_transform_hpmax( 94 | signal_bounds=(18, 22), noise_bounds=(15, 25), n_harmonics=2 95 | ) 96 | 97 | # select the first component of the filtered data 98 | transformed_data = transformed_data[:, [0], :] 99 | 100 | print( 101 | f"Original timeseries data: [{data.shape[0]} epochs x {data.shape[1]} channel(s) x " 102 | f"{data.shape[2]} times]" 103 | ) 104 | print( 105 | f"Transformed timeseries data: [{transformed_data.shape[0]} epochs x " 106 | f"{transformed_data.shape[1]} channel(s) x {transformed_data.shape[2]} times]\n" 107 | f"Signal-to-noise ratio of transformed data's first component: {ssf.ratios[0] :.2f}" 108 | ) 109 | 110 | ######################################################################################## 111 | # As SSD and HPMax are based on generalised eigendecomposition, the data for which the 112 | # filters are being generated must be full rank (i.e. a non-zero determinant). The 113 | # :meth:`~pybispectra.utils.SpatioSpectralFilter.fit_transform_ssd` and 114 | # :meth:`~pybispectra.utils.SpatioSpectralFilter.fit_transform_hpmax` methods have a 115 | # ``rank`` argument where the rank of the data can be specified, according to which the 116 | # data will be projected to. 
If the rank is not specified, it will be computed 117 | # automatically using the :func:`~pybispectra.utils.compute_rank` function. Here, the 118 | # rank is determined based on the number of non-zero singular values. Non-zero singular 119 | # values are defined as those which are greater than the largest singular value 120 | # multiplied by a tolerance value specified by the ``sv_tol`` argument (:math:`1e^{-5}` 121 | # by default). 122 | # 123 | # Below we see that our original timeseries data of 20 channels has a rank of 20, and is 124 | # therefore full rank. Although the rank of the data is automatically computed, the 125 | # option of specifying the rank subspace to project the data to is still provided, as a 126 | # rank projection less than that of the data's rank may be desired when dealing with a 127 | # large number of channels to prevent the overfitting of filters. 128 | 129 | # %% 130 | 131 | rank = compute_rank(data, sv_tol=1e-5) 132 | print(f"The original timeseries data ({data.shape[1]} channels) has a rank of {rank}.") 133 | 134 | ######################################################################################## 135 | # Computing and plotting waveshape features 136 | # ----------------------------------------- 137 | # For the waveshape analysis, we compute the Fourier coefficients of the data and use 138 | # them to initialise the :class:`~pybispectra.waveshape.WaveShape` class object. To 139 | # compute waveshape, we call the :meth:`~pybispectra.waveshape.WaveShape.compute` 140 | # method. 141 | # 142 | # Plotting the results, we see that information is captured in both the real and 143 | # imaginary part of the bicoherence at 20 Hz and its higher harmonics, as we expect 144 | # given that the simulated source consists of both peak-trough and rise-decay 145 | # asymmetries. 
Looking at the phases, we see that the results are ~ 146 | # :math:`\frac{7}{4}\pi` at these frequencies, in line with the fact that the simulated 147 | # source is a peak-dominant (positive real-value), ramp down (negative imaginary-value) 148 | # wave. Comparing to the results of the unfiltered data averaged over channels, the 149 | # waveshape information is much clearer in the filtered data. 150 | 151 | # %% 152 | 153 | # transformed data 154 | fft_coeffs_transformed, freqs = compute_fft( 155 | data=transformed_data, 156 | sampling_freq=sampling_freq, 157 | n_points=sampling_freq, 158 | verbose=False, 159 | ) 160 | waveshape_transformed = WaveShape( 161 | data=fft_coeffs_transformed, freqs=freqs, sampling_freq=sampling_freq, verbose=False 162 | ) 163 | waveshape_transformed.compute(f1s=(10, 70), f2s=(10, 70)) 164 | fig, axes = waveshape_transformed.results.plot( 165 | major_tick_intervals=10, 166 | minor_tick_intervals=2, 167 | cbar_range_abs=(0, 1), 168 | cbar_range_real=(0, 1), 169 | cbar_range_imag=(0, 1), 170 | cbar_range_phase=(0, 2), 171 | plot_absolute=True, 172 | ) 173 | fig[0].set_size_inches(6, 6) 174 | 175 | # noisy data 176 | fft_coeffs_noisy, freqs = compute_fft( 177 | data=data, sampling_freq=sampling_freq, n_points=sampling_freq, verbose=False 178 | ) 179 | waveshape_noisy = WaveShape( 180 | data=fft_coeffs_noisy, freqs=freqs, sampling_freq=sampling_freq, verbose=False 181 | ) 182 | waveshape_noisy.compute(f1s=(10, 70), f2s=(10, 70)) 183 | noisy_results = waveshape_noisy.results.get_results() 184 | noisy_results = noisy_results.mean(axis=0)[np.newaxis, :, :] 185 | noisy_results = ResultsWaveShape( 186 | data=noisy_results, 187 | indices=(0,), 188 | f1s=waveshape_noisy.results.f1s, 189 | f2s=waveshape_noisy.results.f2s, 190 | name=waveshape_noisy.results.name, 191 | ) 192 | fig, axes = noisy_results.plot( 193 | major_tick_intervals=10, 194 | minor_tick_intervals=2, 195 | cbar_range_abs=(0, 1), 196 | cbar_range_real=(0, 1), 197 | 
cbar_range_imag=(0, 1), 198 | cbar_range_phase=(0, 2), 199 | plot_absolute=True, 200 | ) 201 | fig[0].set_size_inches(6, 6) 202 | 203 | ######################################################################################## 204 | # As you can see, spatio-spectral filtering is a powerful tool for extracting waveshape 205 | # information from noisy data, and the tools in PyBispectra allow you to easily 206 | # incorporate these methods into your analysis pipeline. 207 | 208 | ######################################################################################## 209 | # References 210 | # ---------- 211 | # .. footbibliography:: 212 | 213 | # %% 214 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "hatchling.build" 3 | requires = ["hatchling"] 4 | 5 | [project] 6 | authors = [{email = "t.s.binns@outlook.com", name = "Thomas S. Binns"}] 7 | classifiers = [ 8 | "License :: OSI Approved :: MIT License", 9 | "Operating System :: OS Independent", 10 | "Programming Language :: Python :: 3", 11 | ] 12 | dependencies = [ 13 | "joblib", 14 | "matplotlib", 15 | "mne>1.6", 16 | "numba", 17 | "numpy", 18 | "scikit-learn", 19 | "scipy", 20 | ] 21 | description = "A Python signal processing package for computing spectral-domain and time-domain interactions using the bispectrum." 
22 | name = "pybispectra" 23 | readme = "README.md" 24 | requires-python = ">=3.10" 25 | version = "1.3.0dev" 26 | 27 | [project.optional-dependencies] 28 | dev = ["pybispectra[doc]", "pybispectra[lint]", "pybispectra[test]"] 29 | doc = [ 30 | "ipykernel", 31 | "ipython", 32 | "ipywidgets", 33 | "notebook", 34 | "numpydoc", 35 | "pydata-sphinx-theme", 36 | "sphinx", 37 | "sphinx-copybutton", 38 | "sphinx-gallery>=0.18", 39 | "sphinxcontrib-bibtex", 40 | ] 41 | lint = [ 42 | "codespell", 43 | "isort", 44 | "pre-commit", 45 | "pydocstyle", 46 | "pydocstyle[toml]", 47 | "rstcheck", 48 | "ruff", 49 | "toml-sort", 50 | "yamllint", 51 | ] 52 | test = ["coverage", "pytest"] 53 | 54 | [project.urls] 55 | "Bug Tracker" = "https://github.com/braindatalab/PyBispectra/issues" 56 | "Homepage" = "https://github.com/braindatalab/PyBispectra" 57 | 58 | [tool.codespell] 59 | ignore-words-list = "trough" 60 | skip = "./docs/build,./docs/source/auto_examples,./docs/source/refs.bib" 61 | 62 | [tool.coverage.report] 63 | show_missing = true 64 | 65 | [tool.coverage.run] 66 | command_line = "-m pytest -v tests" 67 | omit = ["__init__.py", "src/pybispectra/utils/_docs.py", "tests/*"] 68 | source = ["pybispectra"] 69 | 70 | [tool.isort] 71 | profile = "black" 72 | 73 | [tool.pydocstyle] 74 | match-dir = "^(?!(examples|docs|tests)).*" 75 | 76 | [tool.pytest.ini_options] 77 | # use '', not "" for escaping characters in regex 78 | filterwarnings = [ 79 | "ignore:The seed and target for at least one connection is the same channel:UserWarning", 80 | 'ignore:At least one value in \`f1s\` is \>\= a value in \`f2s\`:UserWarning', 81 | 'ignore:At least one value of \`f2s\` \+ \`f1s\` is not present in the frequencies:UserWarning', 82 | ] 83 | 84 | [tool.rstcheck] 85 | ignore_directives = [ 86 | "autoclass", 87 | "autofunction", 88 | "automodule", 89 | "autosummary", 90 | "bibliography", 91 | "cssclass", 92 | "currentmodule", 93 | "dropdown", 94 | "footbibliography", 95 | "glossary", 96 | 
"graphviz", 97 | "grid", 98 | "highlight", 99 | "minigallery", 100 | "rst-class", 101 | "tab-set", 102 | "tabularcolumns", 103 | "toctree", 104 | ] 105 | ignore_messages = "^.*(Unknown target name|Undefined substitution referenced)[^`]*$" 106 | ignore_roles = [ 107 | "attr", 108 | "class", 109 | "doc", 110 | "eq", 111 | "exc", 112 | "file", 113 | "footcite", 114 | "footcite:t", 115 | "func", 116 | "gh", 117 | "kbd", 118 | "meth", 119 | "mod", 120 | "newcontrib", 121 | "pr", 122 | "py:mod", 123 | "ref", 124 | "samp", 125 | "term", 126 | ] 127 | report_level = "WARNING" 128 | 129 | [tool.ruff] 130 | extend-exclude = ["docs", "examples/compute_*.py"] 131 | line-length = 88 132 | 133 | [tool.ruff.lint.per-file-ignores] 134 | "*.py" = ["E203", "E741"] 135 | "__init__.py" = ["F401"] 136 | 137 | [tool.tomlsort] 138 | all = true 139 | ignore_case = true 140 | trailing_comma_inline_array = true 141 | -------------------------------------------------------------------------------- /src/pybispectra/__init__.py: -------------------------------------------------------------------------------- 1 | """Initialisation of the PyBispectra package.""" 2 | 3 | __version__ = "1.2.1dev" 4 | 5 | from .cfc import AAC, PAC, PPC 6 | from .data import get_example_data_paths 7 | from .general import Bispectrum, Threenorm 8 | from .tde import TDE 9 | from .utils import ( 10 | ResultsCFC, 11 | ResultsGeneral, 12 | ResultsTDE, 13 | ResultsWaveShape, 14 | SpatioSpectralFilter, 15 | compute_fft, 16 | compute_rank, 17 | compute_tfr, 18 | set_precision, 19 | ) 20 | from .waveshape import WaveShape 21 | -------------------------------------------------------------------------------- /src/pybispectra/cfc/__init__.py: -------------------------------------------------------------------------------- 1 | """Tools for handling cross-frequency coupling analysis.""" 2 | 3 | __version__ = "1.2.1dev" 4 | 5 | from .aac import AAC 6 | from .pac import PAC 7 | from .ppc import PPC 8 | 
-------------------------------------------------------------------------------- /src/pybispectra/cfc/aac.py: -------------------------------------------------------------------------------- 1 | """Tools for handling AAC analysis.""" 2 | 3 | import numpy as np 4 | from numba import njit 5 | 6 | from pybispectra.utils import ResultsCFC 7 | from pybispectra.utils._defaults import _precision 8 | from pybispectra.utils._process import _ProcessFreqBase 9 | from pybispectra.utils._utils import ( 10 | _compute_pearsonr_2d, 11 | _fast_find_first, 12 | _compute_in_parallel, 13 | ) 14 | 15 | 16 | class AAC(_ProcessFreqBase): 17 | """Class for computing amplitude-amplitude coupling (AAC). 18 | 19 | Parameters 20 | ---------- 21 | data : ~numpy.ndarray, shape of [epochs, channels, frequencies, times] 22 | Amplitude (power) of the time-frequency representation of data. 23 | 24 | freqs : ~numpy.ndarray, shape of [frequencies] 25 | Frequencies (in Hz) in ``data``. Frequencies are expected to be evenly spaced. 26 | 27 | sampling_freq : int | float 28 | Sampling frequency (in Hz) of the data from which ``data`` was derived. 29 | 30 | verbose : bool (default True) 31 | Whether or not to report the progress of the processing. 32 | 33 | Methods 34 | ------- 35 | compute : 36 | Compute AAC, averaged over epochs. 37 | 38 | copy : 39 | Return a copy of the object. 40 | 41 | Attributes 42 | ---------- 43 | results : ~pybispectra.utils.ResultsCFC 44 | AAC results. 45 | 46 | data : ~numpy.ndarray, shape of [epochs, channels, frequencies, times] 47 | Amplitude (power) of the time-frequency representation of data. 48 | 49 | freqs : ~numpy.ndarray, shape of [frequencies] 50 | Frequencies (in Hz) in ``data``. 51 | 52 | sampling_freq : int | float 53 | Sampling frequency (in Hz) of ``data``. 54 | 55 | verbose : bool 56 | Whether or not to report the progress of the processing. 
57 | """ # noqa: E501 58 | 59 | _data_precision: type = _precision.real # TFR real-valued 60 | 61 | _data_ndims: int = 4 # [epochs, channels, frequencies, times] 62 | 63 | _aac: np.ndarray = None 64 | 65 | def compute( 66 | self, 67 | indices: tuple[tuple[int]] | None = None, 68 | f1s: tuple[int | float] | None = None, 69 | f2s: tuple[int | float] | None = None, 70 | n_jobs: int = 1, 71 | ) -> None: 72 | r"""Compute AAC, averaged over epochs. 73 | 74 | Parameters 75 | ---------- 76 | indices : tuple of tuple of int, length of 2 | None (default None) 77 | Indices of the seed and target channels, respectively, to compute AAC 78 | between. If :obj:`None`, coupling between all channels is computed. 79 | 80 | f1s : tuple of int or float, length of 2 | None (default None) 81 | Start and end lower frequencies to compute AAC on, respectively. If 82 | :obj:`None`, all frequencies are used. 83 | 84 | f2s : tuple of int or float, length of 2 | None (default None) 85 | Start and end higher frequencies to compute AAC on, respectively. If 86 | :obj:`None`, all frequencies are used. 87 | 88 | n_jobs : int (default ``1``) 89 | Number of jobs to run in parallel. If ``-1``, all available CPUs are used. 90 | 91 | Notes 92 | ----- 93 | AAC is computed as the Pearson correlation coefficient across times for each 94 | frequency in each epoch, with coupling being averaged across epochs 95 | :footcite:`Giehl2021`. 96 | 97 | AAC is computed between all values of ``f1s`` and ``f2s``. If any value of 98 | ``f1s`` is higher than ``f2s``, a :obj:`numpy.nan` value is returned. 99 | 100 | References 101 | ---------- 102 | .. footbibliography:: 103 | """ 104 | self._reset_attrs() 105 | 106 | self._sort_indices(indices) 107 | self._sort_freqs(f1s, f2s) 108 | self._sort_parallelisation(n_jobs) 109 | 110 | if self.verbose: 111 | print("Computing AAC...") 112 | 113 | self._compute_aac() 114 | self._store_results() 115 | 116 | if self.verbose: 117 | print(" ... 
AAC computation finished\n") 118 | 119 | def _reset_attrs(self) -> None: 120 | """Reset attrs. of the object to prevent interference.""" 121 | super()._reset_attrs() 122 | self._aac = None 123 | 124 | def _compute_aac(self) -> None: 125 | """Compute AAC between f1s of seeds and f2s of targets.""" 126 | loop_kwargs = [ 127 | {"data": self.data[:, (seed, target)]} 128 | for seed, target in zip(self._seeds, self._targets) 129 | ] 130 | static_kwargs = { 131 | "freqs": self.freqs, 132 | "f1s": self._f1s, 133 | "f2s": self._f2s, 134 | "precision": _precision.real, 135 | } 136 | try: 137 | self._aac = _compute_in_parallel( 138 | func=_compute_aac, 139 | loop_kwargs=loop_kwargs, 140 | static_kwargs=static_kwargs, 141 | output=np.zeros( 142 | (self._n_cons, self._f1s.size, self._f2s.size), 143 | dtype=_precision.real, 144 | ), 145 | message="Processing connections...", 146 | n_jobs=self._n_jobs, 147 | verbose=self.verbose, 148 | prefer="processes", 149 | ) 150 | except MemoryError as error: # pragma: no cover 151 | raise MemoryError( 152 | "Memory allocation for the AAC computation failed. Try reducing the " 153 | "sampling frequency of the data, or reduce the precision of the " 154 | "computation with `pybispectra.set_precision('single')`." 155 | ) from error 156 | 157 | def _store_results(self) -> None: 158 | """Store computed results in an object.""" 159 | self._results = ResultsCFC( 160 | self._aac, self._indices, self._f1s, self._f2s, "AAC" 161 | ) 162 | 163 | @property 164 | def results(self) -> ResultsCFC: 165 | return self._results 166 | 167 | 168 | @njit 169 | def _compute_aac( 170 | data: np.ndarray, 171 | freqs: np.ndarray, 172 | f1s: np.ndarray, 173 | f2s: np.ndarray, 174 | precision: type, 175 | ) -> np.ndarray: # pragma: no cover 176 | """Compute AAC for a single connection across epochs. 
177 | 178 | Parameters 179 | ---------- 180 | data : numpy.ndarray, shape of [epochs, 2, frequencies, times] 181 | Amplitude (power) of the time-frequency representation of data where the second 182 | dimension contains the data for the seed and target channel of a single 183 | connection, respectively. 184 | 185 | freqs : numpy.ndarray, shape of [frequencies] 186 | Frequencies in ``data``. 187 | 188 | f1s : numpy.ndarray, shape of [low frequencies] 189 | Low frequencies to compute coupling for. 190 | 191 | f2s : numpy.ndarray, shape of [high frequencies] 192 | High frequencies to compute coupling for. 193 | 194 | precision : type 195 | Precision to use for the computation. Either ``numpy.float32`` (single) or 196 | ``numpy.float64`` (double). 197 | 198 | Returns 199 | ------- 200 | results : numpy.ndarray, shape of [low frequencies, high frequencies] 201 | AAC averaged across epochs for a single connection. 202 | """ 203 | results = np.full((f1s.shape[0], f2s.shape[0]), fill_value=np.nan, dtype=precision) 204 | f1_start = _fast_find_first(freqs, f1s[0], 0) 205 | f1_end = _fast_find_first(freqs, f1s[-1], f1_start) 206 | f2_start = _fast_find_first(freqs, f2s[0], 0) 207 | f2_end = _fast_find_first(freqs, f2s[-1], f2_start) 208 | for f1_ri, f1_fi in enumerate(range(f1_start, f1_end + 1)): 209 | f1 = freqs[f1_fi] 210 | for f2_ri, f2_fi in enumerate(range(f2_start, f2_end + 1)): 211 | f2 = freqs[f2_fi] 212 | if f1 <= f2 and f1 > 0: 213 | results[f1_ri, f2_ri] = np.mean( 214 | _compute_pearsonr_2d( 215 | data[:, 0, f1_fi], data[:, 1, f2_fi], precision 216 | ) 217 | ) 218 | 219 | return results 220 | -------------------------------------------------------------------------------- /src/pybispectra/cfc/ppc.py: -------------------------------------------------------------------------------- 1 | """Tools for handling PPC analysis.""" 2 | 3 | import numpy as np 4 | from numba import njit 5 | 6 | from pybispectra.utils import ResultsCFC 7 | from pybispectra.utils._defaults 
"""Tools for handling PPC analysis."""

import numpy as np
from numba import njit

from pybispectra.utils import ResultsCFC
from pybispectra.utils._defaults import _precision
from pybispectra.utils._process import _ProcessFreqBase
from pybispectra.utils._utils import _fast_find_first, _compute_in_parallel


class PPC(_ProcessFreqBase):
    """Class for computing phase-phase coupling (PPC).

    Parameters
    ----------
    data : ~numpy.ndarray of float, shape of [epochs, channels, frequencies]
        Fourier coefficients.

    freqs : ~numpy.ndarray of float, shape of [frequencies]
        Frequencies (in Hz) in ``data``. Frequencies are expected to be evenly
        spaced.

    sampling_freq : int | float
        Sampling frequency (in Hz) of the data from which ``data`` was derived.

    verbose : bool (default True)
        Whether or not to report the progress of the processing.

    Methods
    -------
    compute :
        Compute PPC, averaged over epochs.

    copy :
        Return a copy of the object.

    Attributes
    ----------
    results : ~pybispectra.utils.ResultsCFC
        PPC results.

    data : ~numpy.ndarray of float, shape of [epochs, channels, frequencies]
        Fourier coefficients.

    freqs : ~numpy.ndarray of float, shape of [frequencies]
        Frequencies (in Hz) in ``data``.

    sampling_freq : int | float
        Sampling frequency (in Hz) of the data from which ``data`` was derived.

    verbose : bool
        Whether or not to report the progress of the processing.
    """

    # filled by `_compute_ppc`; cleared by `_reset_attrs`
    _ppc = None

    def compute(
        self,
        indices: tuple[tuple[int]] | None = None,
        f1s: tuple[int | float] | None = None,
        f2s: tuple[int | float] | None = None,
        n_jobs: int = 1,
    ) -> None:
        r"""Compute PPC, averaged over epochs.

        Parameters
        ----------
        indices : tuple of tuple of int, length of 2 | None (default None)
            Indices of the seed and target channels, respectively, to compute PPC
            between. If :obj:`None`, coupling between all channels is computed.

        f1s : tuple of int or float, length of 2 | None (default None)
            Start and end lower frequencies to compute PPC on, respectively. If
            :obj:`None`, all frequencies are used.

        f2s : tuple of int or float, length of 2 | None (default None)
            Start and end higher frequencies to compute PPC on, respectively.
            If :obj:`None`, all frequencies are used.

        n_jobs : int (default ``1``)
            Number of jobs to run in parallel. If ``-1``, all available CPUs are used.

        Notes
        -----
        PPC is computed as coherence between frequencies :footcite:`Giehl2021`

        :math:`\textrm{PPC}(\textbf{x}_{f_1},\textbf{y}_{f_2})=\Large \frac{|\langle
        \textbf{a}_x(f_1)\textbf{a}_y(f_2) e^{i(\boldsymbol{\varphi}_x(f_1)\frac{f_2}
        {f_1}-\boldsymbol{\varphi}_y(f_2))} \rangle|}{\langle\textbf{a}_x(f_1)
        \textbf{a}_y(f_2) \rangle}` ,

        where :math:`\textbf{a}(f)` and :math:`\boldsymbol{\varphi}(f)` are the
        amplitude and phase of a signal at a given frequency, respectively; :math:`f_1`
        and :math:`f_2` correspond to a lower and higher frequency, respectively; and
        :math:`<>` represents the average value over epochs.

        PPC is computed between all values of ``f1s`` and ``f2s``. If any value of
        ``f1s`` is higher than ``f2s``, a :obj:`numpy.nan` value is returned.

        References
        ----------
        .. footbibliography::
        """
        self._reset_attrs()

        # validate inputs and resolve defaults (base-class helpers)
        self._sort_indices(indices)
        self._sort_freqs(f1s, f2s)
        self._sort_parallelisation(n_jobs)

        if self.verbose:
            print("Computing PPC...")

        self._compute_ppc()
        self._store_results()

        if self.verbose:
            print(" ... PPC computation finished\n")

    def _reset_attrs(self) -> None:
        """Reset attrs. of the object to prevent interference."""
        super()._reset_attrs()
        self._ppc = None

    def _compute_ppc(self) -> None:
        """Compute PPC between f1s of seeds and f2s of targets.

        Fills ``self._ppc`` with an array of shape [connections, f1s, f2s].
        """
        # per-connection kwargs: the (seed, target) channel pair of each connection
        loop_kwargs = [
            {"data": self.data[:, (seed, target)]}
            for seed, target in zip(self._seeds, self._targets)
        ]
        # kwargs shared by every connection
        static_kwargs = {
            "freqs": self.freqs,
            "f1s": self._f1s,
            "f2s": self._f2s,
            "precision": _precision.real,
        }
        try:
            self._ppc = _compute_in_parallel(
                func=_compute_ppc,  # the jitted helper defined below
                loop_kwargs=loop_kwargs,
                static_kwargs=static_kwargs,
                output=np.zeros(
                    (self._n_cons, self._f1s.size, self._f2s.size),
                    dtype=_precision.real,
                ),
                message="Processing connections...",
                n_jobs=self._n_jobs,
                verbose=self.verbose,
                prefer="processes",
            )
        except MemoryError as error:  # pragma: no cover
            # re-raise with actionable advice; chaining preserves the traceback
            raise MemoryError(
                "Memory allocation for the PPC computation failed. Try reducing the "
                "sampling frequency of the data, or reduce the precision of the "
                "computation with `pybispectra.set_precision('single')`."
            ) from error

    def _store_results(self) -> None:
        """Store computed results in an object."""
        self._results = ResultsCFC(
            self._ppc, self._indices, self._f1s, self._f2s, "PPC"
        )

    @property
    def results(self) -> ResultsCFC:
        """PPC results from the most recent call of `compute`."""
        return self._results


@njit
def _compute_ppc(
    data: np.ndarray,
    freqs: np.ndarray,
    f1s: np.ndarray,
    f2s: np.ndarray,
    precision: type,
) -> np.ndarray:  # pragma: no cover
    """Compute PPC for a single connection across epochs.

    Parameters
    ----------
    data : numpy.ndarray, shape of [epochs, 2, frequencies]
        FFT coefficients where the second dimension contains the data for the seed and
        target channel of a single connection, respectively.

    freqs : numpy.ndarray, shape of [frequencies]
        Frequencies in ``data``.

    f1s : numpy.ndarray, shape of [low frequencies]
        Low frequencies to compute coupling for.

    f2s : numpy.ndarray, shape of [high frequencies]
        High frequencies to compute coupling for.

    precision : type
        Precision to use for the computation. Either ``numpy.float32`` (single) or
        ``numpy.float64`` (double).

    Returns
    -------
    results : numpy.ndarray, shape of [low frequencies, high frequencies]
        PPC for a single connection.

    Notes
    -----
    Entries where f1 >= f2 or f1 == 0 are left as NaN. Numba-jitted; performs
    no input checks for speed.
    """
    # NaN-filled output; only valid (f1 < f2, f1 > 0) cells are overwritten
    results = np.full((f1s.shape[0], f2s.shape[0]), fill_value=np.nan, dtype=precision)
    # map the requested frequency ranges onto indices of `freqs`; assumes
    # f1s/f2s are contiguous subsets of `freqs` — TODO confirm against caller
    f1_start = _fast_find_first(freqs, f1s[0], 0)
    f1_end = _fast_find_first(freqs, f1s[-1], f1_start)
    f2_start = _fast_find_first(freqs, f2s[0], 0)
    f2_end = _fast_find_first(freqs, f2s[-1], f2_start)
    for f1_ri, f1_fi in enumerate(range(f1_start, f1_end + 1)):  # ri=result, fi=freqs
        f1 = freqs[f1_fi]
        for f2_ri, f2_fi in enumerate(range(f2_start, f2_end + 1)):
            f2 = freqs[f2_fi]
            if f1 < f2 and f1 > 0:  # strictly f1 < f2 here, unlike AAC
                fft_f1 = data[:, 0, f1_fi]
                fft_f2 = data[:, 1, f2_fi]
                # amplitude-weighted phase-difference vector, with the f1 phase
                # scaled by f2/f1 to compare across frequencies
                numerator = np.abs(
                    (
                        np.abs(fft_f1)
                        * np.abs(fft_f2)
                        * np.exp(
                            1j
                            * (
                                # NOTE(review): `np.angle(..., True)` returns the
                                # phase in DEGREES, but `exp(1j * theta)` expects
                                # radians — confirm this is intentional.
                                np.angle(fft_f1, True) * (f2 / f1)
                                - np.angle(fft_f2, True)
                            )
                        )
                    ).mean()
                )
                # normalise by the mean amplitude product so results lie in [0, 1]
                denominator = np.mean((np.abs(fft_f1) * np.abs(fft_f2)))
                results[f1_ri, f2_ri] = numerator / denominator

    return results
"""Tools for fetching example data."""

import os
from pathlib import Path

# Mapping of dataset name -> filename within the bundled ``example_data``
# directory. Keys are the names accepted by `get_example_data_paths`.
DATASETS = {
    # AAC
    "sim_data_aac": "sim_data_aac.npy",
    # PAC
    "sim_data_pac_univariate": "sim_data_pac_univariate.npy",
    "sim_data_pac_bivariate": "sim_data_pac_bivariate.npy",
    # PPC
    # FIX: previously mapped to "sim_data_aac.npy", silently returning the AAC
    # dataset; the PPC dataset is shipped as sim_data_ppc.npy.
    "sim_data_ppc": "sim_data_ppc.npy",
    # TDE
    "sim_data_tde_independent_noise": "sim_data_tde_independent_noise.npy",
    "sim_data_tde_correlated_noise": "sim_data_tde_correlated_noise.npy",
    "sim_data_tde_fbands": "sim_data_tde_fbands.npy",
    # Waveshape
    "sim_data_waveshape_peaks_troughs": "sim_data_waveshape_peaks_troughs.npy",
    "sim_data_waveshape_sawtooths": "sim_data_waveshape_sawtooths.npy",
    "sim_data_waveshape_noisy": "sim_data_waveshape_noisy.npy",
}


def get_example_data_paths(name: str) -> str:
    """Return the path to the requested example data.

    Parameters
    ----------
    name : str
        Name of the example data. Must be a key of ``DATASETS``.

    Returns
    -------
    path : str
        Absolute path to the example data file.

    Raises
    ------
    ValueError
        If ``name`` is not a recognised dataset name.
    """
    if name not in DATASETS:  # membership on a dict checks keys directly
        raise ValueError(f"`name` must be one of: {list(DATASETS.keys())}")

    # resolve relative to this module so the path works from any working dir
    filepath_upper = Path(os.path.abspath(__file__)).parent
    return os.path.join(filepath_upper, "example_data", DATASETS[name])
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/braindatalab/PyBispectra/1944a74ad24fd1aef82fc0e94752f6f1150f1a9a/src/pybispectra/data/example_data/sim_data_tde_correlated_noise.npy -------------------------------------------------------------------------------- /src/pybispectra/data/example_data/sim_data_tde_fbands.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/braindatalab/PyBispectra/1944a74ad24fd1aef82fc0e94752f6f1150f1a9a/src/pybispectra/data/example_data/sim_data_tde_fbands.npy -------------------------------------------------------------------------------- /src/pybispectra/data/example_data/sim_data_tde_independent_noise.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/braindatalab/PyBispectra/1944a74ad24fd1aef82fc0e94752f6f1150f1a9a/src/pybispectra/data/example_data/sim_data_tde_independent_noise.npy -------------------------------------------------------------------------------- /src/pybispectra/data/example_data/sim_data_waveshape_noisy.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/braindatalab/PyBispectra/1944a74ad24fd1aef82fc0e94752f6f1150f1a9a/src/pybispectra/data/example_data/sim_data_waveshape_noisy.npy -------------------------------------------------------------------------------- /src/pybispectra/data/example_data/sim_data_waveshape_peaks_troughs.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/braindatalab/PyBispectra/1944a74ad24fd1aef82fc0e94752f6f1150f1a9a/src/pybispectra/data/example_data/sim_data_waveshape_peaks_troughs.npy -------------------------------------------------------------------------------- /src/pybispectra/data/example_data/sim_data_waveshape_sawtooths.npy: 
"""Tools for handling general bispectrum and threenorm computations."""

import numpy as np

from pybispectra.utils import ResultsGeneral
from pybispectra.utils._defaults import _precision
from pybispectra.utils._process import (
    _compute_bispectrum,
    _compute_threenorm,
    _ProcessBispectrum,
)
from pybispectra.utils._utils import _compute_in_parallel


class _General(_ProcessBispectrum):
    """Base class for processing the bispectrum and threenorm."""

    _k: tuple[int] = None
    _m: tuple[int] = None
    _n: tuple[int] = None

    def _sort_indices(self, indices: tuple[tuple[int]] | None) -> None:
        """Validate and store the kmn channel index groups.

        When ``indices`` is None, every possible (k, m, n) channel combination
        is generated: k cycles fastest, n slowest.
        """
        if indices is None:
            n_chans = self._n_chans
            k_group = tuple(np.tile(range(n_chans), n_chans**2).tolist())
            m_group = tuple(
                np.repeat(np.tile(range(n_chans), n_chans), n_chans).tolist()
            )
            n_group = tuple(np.repeat(range(n_chans), n_chans**2).tolist())
            indices = (k_group, m_group, n_group)
        if not isinstance(indices, tuple):
            raise TypeError("`indices` must be a tuple.")
        if len(indices) != 3:
            raise ValueError("`indices` must have length of 3.")
        self._indices = indices

        for group_idcs in indices:
            if not isinstance(group_idcs, tuple):
                raise TypeError("Entries of `indices` must be tuples.")
            if not all(isinstance(idx, int) for idx in group_idcs):
                raise TypeError("Entries for groups in `indices` must be ints.")
            if not all(0 <= idx < self._n_chans for idx in group_idcs):
                raise ValueError(
                    "`indices` contains indices for channels not present in the data."
                )
        # every group must describe the same number of combinations
        if len({len(group) for group in indices}) != 1:
            raise ValueError("Entries of `indices` must have equal length.")

        self._k, self._m, self._n = self._indices
        self._n_cons = len(self._indices[0])


class Bispectrum(_General):
    """Class for computing the bispectrum.

    Parameters
    ----------
    data : ~numpy.ndarray, shape of [epochs, channels, frequencies]
        Fourier coefficients.

    freqs : ~numpy.ndarray, shape of [frequencies]
        Frequencies (in Hz) in ``data``. Frequencies are expected to be evenly spaced.

    sampling_freq : int | float
        Sampling frequency (in Hz) of the data from which ``data`` was derived.

    verbose : bool (default True)
        Whether or not to report the progress of the processing.

    Methods
    -------
    compute :
        Compute the bispectrum, averaged over epochs.

    copy :
        Return a copy of the object.

    Attributes
    ----------
    results : ~pybispectra.utils.ResultsGeneral
        Bispectrum results.

    data : ~numpy.ndarray of float, shape of [epochs, channels, frequencies]
        Fourier coefficients.

    freqs : ~numpy.ndarray of float, shape of [frequencies]
        Frequencies (in Hz) in ``data``.

    sampling_freq : int | float
        Sampling frequency (in Hz) of the data from which ``data`` was derived.

    verbose : bool
        Whether or not to report the progress of the processing.

    Notes
    -----

    .. versionadded:: 1.2
    """
    def compute(
        self,
        indices: tuple[tuple[int]] | None = None,
        f1s: tuple[int | float] | None = None,
        f2s: tuple[int | float] | None = None,
        n_jobs: int = 1,
    ) -> None:
        r"""Compute the bispectrum, averaged over epochs.

        Parameters
        ----------
        indices : tuple of tuple of int, length of 3 | None (default None)
            Indices of the channels :math:`k`, :math:`m`, and :math:`n`, respectively,
            to compute the bispectrum for. If :obj:`None`, the bispectrum for all
            channel combinations is computed.

        f1s : tuple of int or float, length of 2 | None (default None)
            Start and end lower frequencies to compute the bispectrum for, respectively.
            If :obj:`None`, all frequencies are used.

        f2s : tuple of int or float, length of 2 | None (default None)
            Start and end higher frequencies to compute the bispectrum for,
            respectively. If :obj:`None`, all frequencies are used.

        n_jobs : int (default ``1``)
            The number of jobs to run in parallel. If ``-1``, all available CPUs are
            used.

        Notes
        -----
        The bispectrum, :math:`\textbf{B}`, has the general form

        :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2)
        \textbf{n}^*(f_2+f_1)>` ,

        where :math:`kmn` is a combination of signals with Fourier coefficients
        :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively;
        :math:`f_1` and :math:`f_2` correspond to a lower and higher frequency,
        respectively; and :math:`<>` represents the average value over epochs.

        The bispectrum is computed between all values of ``f1s`` and ``f2s``. If any
        value of ``f1s`` is higher than ``f2s``, a :obj:`numpy.nan` value is returned.
        """
        self._reset_attrs()

        # validate inputs and resolve defaults (base-class helpers)
        self._sort_indices(indices)
        self._sort_freqs(f1s, f2s)
        self._sort_parallelisation(n_jobs)

        if self.verbose:
            print("Computing bispectrum...\n")

        self._compute_bispectrum()
        self._store_results()

        if self.verbose:
            print(" ... Bispectrum computation finished\n")

    def _reset_attrs(self) -> None:
        """Reset attrs. of the object to prevent interference."""
        super()._reset_attrs()

        self._bispectrum = None

    def _compute_bispectrum(self) -> None:
        """Compute bispectrum between f1s and f2s of seeds and targets.

        Fills ``self._bispectrum`` with a complex array of shape
        [1, combinations, f1s, f2s].
        """
        # one (k, m, n) channel triple per requested combination
        loop_kwargs = [
            {"kmn": np.array([np.array([k, m, n])])}
            for (k, m, n) in zip(self._k, self._m, self._n)
        ]
        # kwargs shared by every combination; bispectrum values are complex
        static_kwargs = {
            "data": self.data,
            "freqs": self.freqs,
            "f1s": self._f1s,
            "f2s": self._f2s,
            "precision": _precision.complex,
        }

        try:
            # the per-combination output carries a singleton axis (one kmn
            # triple per call), which is moved to the front by the transpose
            self._bispectrum = _compute_in_parallel(
                func=_compute_bispectrum,
                loop_kwargs=loop_kwargs,
                static_kwargs=static_kwargs,
                output=np.zeros(
                    (self._n_cons, 1, self._f1s.size, self._f2s.size),
                    dtype=_precision.complex,
                ),
                message="Processing combinations...",
                n_jobs=self._n_jobs,
                verbose=self.verbose,
                prefer="processes",
            ).transpose(1, 0, 2, 3)
        except MemoryError as error:  # pragma: no cover
            # re-raise with actionable advice; chaining preserves the traceback
            raise MemoryError(
                "Memory allocation for the bispectrum computation failed. Try reducing "
                "the sampling frequency of the data, or reduce the precision of the "
                "computation with `pybispectra.set_precision('single')`."
            ) from error
    def _store_results(self) -> None:
        """Store computed bispectrum in an object."""
        self._results = ResultsGeneral(
            self._bispectrum[0],  # drop the singleton leading axis
            self._indices,
            self._f1s,
            self._f2s,
            "Bispectrum",
        )

    @property
    def results(self) -> ResultsGeneral:
        """Bispectrum results from the most recent call of `compute`."""
        return self._results


class Threenorm(_General):
    """Class for computing the threenorm.

    Parameters
    ----------
    data : ~numpy.ndarray, shape of [epochs, channels, frequencies]
        Fourier coefficients.

    freqs : ~numpy.ndarray, shape of [frequencies]
        Frequencies (in Hz) in ``data``. Frequencies are expected to be evenly spaced.

    sampling_freq : int | float
        Sampling frequency (in Hz) of the data from which ``data`` was derived.

    verbose : bool (default True)
        Whether or not to report the progress of the processing.

    Methods
    -------
    compute :
        Compute the threenorm, averaged over epochs.

    copy :
        Return a copy of the object.

    Attributes
    ----------
    results : ~pybispectra.utils.ResultsGeneral
        Threenorm results.

    data : ~numpy.ndarray of float, shape of [epochs, channels, frequencies]
        Fourier coefficients.

    freqs : ~numpy.ndarray of float, shape of [frequencies]
        Frequencies (in Hz) in ``data``.

    sampling_freq : int | float
        Sampling frequency (in Hz) of the data from which ``data`` was derived.

    verbose : bool
        Whether or not to report the progress of the processing.

    Notes
    -----

    .. versionadded:: 1.2
    """

    def compute(
        self,
        indices: tuple[tuple[int]] | None = None,
        f1s: tuple[int | float] | None = None,
        f2s: tuple[int | float] | None = None,
        n_jobs: int = 1,
    ) -> None:
        r"""Compute the threenorm, averaged over epochs.

        Parameters
        ----------
        indices : tuple of tuple of int, length of 3 | None (default None)
            Indices of the channels :math:`k`, :math:`m`, and :math:`n`, respectively,
            to compute the threenorm for. If :obj:`None`, the threenorm for all channel
            combinations is computed.

        f1s : tuple of int or float, length of 2 | None (default None)
            Start and end lower frequencies to compute the threenorm for, respectively.
            If :obj:`None`, all frequencies are used.

        f2s : tuple of int or float, length of 2 | None (default None)
            Start and end higher frequencies to compute the threenorm for, respectively.
            If :obj:`None`, all frequencies are used.

        n_jobs : int (default ``1``)
            The number of jobs to run in parallel. If ``-1``, all available CPUs are
            used.

        Notes
        -----
        The threenorm, :math:`\textbf{N}`, :footcite:`Shahbazi2014` has the
        general form

        :math:`\textbf{N}_{kmn}(f_1,f_2)=(<|\textbf{k}(f_1)|^3><|\textbf{m} (f_2)|^3>
        <|\textbf{n}(f_2+f_1)|^3>)^{\frac{1}{3}}` ,

        where :math:`kmn` is a combination of signals with Fourier coefficients
        :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively;
        :math:`f_1` and :math:`f_2` correspond to a lower and higher frequency,
        respectively; and :math:`<>` represents the average value over epochs.

        The threenorm is computed between all values of ``f1s`` and ``f2s``. If any
        value of ``f1s`` is higher than ``f2s``, a :obj:`numpy.nan` value is returned.

        References
        ----------
        .. footbibliography::
        """
        self._reset_attrs()

        # validate inputs and resolve defaults (base-class helpers)
        self._sort_indices(indices)
        self._sort_freqs(f1s, f2s)
        self._sort_parallelisation(n_jobs)

        if self.verbose:
            print("Computing threenorm...\n")

        self._compute_threenorm()
        self._store_results()

        if self.verbose:
            print(" ... Threenorm computation finished\n")

    def _reset_attrs(self) -> None:
        """Reset attrs. of the object to prevent interference."""
        super()._reset_attrs()

        self._threenorm = None

    def _compute_threenorm(self) -> None:
        """Compute threenorm between f1s and f2s of seeds and targets.

        Fills ``self._threenorm`` with a real array of shape
        [1, combinations, f1s, f2s].
        """
        # one (k, m, n) channel triple per requested combination
        loop_kwargs = [
            {"kmn": np.array([np.array([k, m, n])])}
            for (k, m, n) in zip(self._k, self._m, self._n)
        ]
        # kwargs shared by every combination; threenorm values are real
        static_kwargs = {
            "data": self.data,
            "freqs": self.freqs,
            "f1s": self._f1s,
            "f2s": self._f2s,
            "precision": _precision.real,
        }

        try:
            # the per-combination output carries a singleton axis (one kmn
            # triple per call), which is moved to the front by the transpose
            self._threenorm = _compute_in_parallel(
                func=_compute_threenorm,
                loop_kwargs=loop_kwargs,
                static_kwargs=static_kwargs,
                output=np.zeros(
                    (self._n_cons, 1, self._f1s.size, self._f2s.size),
                    dtype=_precision.real,
                ),
                message="Processing combinations...",
                n_jobs=self._n_jobs,
                verbose=self.verbose,
                prefer="processes",
            ).transpose(1, 0, 2, 3)
        except MemoryError as error:  # pragma: no cover
            # re-raise with actionable advice; chaining preserves the traceback
            raise MemoryError(
                "Memory allocation for the threenorm computation failed. Try reducing "
                "the sampling frequency of the data, or reduce the precision of the "
                "computation with `pybispectra.set_precision('single')`."
            ) from error

    def _store_results(self) -> None:
        """Store computed threenorm in an object."""
        self._results = ResultsGeneral(
            self._threenorm[0],  # drop the singleton leading axis
            self._indices,
            self._f1s,
            self._f2s,
            "Threenorm",
        )

    @property
    def results(self) -> ResultsGeneral:
        """Threenorm results from the most recent call of `compute`."""
        return self._results
24 | """ 25 | if precision not in ["single", "double"]: 26 | raise ValueError("`precision` must be either 'single' or 'double'.") 27 | 28 | if precision == "single": 29 | self.type = "single" 30 | self.real = np.float32 31 | self.complex = np.complex64 32 | else: 33 | self.type = "double" 34 | self.real = np.float64 35 | self.complex = np.complex128 36 | 37 | 38 | _precision = _Precision() 39 | -------------------------------------------------------------------------------- /src/pybispectra/utils/_docs.py: -------------------------------------------------------------------------------- 1 | """Documentation-related helper functions.""" 2 | 3 | import inspect 4 | import os 5 | import sys 6 | 7 | import pybispectra 8 | 9 | 10 | def linkcode_resolve(domain: str, info: dict): 11 | """Determine the URL corresponding to a Python object. 12 | 13 | Parameters 14 | ---------- 15 | domain : str 16 | Only useful when 'py'. 17 | info : dict 18 | With keys "module" and "fullname". 19 | 20 | Returns 21 | ------- 22 | url : str 23 | The code URL. 24 | 25 | Notes 26 | ----- 27 | Shamelessly stolen from MNE-Python. 
def linkcode_resolve(domain: str, info: dict):
    """Determine the URL corresponding to a Python object.

    Parameters
    ----------
    domain : str
        Only useful when 'py'.
    info : dict
        With keys "module" and "fullname".

    Returns
    -------
    url : str
        The code URL, or None if the object cannot be resolved to a source
        location.

    Notes
    -----
    Shamelessly stolen from MNE-Python.
    """
    if domain != "py":
        return None

    modname = info["module"]
    fullname = info["fullname"]

    # the module must already be imported by the docs build
    submod = sys.modules.get(modname)
    if submod is None:
        return None

    # walk attribute path (e.g. "Class.method") down from the module
    obj = submod
    for part in fullname.split("."):
        try:
            obj = getattr(obj, part)
        except Exception:
            return None
    # deal with our decorators properly
    while hasattr(obj, "__wrapped__"):
        obj = obj.__wrapped__

    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        # fall back to the source file of the object's defining module
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None
    if not fn:
        return None
    # path relative to the installed package root
    fn = os.path.relpath(fn, start=os.path.dirname(pybispectra.__file__))
    fn = "/".join(os.path.normpath(fn).split(os.sep))  # in case on Windows

    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None

    # build a GitHub "#L<start>-L<end>" anchor when line numbers are known
    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""

    # dev builds link to the main branch, releases to the version tag
    if "dev" in pybispectra.__version__:
        kind = "main"
    else:
        kind = ".".join(pybispectra.__version__.split("."))
    return (
        f"http://github.com/braindatalab/pybispectra/tree/{kind}/src/"
        f"pybispectra/{fn}{linespec}"
    )
def _compute_in_parallel(
    func: callable,
    loop_kwargs: list[dict],
    static_kwargs: dict,
    output: np.ndarray,
    message: str,
    n_jobs: int,
    verbose: bool,
    prefer: str = "processes",
) -> np.ndarray:
    """Parallelise a function with a progress bar.

    Parameters
    ----------
    func : callable
        Function to parallelise.

    loop_kwargs : list of dict
        List of keyword arguments to pass to the function that change for each iteration
        of the parallelisation.

    static_kwargs : dict
        Dictionary of keyword arguments to pass to the function that do not change
        across iterations.

    output : numpy.ndarray
        Array to store the output of the computation. Values for each iteration of the
        parallelisation are stored in the first dimension, which must be at least as
        large as the length of the values in ``loop_kwargs``.

    message : str
        Message to display in the progress bar.

    n_jobs : int
        Number of jobs to run in parallel.

    verbose : bool
        Whether or not to report the progress of the processing.

    prefer : str (default "processes")
        Whether to use "threads" or "processes" for parallelisation.

    Returns
    -------
    output : numpy.ndarray
        Array with the output of the computation (same object as the input,
        filled in place).

    Notes
    -----
    Relies on the MNE progress bar and parallel implementations. Does not perform checks
    on inputs for speed.
    """
    n_steps = len(loop_kwargs)
    # work is dispatched in blocks of `n_jobs` steps so the progress bar can
    # advance once per block
    n_blocks = int(np.ceil(n_steps / n_jobs))
    parallel, my_parallel_func, _ = parallel_func(
        func, n_jobs, prefer=prefer, verbose=verbose
    )
    old_log_level = set_log_level(
        verbose="INFO" if verbose else "WARNING", return_old_level=True
    )  # need to set log level that is passed to tqdm
    for block_i in ProgressBar(range(n_blocks), mesg=message):
        idcs = _get_block_indices(block_i, n_steps, n_jobs)
        # each parallel result is written into its own slot of `output`
        output[idcs] = parallel(
            my_parallel_func(**loop_kwargs[idx], **static_kwargs) for idx in idcs
        )
    set_log_level(verbose=old_log_level)  # reset log level

    return output


def _get_block_indices(block_i: int, limit: int, n_jobs: int) -> np.ndarray:
    """Get the indices for a block of parallel computation, capped by a limit.

    Parameters
    ----------
    block_i : int
        Index of the block to get indices for.

    limit : int
        Maximum index to return.

    n_jobs : int
        Number of jobs to run in parallel.

    Returns
    -------
    indices : numpy.ndarray of int
        Indices for the block of parallel computation. The final block may be
        shorter than ``n_jobs`` when ``limit`` is not a multiple of it.
    """
    return np.arange(block_i * n_jobs, np.min([(block_i + 1) * n_jobs, limit]))
129 | """ 130 | for idx, val in enumerate(vector[start_idx:]): 131 | if val == value: 132 | return idx + start_idx 133 | raise ValueError("`value` is not present in `vector`.") 134 | 135 | 136 | @njit 137 | def _compute_pearsonr_2d( 138 | x: np.ndarray, y: np.ndarray, precision: type 139 | ) -> np.ndarray: # pragma: no cover 140 | """Compute Pearson correlation for epochs over time. 141 | 142 | Parameters 143 | ---------- 144 | x : numpy.ndarray, shape of [epochs, times] 145 | Array of time-series values to compute correlation of with ``y``. 146 | 147 | y : numpy.ndarray, shape of [epochs, times] 148 | Array of time-series values to compute correlation of with ``x``. 149 | 150 | precision : type 151 | Precision to use for the computation. Either ``numpy.float32`` (single) or 152 | ``numpy.float64`` (double). 153 | 154 | Returns 155 | ------- 156 | pearsonr : numpy.ndarray, shape of [epochs] 157 | Correlation coefficient between ``x`` and ``y`` over time for each epoch. 158 | 159 | Notes 160 | ----- 161 | Does not perform checks on inputs for speed. 162 | """ 163 | x_minus_mean = np.full(x.shape, fill_value=np.nan, dtype=precision) 164 | y_minus_mean = np.full(y.shape, fill_value=np.nan, dtype=precision) 165 | for idx in range(x.shape[0]): # same as y.shape[0] 166 | x_minus_mean[idx] = x[idx] - np.mean(x[idx]) 167 | y_minus_mean[idx] = y[idx] - np.mean(y[idx]) 168 | 169 | numerator = np.sum(np.multiply(x_minus_mean, y_minus_mean), axis=-1) 170 | denominator = np.sqrt( 171 | np.multiply( 172 | np.sum(np.square(x_minus_mean), axis=-1), 173 | np.sum(np.square(y_minus_mean), axis=-1), 174 | ) 175 | ) 176 | 177 | return np.divide(numerator, denominator).astype(precision) 178 | 179 | 180 | def _create_mne_info(n_chans: int, sampling_freq: float) -> Info: 181 | """Create an MNE Info object. 182 | 183 | Parameters 184 | ---------- 185 | n_chans : int 186 | Number of channels in the data to create names and types for. 
def _create_mne_info(n_chans: int, sampling_freq: float) -> Info:
    """Create an MNE Info object.

    Parameters
    ----------
    n_chans : int
        Number of channels in the data to create names and types for.

    sampling_freq : float
        Sampling frequency of the data (in Hz).

    Returns
    -------
    info : mne.Info
        MNE Info object.

    Notes
    -----
    Names are set as ``[str(i) for i in range(n_chans)]``, and channel types are all set
    to EEG (any MNE *data* type could be used; note that not all MNE channel types are
    recognised as data types).
    """
    names = [str(idx) for idx in range(n_chans)]
    types = ["eeg"] * n_chans  # must be an MNE data channel type

    return create_info(names, sampling_freq, types, verbose=False)


def _generate_data(
    n_epochs: int, n_chans: int, n_times: int, seed: int = 44
) -> np.ndarray:
    """Generate reproducible random data of shape (epochs, channels, times)."""
    rng = np.random.RandomState(seed)
    return rng.rand(n_epochs, n_chans, n_times).astype(_precision.real)
for computing waveshape properties using bicoherence. 19 | 20 | Parameters 21 | ---------- 22 | data : ~numpy.ndarray, shape of [epochs, channels, frequencies] 23 | Fourier coefficients. 24 | 25 | freqs : ~numpy.ndarray, shape of [frequencies] 26 | Frequencies (in Hz) in ``data``. Frequencies are expected to be evenly spaced. 27 | 28 | sampling_freq : int | float 29 | Sampling frequency (in Hz) of the data from which ``data`` was derived. 30 | 31 | verbose : bool (default True) 32 | Whether or not to report the progress of the processing. 33 | 34 | Methods 35 | ------- 36 | compute : 37 | Compute bicoherence within channels, averaged over epochs. 38 | 39 | copy : 40 | Return a copy of the object. 41 | 42 | Attributes 43 | ---------- 44 | results : ~pybispectra.utils.ResultsWaveShape 45 | Waveshape results. 46 | 47 | data : ~numpy.ndarray, shape of [epochs, channels, frequencies] 48 | Fourier coefficients. 49 | 50 | freqs : ~numpy.ndarray, shape of [frequencies] 51 | Frequencies (in Hz) in ``data``. 52 | 53 | sampling_freq : int | float 54 | Sampling frequency (in Hz) of the data from which ``data`` was derived. 55 | 56 | verbose : bool 57 | Whether or not to report the progress of the processing. 58 | 59 | Notes 60 | ----- 61 | It is recommended that spatio-spectral filtering for a given frequency band of 62 | interest has been performed before analysing waveshape properties 63 | :footcite:`Bartz2019`. This can enhance the signal-to-noise ratio of your data as 64 | well as mitigate the risks of source-mixing in the sensor space compromising the 65 | bicoherence patterns of the data :footcite:`Bartz2019`. Filtering can be performed 66 | with :class:`pybispectra.utils.SpatioSpectralFilter`. 67 | 68 | References 69 | ---------- 70 | .. 
footbibliography:: 71 | """ 72 | 73 | def compute( 74 | self, 75 | indices: tuple[int] | None = None, 76 | f1s: tuple[int | float] | None = None, 77 | f2s: tuple[int | float] | None = None, 78 | n_jobs: int = 1, 79 | ) -> None: 80 | r"""Compute bicoherence within channels, averaged over epochs. 81 | 82 | Parameters 83 | ---------- 84 | indices : tuple of int | None (default None) 85 | Indices of the channels to compute bicoherence within. If :obj:`None`, 86 | bicoherence within all channels is computed. 87 | 88 | f1s : tuple of int or float, length of 2 | None (default None) 89 | Start and end lower frequencies to compute bicoherence for, respectively. If 90 | :obj:`None`, all frequencies are used. 91 | 92 | f2s : tuple of int or float, length of 2 | None (default None) 93 | Start and end higher frequencies to compute bicoherence for, respectively. 94 | If :obj:`None`, all frequencies are used. 95 | 96 | n_jobs : int (default ``1``) 97 | The number of jobs to run in parallel. If ``-1``, all available CPUs are 98 | used. 99 | 100 | Notes 101 | ----- 102 | Non-sinudoisal waveshape features can be extracted using bispectrum-based 103 | methods. The bispectrum has the general form 104 | 105 | :math:`\textbf{B}_{kmn}(f_1,f_2)=<\textbf{k}(f_1)\textbf{m}(f_2) 106 | \textbf{n}^*(f_2+f_1)>` , 107 | 108 | where :math:`kmn` is a combination of signals with Fourier coefficients 109 | :math:`\textbf{k}`, :math:`\textbf{m}`, and :math:`\textbf{n}`, respectively; 110 | :math:`f_1` and :math:`f_2` correspond to a lower and higher frequency, 111 | respectively; and :math:`<>` represents the average value over epochs. When 112 | analysing waveshape, we are interested in only a single signal, and as such 113 | :math:`k=m=n`. 
114 | 115 | Furthermore, we can normalise the bispectrum to the bicoherence, 116 | :math:`\boldsymbol{\mathcal{B}}`, using the threenorm, :math:`\textbf{N}`, 117 | :footcite:`Shahbazi2014` 118 | 119 | :math:`\textbf{N}_{xxx}(f_1,f_2)=(<|\textbf{x}(f_1)|^3><|\textbf{x} (f_2)|^3> 120 | <|\textbf{x}(f_2+f_1)|^3>)^{\frac{1}{3}}` , 121 | 122 | :math:`\boldsymbol{\mathcal{B}}_{xxx}(f_1,f_2)=\Large\frac{\textbf{B}_{xxx} 123 | (f_1,f_2)}{\textbf{N}_{xxx}(f_1,f_2)}` , 124 | 125 | where the resulting values lie in the range :math:`[-1, 1]`. 126 | 127 | Bicoherence is computed for all values of ``f1s`` and ``f2s``. If any value of 128 | ``f1s`` is higher than ``f2s``, a :obj:`numpy.nan` value is returned. 129 | 130 | References 131 | ---------- 132 | .. footbibliography:: 133 | """ 134 | self._reset_attrs() 135 | 136 | self._sort_indices(indices) 137 | self._sort_freqs(f1s, f2s) 138 | self._sort_parallelisation(n_jobs) 139 | 140 | if self.verbose: 141 | print("Computing bicoherence...\n") 142 | 143 | self._compute_bispectrum() 144 | self._compute_threenorm() 145 | self._bicoherence = self._bispectrum / self._threenorm 146 | self._store_results() 147 | 148 | if self.verbose: 149 | print(" ... Bicoherence computation finished\n") 150 | 151 | def _reset_attrs(self) -> None: 152 | """Reset attrs. 
of the object to prevent interference.""" 153 | super()._reset_attrs() 154 | 155 | self._bispectrum = None 156 | self._threenorm = None 157 | self._bicoherence = None 158 | 159 | def _sort_indices(self, indices: tuple[int]) -> None: 160 | """Sort channel indices inputs.""" 161 | if indices is None: 162 | indices = tuple(range(self._n_chans)) 163 | if not isinstance(indices, tuple): 164 | raise TypeError("`indices` must be a tuple.") 165 | if any(not isinstance(idx, int) for idx in indices): 166 | raise TypeError("Entries of `indices` must be ints.") 167 | 168 | if any(idx < 0 or idx >= self._n_chans for idx in indices): 169 | raise ValueError( 170 | "`indices` contains indices for channels not present in the data." 171 | ) 172 | 173 | self._n_cons = len(indices) 174 | self._indices = indices 175 | 176 | def _compute_bispectrum(self) -> None: 177 | """Compute bispectrum between f1s and f2s within channels.""" 178 | if self.verbose: 179 | print(" Computing bispectrum...") 180 | 181 | loop_kwargs = [{"data": self.data[:, [channel]]} for channel in self._indices] 182 | static_kwargs = { 183 | "freqs": self.freqs, 184 | "f1s": self._f1s, 185 | "f2s": self._f2s, 186 | "kmn": np.array([np.array([0, 0, 0])]), 187 | "precision": _precision.complex, 188 | } 189 | try: 190 | self._bispectrum = _compute_in_parallel( 191 | func=_compute_bispectrum, 192 | loop_kwargs=loop_kwargs, 193 | static_kwargs=static_kwargs, 194 | output=np.zeros( 195 | (self._n_cons, 1, self._f1s.size, self._f2s.size), 196 | dtype=_precision.complex, 197 | ), 198 | message="Processing channels...", 199 | n_jobs=self._n_jobs, 200 | verbose=self.verbose, 201 | prefer="processes", 202 | ).transpose(1, 0, 2, 3)[0] 203 | except MemoryError as error: # pragma: no cover 204 | raise MemoryError( 205 | "Memory allocation for the bispectrum computation failed. Try reducing " 206 | "the sampling frequency of the data, or reduce the precision of the " 207 | "computation with `pybispectra.set_precision('single')`." 
208 | ) from error 209 | 210 | if self.verbose: 211 | print(" ... Bispectrum computation finished\n") 212 | 213 | def _compute_threenorm(self) -> None: 214 | """Compute threenorm between f1s and f2s within channels.""" 215 | if self.verbose: 216 | print(" Computing threenorm...") 217 | 218 | loop_kwargs = [{"data": self.data[:, [channel]]} for channel in self._indices] 219 | static_kwargs = { 220 | "freqs": self.freqs, 221 | "f1s": self._f1s, 222 | "f2s": self._f2s, 223 | "kmn": np.array([np.array([0, 0, 0])]), 224 | "precision": _precision.real, 225 | } 226 | try: 227 | self._threenorm = _compute_in_parallel( 228 | func=_compute_threenorm, 229 | loop_kwargs=loop_kwargs, 230 | static_kwargs=static_kwargs, 231 | output=np.zeros( 232 | (self._n_cons, 1, self._f1s.size, self._f2s.size), 233 | dtype=_precision.real, 234 | ), 235 | message="Processing channels...", 236 | n_jobs=self._n_jobs, 237 | verbose=self.verbose, 238 | prefer="processes", 239 | ).transpose(1, 0, 2, 3)[0] 240 | except MemoryError as error: # pragma: no cover 241 | raise MemoryError( 242 | "Memory allocation for the threenorm computation failed. Try reducing " 243 | "the sampling frequency of the data, or reduce the precision of the " 244 | "computation with `pybispectra.set_precision('single')`." 245 | ) from error 246 | 247 | if self.verbose: 248 | print(" ... 
Threenorm computation finished\n") 249 | 250 | def _store_results(self) -> None: 251 | """Store computed results in objects.""" 252 | self._results = ResultsWaveShape( 253 | data=self._bicoherence, 254 | indices=self._indices, 255 | f1s=self._f1s, 256 | f2s=self._f2s, 257 | name="Waveshape", 258 | ) 259 | 260 | @property 261 | def results(self) -> ResultsWaveShape: 262 | return self._results 263 | -------------------------------------------------------------------------------- /tests/test_tde.py: -------------------------------------------------------------------------------- 1 | """Tests for TDE tools.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from pybispectra.data import get_example_data_paths 7 | from pybispectra.tde import TDE 8 | from pybispectra.utils import ResultsTDE, compute_fft 9 | from pybispectra.utils._utils import _generate_data 10 | 11 | 12 | def test_error_catch() -> None: 13 | """Check that the TDE class catches errors.""" 14 | n_chans = 3 15 | n_epochs = 5 16 | n_times = 100 17 | sampling_freq = 50 18 | data = _generate_data(n_epochs, n_chans, n_times) 19 | indices = ((0, 1, 2), (0, 1, 2)) 20 | 21 | coeffs, freqs = compute_fft( 22 | data=data, 23 | sampling_freq=sampling_freq, 24 | n_points=2 * n_times + 1, 25 | window="hamming", 26 | ) 27 | 28 | # initialisation 29 | with pytest.raises(TypeError, match="`data` must be a NumPy array."): 30 | TDE(coeffs.tolist(), freqs, sampling_freq) 31 | with pytest.raises(ValueError, match="`data` must be a 3D array."): 32 | TDE(np.random.randn(2, 2), freqs, sampling_freq) 33 | 34 | with pytest.raises(TypeError, match="`freqs` must be a NumPy array."): 35 | TDE(coeffs, freqs.tolist(), sampling_freq) 36 | with pytest.raises(ValueError, match="`freqs` must be a 1D array."): 37 | TDE(coeffs, np.random.randn(2, 2), sampling_freq) 38 | with pytest.raises(ValueError, match="The first entry of `freqs` must be 0."): 39 | TDE(coeffs[..., 1:], freqs[1:], sampling_freq) 40 | 41 | with pytest.raises( 42 | 
def test_error_catch() -> None:
    """Check that the TDE class catches errors."""
    n_chans, n_epochs, n_times = 3, 5, 100
    sampling_freq = 50
    data = _generate_data(n_epochs, n_chans, n_times)
    indices = ((0, 1, 2), (0, 1, 2))

    fft_coeffs, freqs = compute_fft(
        data=data,
        sampling_freq=sampling_freq,
        n_points=2 * n_times + 1,
        window="hamming",
    )

    # initialisation
    with pytest.raises(TypeError, match="`data` must be a NumPy array."):
        TDE(fft_coeffs.tolist(), freqs, sampling_freq)
    with pytest.raises(ValueError, match="`data` must be a 3D array."):
        TDE(np.random.randn(2, 2), freqs, sampling_freq)

    with pytest.raises(TypeError, match="`freqs` must be a NumPy array."):
        TDE(fft_coeffs, freqs.tolist(), sampling_freq)
    with pytest.raises(ValueError, match="`freqs` must be a 1D array."):
        TDE(fft_coeffs, np.random.randn(2, 2), sampling_freq)
    with pytest.raises(ValueError, match="The first entry of `freqs` must be 0."):
        TDE(fft_coeffs[..., 1:], freqs[1:], sampling_freq)

    with pytest.raises(
        ValueError,
        match=("`data` and `freqs` must contain the same number of frequencies."),
    ):
        TDE(fft_coeffs, freqs[:-1], sampling_freq)

    with pytest.raises(
        ValueError, match="At least one entry of `freqs` is > the Nyquist frequency."
    ):
        TDE(
            fft_coeffs,
            np.linspace(0, sampling_freq / 2 + 1, freqs.size),
            sampling_freq,
        )
    max_freq_i = np.argwhere(freqs == np.max(freqs))[0][0]
    with pytest.raises(
        ValueError, match="Entries of `freqs` must be in ascending order."
    ):
        descending = np.hstack(
            (freqs[: max_freq_i + 1][::-1], freqs[max_freq_i + 1 :])
        )
        TDE(fft_coeffs, descending, sampling_freq)
    with pytest.raises(ValueError, match="Entries of `freqs` must be evenly spaced."):
        uneven_freqs = freqs.copy()
        uneven_freqs[1] *= 2
        TDE(fft_coeffs, uneven_freqs, sampling_freq)

    with pytest.raises(TypeError, match="`sampling_freq` must be an int or a float."):
        TDE(fft_coeffs, freqs, None)

    with pytest.raises(TypeError, match="`verbose` must be a bool."):
        TDE(fft_coeffs, freqs, sampling_freq, "verbose")

    # compute
    tde = TDE(fft_coeffs, freqs, sampling_freq)

    with pytest.raises(TypeError, match="`antisym` must be a bool or tuple of bools."):
        tde.compute(antisym="true")
    with pytest.raises(TypeError, match="Entries of `antisym` must be bools."):
        tde.compute(antisym=("true",))

    with pytest.raises(TypeError, match="`method` must be an int or tuple of ints."):
        tde.compute(method=0.5)
    for bad_method in (0, (1, 5)):  # unknown single / unknown within tuple
        with pytest.raises(
            ValueError, match="The value of `method` is not recognised."
        ):
            tde.compute(method=bad_method)

    with pytest.raises(TypeError, match="`indices` must be a tuple."):
        tde.compute(indices=list(indices))
    with pytest.raises(ValueError, match="`indices` must have length of 2."):
        tde.compute(indices=(0, 1, 2))
    with pytest.raises(TypeError, match="Entries of `indices` must be tuples."):
        tde.compute(indices=(0, 1))
    with pytest.raises(
        TypeError, match="Entries for seeds and targets in `indices` must be ints."
    ):
        tde.compute(indices=((0.0,), (1.0,)))
    with pytest.raises(
        ValueError,
        match=("`indices` contains indices for channels not present in the data."),
    ):
        tde.compute(indices=((0,), (99,)))
    with pytest.raises(
        ValueError, match="Entries of `indices` must have equal length."
    ):
        tde.compute(indices=((0,), (1, 2)))
    with pytest.raises(
        ValueError,
        match=(
            "Seeds and targets in `indices` must not be the same channel for any "
            "connection."
        ),
    ):
        tde.compute(indices=((0,), (0,)))

    with pytest.raises(TypeError, match="`fmin` must be an int, float, or tuple."):
        tde.compute(fmin="0")
    with pytest.raises(TypeError, match="`fmax` must be an int, float, or tuple."):
        tde.compute(fmax="10")
    with pytest.raises(
        ValueError, match="`fmin` and `fmax` must have the same length."
    ):
        tde.compute(fmin=0, fmax=(10, 20))
    with pytest.raises(ValueError, match="Entries of `fmin` must be >= 0."):
        tde.compute(fmin=-1)
    with pytest.raises(
        ValueError, match="Entries of `fmax` must be <= the Nyquist frequency."
    ):
        tde.compute(fmax=sampling_freq / 2 + 1)
    with pytest.raises(
        ValueError,
        match=("At least one entry of `fmin` is > the corresponding entry of `fmax`."),
    ):
        tde.compute(fmin=(5, 20), fmax=(10, 15))
    with pytest.raises(
        ValueError,
        match=(r"No frequencies are present in the data for the range \(0.1, 0.2\)."),
    ):
        tde.compute(fmin=0.1, fmax=0.2)

    with pytest.raises(TypeError, match="`n_jobs` must be an integer."):
        tde.compute(n_jobs=0.5)
    with pytest.raises(ValueError, match="`n_jobs` must be >= 1 or -1."):
        tde.compute(n_jobs=0)
@pytest.mark.parametrize("freq_bands", [(0, np.inf), (5, 10), ((5, 15), (10, 20))])
def test_tde_runs(freq_bands: tuple) -> None:
    """Test that TDE runs correctly."""
    n_chans = 3
    n_times = 100
    sampling_freq = 50
    data = _generate_data(5, n_chans, n_times)

    fft, freqs = compute_fft(
        data=data,
        sampling_freq=sampling_freq,
        n_points=2 * n_times + 1,
        window="hamming",
    )

    # check it runs with correct inputs
    tde = TDE(data=fft, freqs=freqs, sampling_freq=sampling_freq)
    tde.compute(
        antisym=(False, True),
        method=(1, 2, 3, 4),
        fmin=freq_bands[0],
        fmax=freq_bands[1],
    )

    # check the returned results have the correct type, shape, and names.
    # BUG FIX: the original used `assert (<expr> for results in ...)`, which asserts
    # on the generator object itself (always truthy) — the checks never ran.
    result_types = [
        "TDE | Method I",
        "TDE | Method II",
        "TDE | Method III",
        "TDE | Method IV",
        "TDE (antisymmetrised) | Method I",
        "TDE (antisymmetrised) | Method II",
        "TDE (antisymmetrised) | Method III",
        "TDE (antisymmetrised) | Method IV",
    ]
    # NOTE(review): default `indices` is assumed to give all unique channel pairs,
    # and shape to be (connections, times) — confirm against the TDE/ResultsTDE API
    n_cons = n_chans * (n_chans - 1) // 2
    for results, expected_name in zip(tde.results, result_types):
        assert isinstance(results, ResultsTDE)
        assert results.shape == (n_cons, len(results.times))
        assert results.name == expected_name

    for antisym_arg, symmetrise_name in zip([False, True], ["", "(antisymmetrised) "]):
        for method_arg, method_name in zip([1, 2, 3, 4], ["I", "II", "III", "IV"]):
            tde.compute(antisym=antisym_arg, method=method_arg)
            assert isinstance(tde.results, ResultsTDE)
            assert tde.results.name == f"TDE {symmetrise_name}| Method {method_name}"

    # test it runs with parallelisation
    tde.compute(n_jobs=2)
    tde.compute(n_jobs=-1)

    # test copying works
    tde_copy = tde.copy()
    for attr in tde.__dict__:
        if not attr.startswith("_"):
            assert np.all(getattr(tde, attr) == getattr(tde_copy, attr))
    assert tde is not tde_copy
215 | """ 216 | tau = 250.0 # ms 217 | 218 | # test that TDE is detected at 250 ms with independent noise 219 | # load simulated data with independent noise 220 | data = np.load(get_example_data_paths("sim_data_tde_independent_noise")) 221 | sampling_freq = 200 # Hz 222 | n_times = data.shape[2] 223 | 224 | # compute FFT 225 | fft_coeffs, freqs = compute_fft( 226 | data=data, 227 | sampling_freq=sampling_freq, 228 | n_points=2 * n_times + 1, 229 | window="hamming", 230 | ) 231 | 232 | # compute TDE (seed -> target; target -> seed) 233 | tde = TDE(data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq) 234 | tde.compute(indices=((0, 1), (1, 0)), method=(1, 2, 3, 4)) 235 | 236 | # check that 250 ms delay is detected (negative for target -> seed) 237 | assert (results.tau[0] == tau for results in tde.results) 238 | assert (results.tau[1] == -tau for results in tde.results) 239 | 240 | # test that TDE is detected at 250 ms with correlated noise (with antisym.) 241 | # load simulated data with correlated noise 242 | data = np.load(get_example_data_paths("sim_data_tde_correlated_noise")) 243 | sampling_freq = 200 # Hz 244 | n_times = data.shape[2] 245 | 246 | # compute FFT 247 | fft_coeffs, freqs = compute_fft( 248 | data=data, 249 | sampling_freq=sampling_freq, 250 | n_points=2 * n_times + 1, 251 | window="hamming", 252 | ) 253 | 254 | # compute TDE 255 | tde = TDE(data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq) 256 | tde.compute(indices=((0,), (1,)), method=(1, 2, 3, 4), antisym=False) 257 | # check that 0 ms delay is dominant without antisymmetrisation 258 | assert (results.tau[0] == 0 for results in tde.results) 259 | 260 | tde.compute(indices=((0,), (1,)), method=(1, 2, 3, 4), antisym=True) 261 | # check that 250 ms delay is recovered with antisymmetrisation 262 | assert (results.tau[0] == tau for results in tde.results) 263 | -------------------------------------------------------------------------------- /tests/test_waveshape.py: 
def test_error_catch() -> None:
    """Check that WaveShape class catches errors."""
    n_chans, n_epochs, n_times = 3, 5, 100
    sampling_freq = 50
    data = _generate_data(n_epochs, n_chans, n_times)
    indices = (0, 1, 2)

    fft_coeffs, freqs = compute_fft(data, sampling_freq)

    # initialisation
    with pytest.raises(TypeError, match="`data` must be a NumPy array."):
        WaveShape(fft_coeffs.tolist(), freqs, sampling_freq)
    with pytest.raises(ValueError, match="`data` must be a 3D array."):
        WaveShape(np.random.randn(2, 2), freqs, sampling_freq)

    with pytest.raises(TypeError, match="`freqs` must be a NumPy array."):
        WaveShape(fft_coeffs, freqs.tolist(), sampling_freq)
    with pytest.raises(ValueError, match="`freqs` must be a 1D array."):
        WaveShape(fft_coeffs, np.random.randn(2, 2), sampling_freq)

    with pytest.raises(
        ValueError,
        match=("`data` and `freqs` must contain the same number of frequencies."),
    ):
        WaveShape(fft_coeffs, freqs[:-1], sampling_freq)

    with pytest.raises(ValueError, match="Entries of `freqs` must be >= 0."):
        WaveShape(fft_coeffs, freqs * -1, sampling_freq)
    with pytest.raises(
        ValueError, match="At least one entry of `freqs` is > the Nyquist frequency."
    ):
        WaveShape(
            fft_coeffs,
            np.linspace(0, sampling_freq / 2 + 1, freqs.size),
            sampling_freq,
        )
    with pytest.raises(
        ValueError, match="Entries of `freqs` must be in ascending order."
    ):
        WaveShape(fft_coeffs, freqs[::-1], sampling_freq)
    with pytest.raises(ValueError, match="Entries of `freqs` must be evenly spaced."):
        uneven_freqs = freqs.copy()
        uneven_freqs[1] *= 2
        WaveShape(fft_coeffs, uneven_freqs, sampling_freq)

    with pytest.raises(TypeError, match="`sampling_freq` must be an int or a float."):
        WaveShape(fft_coeffs, freqs, None)

    with pytest.raises(TypeError, match="`verbose` must be a bool."):
        WaveShape(fft_coeffs, freqs, sampling_freq, "verbose")

    # compute
    waveshape = WaveShape(fft_coeffs, freqs, sampling_freq)

    with pytest.raises(TypeError, match="`indices` must be a tuple."):
        waveshape.compute(indices=list(indices))
    with pytest.raises(TypeError, match="Entries of `indices` must be ints."):
        waveshape.compute(indices=(0.0, 1.0))
    with pytest.raises(
        ValueError,
        match=("`indices` contains indices for channels not present in the data."),
    ):
        waveshape.compute(indices=(0, 99))

    for freq_kwarg in ({"f1s": [freqs[0], freqs[-1]]}, {"f2s": [freqs[0], freqs[-1]]}):
        with pytest.raises(TypeError, match="`f1s` and `f2s` must be tuples."):
            waveshape.compute(**freq_kwarg)

    with pytest.raises(TypeError, match="`n_jobs` must be an integer."):
        waveshape.compute(n_jobs=0.5)
    with pytest.raises(ValueError, match="`n_jobs` must be >= 1 or -1."):
        waveshape.compute(n_jobs=0)
def test_waveshape_runs() -> None:
    """Test that WaveShape runs correctly."""
    n_chans = 3
    sampling_freq = 50
    data = _generate_data(5, n_chans, 100)

    fft_coeffs, freqs = compute_fft(data=data, sampling_freq=sampling_freq)

    # check it runs with correct inputs
    waveshape = WaveShape(data=fft_coeffs, freqs=freqs, sampling_freq=sampling_freq)
    waveshape.compute()

    # results should have shape (channels, f1s, f2s) and the right metadata
    assert waveshape.results.shape == (n_chans, len(freqs), len(freqs))
    assert waveshape.results.name == "Waveshape"
    assert isinstance(waveshape.results, ResultsWaveShape)

    # check it runs with non-exact frequencies
    waveshape.compute(f1s=(10.25, 19.75), f2s=(10.25, 19.75))

    # test it runs with parallelisation
    waveshape.compute(n_jobs=2)
    waveshape.compute(n_jobs=-1)

    # test copying works: public attributes equal, but a distinct object
    waveshape_copy = waveshape.copy()
    for attr in waveshape.__dict__:
        if not attr.startswith("_"):
            assert np.all(getattr(waveshape, attr) == getattr(waveshape_copy, attr))
    assert waveshape is not waveshape_copy
128 | """ 129 | # tolerance for "closeness" 130 | close_atol = 0.1 131 | 132 | # identify frequencies of wave shape features (~10 Hz and harmonics) 133 | focal_freqs = np.array([10, 20, 30]) 134 | all_freqs = (0, 35) 135 | 136 | # load simulated data with non-sinusoidal features 137 | data_sawtooths = np.load(get_example_data_paths("sim_data_waveshape_sawtooths")) 138 | data_peaks_troughs = np.load( 139 | get_example_data_paths("sim_data_waveshape_peaks_troughs") 140 | ) 141 | sampling_freq = 1000 # sampling frequency in Hz 142 | 143 | # add noise for numerical stability 144 | random = RandomState(44) 145 | snr = 0.25 146 | datasets = [data_sawtooths, data_peaks_troughs] 147 | for data_idx, data in enumerate(datasets): 148 | datasets[data_idx] = snr * data + (1 - snr) * random.rand(*data.shape) 149 | data_sawtooths = datasets[0] 150 | data_peaks_troughs = datasets[1] 151 | 152 | # compute FFT 153 | coeffs_sawtooths, freqs = compute_fft( 154 | data=data_sawtooths, sampling_freq=sampling_freq, n_points=sampling_freq 155 | ) 156 | coeffs_peaks_troughs, freqs = compute_fft( 157 | data=data_peaks_troughs, sampling_freq=sampling_freq, n_points=sampling_freq 158 | ) 159 | 160 | # sawtooth waves 161 | waveshape_sawtooths = WaveShape( 162 | data=coeffs_sawtooths, freqs=freqs, sampling_freq=sampling_freq 163 | ) 164 | waveshape_sawtooths.compute(f1s=all_freqs, f2s=all_freqs) 165 | results_sawtooths = waveshape_sawtooths.results.get_results() 166 | 167 | # peaks and troughs 168 | waveshape_peaks_troughs = WaveShape( 169 | data=coeffs_peaks_troughs, freqs=freqs, sampling_freq=sampling_freq 170 | ) 171 | waveshape_peaks_troughs.compute(f1s=all_freqs, f2s=all_freqs) 172 | results_peaks_troughs = waveshape_peaks_troughs.results.get_results() 173 | 174 | # check that sawtooth features are detected 175 | for results, real_val, imag_val, phase in zip( 176 | results_sawtooths, [0, 0], [1, -1], [0.5, 1.5] 177 | ): 178 | assert np.isclose( 179 | 
np.nanmean(np.real(results[np.ix_(focal_freqs, focal_freqs)])), 180 | real_val, 181 | atol=close_atol, 182 | ) 183 | assert np.isclose( 184 | np.nanmean(np.imag(results[np.ix_(focal_freqs, focal_freqs)])), 185 | imag_val, 186 | atol=close_atol, 187 | ) 188 | # normalise phase to [0, 2) in units of pi 189 | phases = np.angle(results[np.ix_(focal_freqs, focal_freqs)]) 190 | phases[phases < 0] += 2 * np.pi 191 | phases /= np.pi 192 | assert np.isclose(np.nanmean(phases), phase, atol=close_atol) 193 | 194 | # check that peak/trough features are detected 195 | for results, real_val, imag_val, phase in zip( 196 | results_peaks_troughs, [1, -1], [0, 0], [0, 1] 197 | ): 198 | assert np.isclose( 199 | np.nanmean(np.real(results[np.ix_(focal_freqs, focal_freqs)])), 200 | real_val, 201 | atol=close_atol, 202 | ) 203 | assert np.isclose( 204 | np.nanmean(np.imag(results[np.ix_(focal_freqs, focal_freqs)])), 205 | imag_val, 206 | atol=close_atol, 207 | ) 208 | # normalise phase to [0, 2) in units of pi 209 | # take abs value of angle to account for phase wrapping (at 0, 2 pi) 210 | phases = np.abs(np.angle(results[np.ix_(focal_freqs, focal_freqs)])) 211 | phases[phases < 0] += 2 * np.pi 212 | phases /= np.pi 213 | assert np.isclose(np.nanmean(phases), phase, atol=close_atol) 214 | --------------------------------------------------------------------------------