├── cobrawap ├── VERSION ├── pipeline │ ├── report.rst │ ├── __init__.py │ ├── utils │ │ ├── __init__.py │ │ └── io_utils.py │ ├── stage01_data_entry │ │ ├── report.rst │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── check_input.py │ │ │ ├── plot_traces.py │ │ │ └── enter_data_template.py │ │ ├── Snakefile │ │ └── configs │ │ │ └── config_template.yaml │ ├── stage02_processing │ │ ├── report.rst │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── zscore.py │ │ │ ├── phase_transform.py │ │ │ ├── subsampling.py │ │ │ ├── check_input.py │ │ │ ├── normalization.py │ │ │ ├── frequency_filter.py │ │ │ ├── background_subtraction.py │ │ │ ├── plot_power_spectrum.py │ │ │ ├── plot_processed_trace.py │ │ │ ├── spatial_downsampling.py │ │ │ └── detrending.py │ │ ├── README.rst │ │ └── configs │ │ │ └── config_template.yaml │ ├── stage04_wave_detection │ │ ├── report.rst │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── time_slice.py │ │ │ ├── check_input.py │ │ │ ├── merge_wave_definitions.py │ │ │ ├── plot_clustering.py │ │ │ ├── plot_critical_points.py │ │ │ ├── trigger_clustering.py │ │ │ └── plot_waves.py │ │ ├── README.rst │ │ └── configs │ │ │ └── config_template.yaml │ ├── stage03_trigger_detection │ │ ├── report.rst │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── calc_thresholds_fixed.py │ │ │ ├── check_input.py │ │ │ ├── threshold.py │ │ │ ├── plot_trigger_times.py │ │ │ └── remove_short_states.py │ │ ├── README.rst │ │ ├── configs │ │ │ └── config_template.yaml │ │ └── Snakefile │ ├── stage05_wave_characterization │ │ ├── report.rst │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── merge_dataframes.py │ │ │ ├── number_of_triggers.py │ │ │ ├── duration.py │ │ │ ├── time_stamp.py │ │ │ ├── check_input.py │ │ │ ├── inter_wave_interval.py │ │ │ ├── velocity_planar.py │ │ │ └── annotations.py │ │ ├── Snakefile │ │ ├── README.rst │ │ └── configs │ │ │ └── config_template.yaml │ ├── stageXY_template │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── minimal_example.py │ │ │ ├── 
check_input.py │ │ │ └── script_template.py │ │ ├── configs │ │ │ └── config_template.yaml │ │ ├── README.rst │ │ └── Snakefile │ ├── stage05_channel_wave_characterization │ │ ├── report.rst │ │ ├── scripts │ │ │ ├── __init__.py │ │ │ ├── merge_dataframes.py │ │ │ ├── velocity_local.py │ │ │ ├── direction_local.py │ │ │ ├── check_input.py │ │ │ ├── flow_direction_local.py │ │ │ ├── inter_wave_interval_local.py │ │ │ └── annotations.py │ │ ├── README.rst │ │ ├── configs │ │ │ └── config_template.yaml │ │ └── Snakefile │ ├── settings_template.py │ ├── environment.yaml │ ├── configs │ │ └── config_template.yaml │ └── Snakefile └── __init__.py ├── doc ├── source │ ├── pipeline.rst │ ├── pipeline_stages.rst │ ├── command_line_interface.rst │ ├── _templates │ │ └── autosummary │ │ │ └── block.rst │ ├── index.rst │ ├── stage01_data_entry.rst │ ├── acknowledgments.rst │ ├── stageXY_template.rst │ ├── stage05_channel_wave_characterization.rst │ ├── stage05_wave_characterization.rst │ ├── stage02_processing.rst │ ├── stage03_trigger_detection.rst │ ├── stage04_wave_detection.rst │ ├── citation.rst │ ├── authors.rst │ ├── conf.py │ └── release_notes.rst ├── images │ ├── cobrawap_icon.ico │ ├── cobrawap_logo.png │ ├── folder_structure.png │ ├── institutions │ │ ├── ape.png │ │ └── infn.svg │ ├── pipeline_illustration.png │ └── cobrawap_pipeline_approach.png ├── Makefile └── make.bat ├── AUTHORS ├── ACKNOWLEDGMENTS ├── requirements ├── requirements-docs.txt └── environment-docs.yaml ├── readthedocs.yaml ├── tox.ini ├── setup.py ├── .github └── workflows │ ├── ebrains.yml │ └── publish-to-pypi-and-testpypi.yml ├── .gitignore ├── pyproject.toml └── .zenodo.json /cobrawap/VERSION: -------------------------------------------------------------------------------- 1 | 0.2.3 2 | -------------------------------------------------------------------------------- /cobrawap/pipeline/report.rst: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /cobrawap/pipeline/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage01_data_entry/report.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/report.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/report.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage01_data_entry/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/report.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/report.rst: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /cobrawap/pipeline/stageXY_template/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/report.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /doc/source/pipeline.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/README.rst 3 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Please see doc/source/authors.rst for a list of authors and contributors. 
2 | -------------------------------------------------------------------------------- /ACKNOWLEDGMENTS: -------------------------------------------------------------------------------- 1 | Please see doc/source/acknowledgments.rst for a list of acknowledgments. 2 | -------------------------------------------------------------------------------- /doc/images/cobrawap_icon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/cobrawap/HEAD/doc/images/cobrawap_icon.ico -------------------------------------------------------------------------------- /doc/images/cobrawap_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/cobrawap/HEAD/doc/images/cobrawap_logo.png -------------------------------------------------------------------------------- /cobrawap/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.2.3' 2 | __author__ = 'Cobrawap authors and contributors ' 3 | -------------------------------------------------------------------------------- /doc/images/folder_structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/cobrawap/HEAD/doc/images/folder_structure.png -------------------------------------------------------------------------------- /doc/images/institutions/ape.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/cobrawap/HEAD/doc/images/institutions/ape.png -------------------------------------------------------------------------------- /doc/images/pipeline_illustration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/cobrawap/HEAD/doc/images/pipeline_illustration.png 
-------------------------------------------------------------------------------- /doc/images/cobrawap_pipeline_approach.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/cobrawap/HEAD/doc/images/cobrawap_pipeline_approach.png -------------------------------------------------------------------------------- /requirements/requirements-docs.txt: -------------------------------------------------------------------------------- 1 | # Packages required to build docs 2 | numpydoc>=1.1.0 3 | sphinx>=3.3.0 4 | sphinx-tabs>=1.3.0 5 | sphinx-argparse>=0.3.2 6 | -------------------------------------------------------------------------------- /doc/source/pipeline_stages.rst: -------------------------------------------------------------------------------- 1 | 2 | Pipeline Stages 3 | =============== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | :glob: 8 | 9 | stage* 10 | 11 | -------------------------------------------------------------------------------- /doc/source/command_line_interface.rst: -------------------------------------------------------------------------------- 1 | 2 | Command Line Interface 3 | ====================== 4 | 5 | .. argparse:: 6 | :filename: ../../cobrawap/__main__.py 7 | :func: get_parser -------------------------------------------------------------------------------- /readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: "ubuntu-20.04" 5 | tools: 6 | python: "mambaforge-4.10" 7 | 8 | conda: 9 | environment: requirements/environment-docs.yaml 10 | 11 | sphinx: 12 | configuration: doc/source/conf.py 13 | -------------------------------------------------------------------------------- /doc/source/_templates/autosummary/block.rst: -------------------------------------------------------------------------------- 1 | {{ name | escape | underline}} 2 | **{{ module | escape }}** 3 | 4 | .. 
automodule:: {{ fullname }} 5 | 6 | .. argparse:: 7 | :module: {{ fullname }} 8 | :func: CLI 9 | :prog: {{ name }} 10 | :passparser: -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. toctree:: 2 | :maxdepth: 2 3 | :hidden: 4 | 5 | pipeline 6 | command_line_interface 7 | pipeline_stages 8 | release_notes 9 | acknowledgments 10 | authors 11 | citation 12 | 13 | .. include:: ../../README.rst 14 | 15 | 16 | -------------------------------------------------------------------------------- /cobrawap/pipeline/settings_template.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # path for generated data 4 | output_path = os.path.join(os.path.expanduser('~'), 'path/to/pipeline/output/folder/') 5 | 6 | # optional alternative path for config files 7 | # directory must contain stageXY_/config_.yaml 8 | # if None uses the pipeline working directory 9 | configs_dir = None 10 | -------------------------------------------------------------------------------- /doc/source/stage01_data_entry.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stage01_data_entry/README.rst 3 | 4 | Blocks 5 | ====== 6 | 7 | .. currentmodule:: stage01_data_entry.scripts 8 | 9 | Utility Blocks (*fixed*) 10 | ------------------------ 11 | .. 
autosummary:: 12 | :toctree: _toctree/stage01_data_entry/ 13 | :template: block 14 | 15 | check_input 16 | enter_data_template 17 | plot_traces 18 | 19 | -------------------------------------------------------------------------------- /doc/source/acknowledgments.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Acknowledgments 3 | *************** 4 | 5 | This open source software code is co-funded by: 6 | 7 | - the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreements No. 945539 (Human Brain Project SGA3) and No. 785907 (Human Brain Project SGA2) 8 | - the European Commission NextGeneration EU through grant MUR CUP B51E22000150006 (EBRAINS-Italy IR00011 PNRR) 9 | - the European Union’s Horizon Europe Programme under the Specific Grant Agreement No. 101147319 (EBRAINS 2.0 Project). 10 | -------------------------------------------------------------------------------- /doc/source/stageXY_template.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stageXY_template/README.rst 3 | 4 | 5 | Blocks 6 | ====== 7 | 8 | .. currentmodule:: stageXY_template.scripts 9 | 10 | Utility Blocks (*fixed*) 11 | ------------------------ 12 | .. autosummary:: 13 | :toctree: _toctree/stageXY_template/ 14 | :template: block 15 | 16 | check_input 17 | 18 | Other Blocks (*choose any*) 19 | ----------------------------- 20 | .. 
autosummary:: 21 | :toctree: _toctree/stageXY_template/ 22 | :template: block 23 | 24 | script_template 25 | minimal_example 26 | -------------------------------------------------------------------------------- /cobrawap/pipeline/environment.yaml: -------------------------------------------------------------------------------- 1 | name: cobrawap 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - python >= 3.8 8 | - pip >= 19.3.1 9 | - jinja2 >= 2.10.3 10 | - pygments >= 2.4.2 11 | - pygraphviz >= 1.5 12 | - snakemake == 7.9.0 13 | - h5py 14 | - shapely 15 | - pip: 16 | - elephant >= 0.10.0 17 | - neo == 0.11.0 18 | - nixio >= 1.5.3 19 | - pillow >= 7.0.0 20 | - pandas >= 1.2.0 21 | - scikit-learn >= 0.22.1 22 | - scikit-image >= 0.19.1 23 | - scipy >= 1.7.3 24 | - matplotlib >= 3.5.1 25 | - seaborn 26 | - networkx 27 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /requirements/environment-docs.yaml: -------------------------------------------------------------------------------- 1 | name: cobrawap 2 | channels: 3 | - conda-forge 4 | - bioconda 5 | - defaults 6 | dependencies: 7 | - python >= 3.8 8 | - pip >= 19.3.1 9 | - jinja2 >= 2.10.3 10 | - pygments >= 2.4.2 11 | - pygraphviz >= 1.5 12 | - snakemake >= 7.10.0 , < 8.0.0 13 | - h5py 14 | - shapely 15 | - ruamel.yaml == 0.17.32 16 | - pip: 17 | - elephant >= 1.0.0 18 | - neo >= 0.10.2 19 | - nixio >= 1.5.3 20 | - pillow >= 7.0.0 21 | - pandas >= 1.2.0 22 | - pulp < 2.8 23 | - scikit-learn >= 1.1.0 24 | - scikit-image >= 0.20.0 25 | - scipy >= 1.7.3 26 | - matplotlib >= 3.5.1 27 | - seaborn 28 | - networkx 29 | - sphinx>=3.3.0 30 | - sphinx-tabs>=1.3.0 31 | - sphinx-argparse>=0.3.2 32 | -------------------------------------------------------------------------------- /doc/source/stage05_channel_wave_characterization.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stage05_channel_wave_characterization/README.rst 3 | 4 | Blocks 5 | ====== 6 | 7 | .. currentmodule:: stage05_channel_wave_characterization.scripts 8 | 9 | Utility Blocks (*fixed*) 10 | ------------------------ 11 | .. autosummary:: 12 | :toctree: _toctree/stage05_channel_wave_characterization/ 13 | :template: block 14 | 15 | check_input 16 | merge_dataframes 17 | 18 | Measure Blocks (*choose any*) 19 | ----------------------------- 20 | .. 
autosummary:: 21 | :toctree: _toctree/stage05_channel_wave_characterization/ 22 | :template: block 23 | 24 | annotations 25 | direction_local 26 | flow_direction_local 27 | inter_wave_interval_local 28 | velocity_local 29 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/zscore.py: -------------------------------------------------------------------------------- 1 | """ 2 | Z-score the signal of each channel. 3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | from elephant.signal_processing import zscore 8 | from utils.io_utils import load_neo, write_neo 9 | 10 | CLI = argparse.ArgumentParser() 11 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 12 | help="path to input data in neo format") 13 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 14 | help="path of output file") 15 | 16 | if __name__ == '__main__': 17 | args, unknown = CLI.parse_known_args() 18 | 19 | block = load_neo(args.data) 20 | 21 | zscore(block.segments[0].analogsignals[0], inplace=True) 22 | 23 | write_neo(args.output, block) 24 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stageXY_template/scripts/minimal_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | Z-score the signal of each channel. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | from elephant.signal_processing import zscore 8 | from utils.io_utils import load_neo, write_neo 9 | 10 | CLI = argparse.ArgumentParser() 11 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 12 | help="path to input data in neo format") 13 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 14 | help="path of output file") 15 | 16 | if __name__ == '__main__': 17 | args, unknown = CLI.parse_known_args() 18 | 19 | block = load_neo(args.data) 20 | 21 | zscore(block.segments[0].analogsignals[0], inplace=True) 22 | 23 | write_neo(args.output, block) 24 | -------------------------------------------------------------------------------- /doc/source/stage05_wave_characterization.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stage05_wave_characterization/README.rst 3 | 4 | 5 | Blocks 6 | ====== 7 | 8 | .. currentmodule:: stage05_wave_characterization.scripts 9 | 10 | Utility Blocks (*fixed*) 11 | ------------------------ 12 | .. autosummary:: 13 | :toctree: _toctree/stage05_wave_characterization/ 14 | :template: block 15 | 16 | check_input 17 | merge_dataframes 18 | 19 | Measure Blocks (*choose any*) 20 | ----------------------------- 21 | .. autosummary:: 22 | :toctree: _toctree/stage05_wave_characterization/ 23 | :template: block 24 | 25 | annotations 26 | direction_planar 27 | duration 28 | inter_wave_interval 29 | label_planar 30 | number_of_triggers 31 | time_stamp 32 | velocity_planar 33 | -------------------------------------------------------------------------------- /doc/source/stage02_processing.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stage02_processing/README.rst 3 | 4 | Blocks 5 | ====== 6 | 7 | .. 
currentmodule:: stage02_processing.scripts 8 | 9 | Utility Blocks (*fixed*) 10 | ------------------------ 11 | .. autosummary:: 12 | :toctree: _toctree/stage02_processing/ 13 | :template: block 14 | 15 | check_input 16 | plot_processed_trace 17 | 18 | Processing Blocks (*choose any*) 19 | -------------------------------- 20 | .. autosummary:: 21 | :toctree: _toctree/stage02_processing/ 22 | :template: block 23 | 24 | background_subtraction 25 | detrending 26 | frequency_filter 27 | logMUA_estimation 28 | normalization 29 | phase_transform 30 | roi_selection 31 | spatial_downsampling 32 | subsampling 33 | zscore 34 | -------------------------------------------------------------------------------- /doc/source/stage03_trigger_detection.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stage03_trigger_detection/README.rst 3 | 4 | Blocks 5 | ====== 6 | 7 | .. currentmodule:: stage03_trigger_detection.scripts 8 | 9 | Utility Blocks (*fixed*) 10 | ------------------------ 11 | .. autosummary:: 12 | :toctree: _toctree/stage03_trigger_detection/ 13 | :template: block 14 | 15 | check_input 16 | plot_trigger_times 17 | 18 | Detection Blocks (*choose one*) 19 | ------------------------------- 20 | .. autosummary:: 21 | :toctree: _toctree/stage03_trigger_detection/ 22 | :template: block 23 | 24 | hilbert_phase 25 | minima 26 | threshold 27 | 28 | Trigger Filter Blocks (*choose any*) 29 | ------------------------------------ 30 | .. autosummary:: 31 | :toctree: _toctree/stage03_trigger_detection/ 32 | 33 | remove_short_states 34 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/scripts/calc_thresholds_fixed.py: -------------------------------------------------------------------------------- 1 | """ 2 | Set the threshold between Up and Down states to a fixed value. 
3 | """ 4 | 5 | import numpy as np 6 | import argparse 7 | from pathlib import Path 8 | from utils.io_utils import load_neo 9 | 10 | CLI = argparse.ArgumentParser() 11 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 12 | help="path to input data in neo format") 13 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 14 | help="path of output thresholds (numpy array)") 15 | CLI.add_argument("--threshold", nargs='?', type=float, required=True, 16 | help="") 17 | 18 | if __name__ == '__main__': 19 | args, unknown = CLI.parse_known_args() 20 | 21 | asig = load_neo(args.data, 'analogsignal') 22 | 23 | dim_t, channel_num = asig.shape 24 | 25 | np.save(args.output, np.ones(channel_num) * args.threshold) 26 | -------------------------------------------------------------------------------- /doc/source/stage04_wave_detection.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: ../../cobrawap/pipeline/stage04_wave_detection/README.rst 3 | 4 | Blocks 5 | ====== 6 | 7 | .. currentmodule:: stage04_wave_detection.scripts 8 | 9 | Utility Blocks (*fixed*) 10 | ------------------------ 11 | .. autosummary:: 12 | :toctree: _toctree/stage04_wave_detection/ 13 | :template: block 14 | 15 | check_input 16 | merge_wave_definitions 17 | plot_clustering 18 | 19 | Detection Blocks (*choose one*) 20 | ------------------------------- 21 | .. autosummary:: 22 | :toctree: _toctree/stage04_wave_detection/ 23 | :template: block 24 | 25 | trigger_clustering 26 | 27 | Additional Properties Blocks (*choose any*) 28 | ------------------------------------------- 29 | .. 
autosummary:: 30 | :toctree: _toctree/stage04_wave_detection/ 31 | :template: block 32 | 33 | critical_points 34 | optical_flow 35 | wave_mode_clustering 36 | 37 | 38 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = -n 0 --doctest-modules --pycodestyle --mypy --pylint --pylint-rcfile=.pylintrc --pydocstyle --cov --ignore=conda --ignore=env --ignore=.git --ignore=__pycache__ 3 | 4 | [pycodestyle] 5 | count = False 6 | #ignore = E226,E302,E41 7 | exclude = .git/, .snakemake/, .pytest_cache/, sync-test-env/, conda/, env/ 8 | max-line-length = 120 9 | statistics = True 10 | 11 | # as suggested by pylint docs for joint usage with other tools 12 | # 
http://pylint.pycqa.org/en/latest/faq.html?highlight=pylintrc#i-am-using-another-popular-linter-alongside-pylint-which-messages-should-i-disable-to-avoid-duplicates 13 | 14 | [pylint] 15 | options = unneeded-not, line-too-long, unnecessary-semicolon, trailing-whitespace, missing-final-newline, bad-indentation, multiple-statements, bare-except 16 | 17 | [pydocstyle] 18 | convention = numpy 19 | add-ignore = D300 20 | 21 | [mypy] 22 | exclude = .git/, .snakemake/, .pytest_cache/, sync-test-env/, conda/, env/ 23 | -------------------------------------------------------------------------------- /doc/source/citation.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Citing Cobrawap 3 | *************** 4 | To refer to the Cobrawap software package in publications, please use: 5 | 6 | Cobrawap (`doi:10.5281/zenodo.10198748 `_; 7 | `RRID:SCR_022966 `_) 8 | 9 | To cite a specific version of Cobrawap please see version-specific DOIs at: 10 | 11 | `doi:10.5281/zenodo.10198748 `_ 12 | 13 | To cite Cobrawap, please use: 14 | 15 | Gutzen, R., De Bonis, G., De Luca, C., Pastorelli, E., Capone, C., Allegra Mascaro, A. L., Resta, F., Manasanch, A., Pavone, F. S., Sanchez-Vives, M. V., Mattia, M., Grün, S., Paolucci, P. S., & Denker, M. (2022). *A modular and adaptable analysis pipeline to compare slow cerebral rhythms across heterogeneous datasets*. Cell Reports Methods 4, 100681. `https://doi.org/10.1016/j.crmeth.2023.100681 `_ 16 | 17 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stageXY_template/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | # Config file for Stage XY - Template 2 | 3 | # Name of stage, must be identical with folder name 4 | STAGE_NAME: 'stageXY_template' 5 | 6 | # The profile name is the key for this parameter configuration. 
Results are stored in output_path// (output_path is defined in settings.py) 7 | PROFILE: 'dataset_key' 8 | 9 | # Name of the output file 10 | STAGE_OUTPUT: 'some_output' 11 | 12 | # BLOCK - Offset Signal 13 | #################### 14 | # float value to add to the signal in each channel (None=0) 15 | OFFSET: 20 16 | 17 | # BLOCK - Other Block 18 | #################### 19 | # Some parameter 20 | A: 'a' 21 | 22 | # Plotting parameters 23 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 24 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 25 | PLOT_CHANNELS: 'None' # int or None. default 'None' -> randomly selected 26 | PLOT_FORMAT: 'png' 27 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stageXY_template/README.rst: -------------------------------------------------------------------------------- 1 | =================== 2 | Stage XY - Template 3 | =================== 4 | 5 | **Short statement of the stage's purpose** 6 | 7 | `config template `_ 8 | 9 | Input 10 | ===== 11 | Describe type and format of the minimally required data and metadata. 12 | 13 | *should pass* |check_input|_ 14 | 15 | .. |check_input| replace:: *check_input.py* 16 | .. _check_input: https://github.com/NeuralEnsemble/cobrawap/master/editing/pipeline/stageXY_template/scripts/check_input.py 17 | 18 | Output 19 | ====== 20 | Describe type and format of the stage output data and metadata, eventual intermediate output, and where it is stored. 21 | 22 | Usage 23 | ===== 24 | Describe the functionality of the stage, what type of blocks are used and how they can be arranged, and eventually special stage parameters. However, an account of the exact blocks and their features should be placed into the Snakefile's and scripts' docstring. 
25 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # encoding: utf8 2 | 'Packaging configuration.' 3 | 4 | from setuptools import setup # type: ignore 5 | 6 | setup() # everything configured via pyproject.toml 7 | 8 | 9 | # author="Cobrawap authors and contributors", 10 | # author_email="contact@cobrawap.org", 11 | # description="Collaborative Brain Wave Analysis Pipeline (Cobrawap)", 12 | # 13 | # entry_points={ 14 | # 'console_scripts': [ 15 | # 'cobrawap = cobrawap.__main__:main', 16 | # ], 17 | # }, 18 | # 19 | # license="GPL-3.0-or-later", 20 | # 21 | # url='https://github.com/NeuralEnsemble/cobrawap', 22 | # # https://pypi.org/pypi?:action=list_classifiers 23 | # classifiers=[ 24 | # 'Development Status :: 2 - Pre-Alpha', 25 | # 'Intended Audience :: Science/Research', 26 | # 'License :: OSI Approved :: GPL-3.0-or-later', 27 | # 'Natural Language :: English', 28 | # 'Operating System :: OS Independent', 29 | # 'Programming Language :: Python :: 3', 30 | # 'Topic :: Scientific/Engineering'] 31 | # ) 32 | -------------------------------------------------------------------------------- /.github/workflows/ebrains.yml: -------------------------------------------------------------------------------- 1 | name: Mirror to EBRAINS 2 | 3 | # Only pushes to master should cause a sync 4 | on: 5 | push: 6 | branches: [ master ] 7 | 8 | jobs: 9 | sync_to_ebrains: 10 | runs-on: ubuntu-latest 11 | if: ${{ github.repository_owner == 'NeuralEnsemble' }} 12 | steps: 13 | - name: syncmaster 14 | uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83 15 | with: 16 | source_repo: "NeuralEnsemble/cobrawap" 17 | source_branch: "master" 18 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/neuralensemble/cobrawap.git" 19 | destination_branch: "master" 20 | 21 | - name: synctags 22 | uses: 
wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83 23 | with: 24 | source_repo: "NeuralEnsemble/cobrawap" 25 | source_branch: "refs/tags/*" 26 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/neuralensemble/cobrawap.git" 27 | destination_branch: "refs/tags/*" 28 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/phase_transform.py: -------------------------------------------------------------------------------- 1 | """ 2 | Replace the data signal value with their corresponding Hilbert phase. 3 | """ 4 | 5 | import numpy as np 6 | from elephant.signal_processing import hilbert 7 | import argparse 8 | from pathlib import Path 9 | import os 10 | from utils.io_utils import load_neo, write_neo 11 | 12 | CLI = argparse.ArgumentParser() 13 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 14 | help="path to input data in neo format") 15 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 16 | help="path of output file") 17 | 18 | if __name__ == '__main__': 19 | args, unknown = CLI.parse_known_args() 20 | 21 | block = load_neo(args.data) 22 | asig = block.segments[0].analogsignals[0] 23 | 24 | phase = np.angle(hilbert(asig).as_array()) 25 | 26 | asig = asig.duplicate_with_new_data(phase) 27 | asig.array_annotations = block.segments[0].analogsignals[0].array_annotations 28 | 29 | asig.description += "Phase signal ({}). 
"\ 30 | .format(os.path.basename(__file__)) 31 | block.segments[0].analogsignals[0] = asig 32 | 33 | write_neo(args.output, block) 34 | -------------------------------------------------------------------------------- /cobrawap/pipeline/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | 2 | STAGES: 3 | - 'stage01_data_entry' 4 | - 'stage02_processing' 5 | - 'stage03_trigger_detection' 6 | - 'stage04_wave_detection' 7 | - 'stage05_wave_characterization' 8 | # - 'stage05_channel_wave_characterization' 9 | 10 | # A profile collects the parameter configuration for a specific dataset. The stages are executed using the config_.yaml file, and results are stored in output_path// 11 | PROFILE: 'IDIBAPS' 12 | 13 | # File format in which all intermediate neo objects are stored 14 | NEO_FORMAT: 'nix' 15 | 16 | # If True (default), the output file of a stage is created as symbolic link 17 | # to the last block output. If False, a duplicate is created (e.g. for cloud 18 | # application, where sym-links are not supported). 19 | USE_LINK_AS_STAGE_OUTPUT: True 20 | 21 | # Plotting parameters 22 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 23 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 24 | PLOT_CHANNELS: 'None' # int or None. default 'None' -> randomly selected 25 | # Note that when using the None option, the automatic creation of reports will fail 26 | PLOT_FORMAT: 'png' 27 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/subsampling.py: -------------------------------------------------------------------------------- 1 | """ 2 | Subsample the input data to a target rate by selecting only every n-th sample point. 
3 | """ 4 | 5 | import argparse 6 | import quantities as pq 7 | import numpy as np 8 | from pathlib import Path 9 | from utils.io_utils import load_neo, write_neo 10 | 11 | CLI = argparse.ArgumentParser() 12 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 13 | help="path to input data in neo format") 14 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 15 | help="path of output file") 16 | CLI.add_argument("--target_rate", nargs='?', type=float, required=True, 17 | help="rate to subsample to in Hz") 18 | 19 | if __name__ == '__main__': 20 | args, unknown = CLI.parse_known_args() 21 | 22 | block = load_neo(args.data) 23 | asig = block.segments[0].analogsignals[0] 24 | 25 | subsampling_order = asig.sampling_rate/(args.target_rate*pq.Hz) 26 | subsampling_order = int(np.round(subsampling_order.rescale('dimensionless'))) 27 | 28 | sub_asig = asig.duplicate_with_new_data(asig.as_array()[::subsampling_order]) 29 | sub_asig.sampling_rate = asig.sampling_rate/subsampling_order 30 | 31 | sub_asig.array_annotations = asig.array_annotations 32 | block.segments[0].analogsignals[0] = sub_asig 33 | 34 | write_neo(args.output, block) 35 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/scripts/merge_dataframes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Merge pandas DataFrames based on the values of selected columns. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import pandas as pd 8 | from copy import deepcopy 9 | from utils.parse import none_or_path 10 | 11 | CLI = argparse.ArgumentParser() 12 | CLI.add_argument("--data", nargs='+', type=Path, required=True, 13 | help="path to input data") 14 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 15 | help="path of output file") 16 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 17 | help="path of output html file showing the merged table") 18 | # CLI.add_argument("--merge_key", nargs='?', type=str, 19 | # help="") 20 | 21 | if __name__ == '__main__': 22 | args, unknown = CLI.parse_known_args() 23 | 24 | for i, datafile in enumerate(args.data): 25 | df = pd.read_csv(datafile) 26 | df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], 27 | axis=1, inplace=True) 28 | if i: 29 | full_df = full_df.merge(df, how='outer', on=None) 30 | else: 31 | full_df = deepcopy(df) 32 | del df 33 | 34 | if args.output_img is not None: 35 | full_df.to_html(args.output_img) 36 | 37 | full_df.to_csv(args.output) 38 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/scripts/merge_dataframes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Merge pandas DataFrames based on the values of selected columns. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import pandas as pd 8 | from copy import deepcopy 9 | from utils.parse import none_or_path 10 | 11 | CLI = argparse.ArgumentParser() 12 | CLI.add_argument("--data", nargs='+', type=Path, required=True, 13 | help="path to input data") 14 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 15 | help="path of output file") 16 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 17 | help="path of output html file showing the merged table") 18 | CLI.add_argument("--merge_key", nargs='+', type=str, 19 | help="") 20 | 21 | if __name__ == '__main__': 22 | args, unknown = CLI.parse_known_args() 23 | 24 | for i, datafile in enumerate(args.data): 25 | df = pd.read_csv(datafile) 26 | df.drop(df.columns[df.columns.str.contains('unnamed', case=False)], 27 | axis=1, inplace=True) 28 | if i: 29 | full_df = full_df.merge(df, how='outer', on=args.merge_key) 30 | else: 31 | full_df = deepcopy(df) 32 | del df 33 | 34 | if args.output_img is not None: 35 | full_df.to_html(args.output_img) 36 | 37 | full_df.to_csv(args.output) 38 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/Snakefile: -------------------------------------------------------------------------------- 1 | """ 2 | # Stage 05 Wave Characterization 3 | """ 4 | 5 | from pathlib import Path 6 | configfile: Path('configs') / 'config_template.yaml' 7 | include: Path() / '..' 
/ 'utils' / 'Snakefile' 8 | 9 | #### Housekeeping #### 10 | 11 | def measures_output(wildcards): 12 | return [OUTPUT_DIR / measure / str(config.EVENT_NAME + "_" + measure + ".csv") 13 | for measure in config.MEASURES] 14 | 15 | if config.EVENT_NAME == 'wavemodes': 16 | config.MEASURES = [m for m in config.MEASURES if m not in 17 | ['label_planar', 'time_stamp', 'inter_wave_interval']] 18 | 19 | #### UTILITY BLOCKS #### 20 | 21 | use rule template as all with: 22 | input: 23 | check = OUTPUT_DIR / 'input.check', 24 | data = measures_output, 25 | script = SCRIPTS / 'merge_dataframes.py' 26 | params: 27 | params() 28 | output: 29 | OUTPUT_DIR / config.STAGE_OUTPUT, 30 | output_img = OUTPUT_DIR / 'overview_measures.html' 31 | 32 | #### CHARACTERIZATION BLOCKS #### 33 | 34 | use rule template as compute_measure with: 35 | input: 36 | data = config.STAGE_INPUT, 37 | script = SCRIPTS / str('{measure}' + ".py") 38 | params: 39 | params(config.__dict__) 40 | output: 41 | Path('{dir}') / '{measure}' / str(config.EVENT_NAME + "_" + '{measure}' + ".csv"), 42 | output_img = Path('{dir}') / '{measure}' 43 | / str(config.EVENT_NAME + "_" + '{measure}' + "." + config.PLOT_FORMAT) 44 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/scripts/check_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Check whether the input data representation adheres to the stage's requirements. 3 | 4 | Additionally prints a short summary of the data attributes. 
5 | """ 6 | 7 | import numpy as np 8 | import argparse 9 | from pathlib import Path 10 | import quantities as pq 11 | from utils.io_utils import load_neo 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to input data in neo format") 16 | 17 | if __name__ == '__main__': 18 | args, unknown = CLI.parse_known_args() 19 | 20 | block = load_neo(args.data) 21 | 22 | if len(block.segments) > 1: 23 | print("More than one Segment found; all except the first one " \ 24 | + "will be ignored.") 25 | if len(block.segments[0].analogsignals) > 1: 26 | print("More than one AnalogSignal found; all except the first one " \ 27 | + "will be ignored.") 28 | 29 | asig = block.segments[0].analogsignals[0] 30 | 31 | x_coords = asig.array_annotations['x_coords'] 32 | y_coords = asig.array_annotations['y_coords'] 33 | 34 | print('Recording Time:\t\t', asig.t_stop - asig.t_start) 35 | print('Sampling Rate:\t\t', asig.sampling_rate) 36 | num_channels = np.count_nonzero(~np.isnan(np.sum(asig, axis=0))) 37 | print('Number of Channels:\t', num_channels) 38 | 39 | dim_x, dim_y = np.max(x_coords)+1, np.max(y_coords)+1 40 | print('Grid Dimensions:\t', f'{dim_x} x {dim_y}') 41 | print('Empty Grid Sites:\t', dim_x*dim_y - num_channels) 42 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/scripts/time_slice.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cut data according to a start and stop time. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import quantities as pq 8 | from utils.io_utils import load_neo, write_neo 9 | from utils.neo_utils import time_slice 10 | from utils.parse import none_or_float 11 | 12 | CLI = argparse.ArgumentParser() 13 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 14 | help="path to input data in neo format") 15 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 16 | help="path of output file") 17 | CLI.add_argument("--t_start", nargs='?', type=none_or_float, default=0, 18 | help="new starting time in s") 19 | CLI.add_argument("--t_stop", nargs='?', type=none_or_float, default=10, 20 | help="new stopping time in s") 21 | 22 | if __name__ == '__main__': 23 | args, unknown = CLI.parse_known_args() 24 | 25 | block = load_neo(args.data) 26 | 27 | for i, asig in enumerate(block.segments[0].analogsignals): 28 | block.segments[0].analogsignals[i] = time_slice(asig, 29 | t_start=args.t_start, 30 | t_stop=args.t_stop) 31 | 32 | for i, evt in enumerate(block.segments[0].events): 33 | block.segments[0].events[i] = time_slice(evt, 34 | t_start=args.t_start, 35 | t_stop=args.t_stop) 36 | 37 | 38 | write_neo(args.output, block) 39 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stageXY_template/scripts/check_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Check whether the input data representation adheres to the stage's requirements. 3 | 4 | Additionally prints a short summary of the data attributes. 
5 | """ 6 | 7 | import numpy as np 8 | import argparse 9 | from pathlib import Path 10 | from utils.io_utils import load_neo 11 | 12 | CLI = argparse.ArgumentParser() 13 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 14 | help="path to input data in neo format") 15 | 16 | if __name__ == '__main__': 17 | args, unknown = CLI.parse_known_args() 18 | 19 | block = load_neo(args.data) 20 | 21 | if len(block.segments) > 1: 22 | print("More than one Segment found; all except the first one " \ 23 | + "will be ignored.") 24 | if len(block.segments[0].analogsignals) > 1: 25 | print("More than one AnalogSignal found; all except the first one " \ 26 | + "will be ignored.") 27 | 28 | asig = block.segments[0].analogsignals[0] 29 | 30 | print('Recording Time:\t\t', asig.t_stop - asig.t_start) 31 | print('Sampling Rate:\t\t', asig.sampling_rate) 32 | print('Spatial Scale:\t\t', asig.annotations['spatial_scale']) 33 | 34 | num_channels = np.count_nonzero(~np.isnan(np.sum(asig, axis=0))) 35 | print('Number of Channels:\t', num_channels) 36 | 37 | x_coords = asig.array_annotations['x_coords'] 38 | y_coords = asig.array_annotations['y_coords'] 39 | dim_x, dim_y = np.max(x_coords)+1, np.max(y_coords)+1 40 | print('Grid Dimensions:\t', f'{dim_x} x {dim_y}') 41 | print('Empty Grid Sites:\t', dim_x*dim_y - num_channels) 42 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/check_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Check whether the input data representation adheres to the stage's requirements. 3 | 4 | Additionally prints a short summary of the data attributes. 
5 | """ 6 | 7 | import numpy as np 8 | import argparse 9 | from pathlib import Path 10 | import quantities as pq 11 | from utils.io_utils import load_neo 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to input data in neo format") 16 | 17 | if __name__ == '__main__': 18 | args, unknown = CLI.parse_known_args() 19 | 20 | block = load_neo(args.data) 21 | 22 | if len(block.segments) > 1: 23 | print("More than one Segment found; all except the first one " \ 24 | + "will be ignored.") 25 | if len(block.segments[0].analogsignals) > 1: 26 | print("More than one AnalogSignal found; all except the first one " \ 27 | + "will be ignored.") 28 | 29 | asig = block.segments[0].analogsignals[0] 30 | 31 | print('Recording Time:\t\t', asig.t_stop - asig.t_start) 32 | print('Sampling Rate:\t\t', asig.sampling_rate) 33 | print('Spatial Scale:\t\t', asig.annotations['spatial_scale']) 34 | 35 | num_channels = np.count_nonzero(~np.isnan(np.sum(asig, axis=0))) 36 | print('Number of Channels:\t', num_channels) 37 | 38 | x_coords = asig.array_annotations['x_coords'] 39 | y_coords = asig.array_annotations['y_coords'] 40 | dim_x, dim_y = np.max(x_coords)+1, np.max(y_coords)+1 41 | print('Grid Dimensions:\t', f'{dim_x} x {dim_y}') 42 | print('Empty Grid Sites:\t', dim_x*dim_y - num_channels) 43 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/scripts/velocity_local.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate the wave propagation velocity for each wave and channel. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import numpy as np 8 | import pandas as pd 9 | import matplotlib.pyplot as plt 10 | from utils.io_utils import load_neo, save_plot 11 | from utils.parse import none_or_path 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to spatial derivative dataframe") 16 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 17 | help="path of output file") 18 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 19 | help="path of output image file") 20 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 21 | help="name of neo.Event to analyze (must contain waves)") 22 | 23 | if __name__ == '__main__': 24 | args, unknown = CLI.parse_known_args() 25 | 26 | df = pd.read_csv(args.data) 27 | 28 | velocity = df.spatial_scale * np.sqrt(1/(df.dt_x**2 + df.dt_y**2)) 29 | velocity[~np.isfinite(velocity)] = np.nan 30 | 31 | velocity_df = pd.DataFrame(velocity, columns=['velocity_local']) 32 | velocity_df['channel_id'] = df.channel_id 33 | velocity_df['velocity_local_unit'] = f'{df.spatial_scale_unit[0]}/{df.dt_unit[0]}' 34 | velocity_df[f'{args.event_name}_id'] = df[f'{args.event_name}_id'] 35 | 36 | velocity_df.to_csv(args.output) 37 | 38 | if args.output_img is not None: 39 | save_plot(args.output_img) 40 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/scripts/direction_local.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate the wave directions per wave and channel, 3 | based on the spatial gradient of wave trigger times. 
4 | """ 5 | 6 | import argparse 7 | from pathlib import Path 8 | import numpy as np 9 | import pandas as pd 10 | import matplotlib.pyplot as plt 11 | from utils.io_utils import load_neo, save_plot 12 | from utils.parse import none_or_path 13 | 14 | CLI = argparse.ArgumentParser() 15 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 16 | help="path to spatial derivative dataframe") 17 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 18 | help="path of output file") 19 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 20 | help="path of output image file") 21 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 22 | help="name of neo.Event to analyze (must contain waves)") 23 | 24 | if __name__ == '__main__': 25 | args, unknown = CLI.parse_known_args() 26 | 27 | df = pd.read_csv(args.data) 28 | 29 | direction_df = pd.DataFrame(df.channel_id, columns=['channel_id']) 30 | direction_df['direction_local_x'] = df.dt_x 31 | direction_df['direction_local_y'] = df.dt_y 32 | direction_df[f'{args.event_name}_id'] = df[f'{args.event_name}_id'] 33 | 34 | direction_df.to_csv(args.output) 35 | 36 | fig, ax = plt.subplots(subplot_kw={'projection': 'polar'}) 37 | ax.hist(np.angle(df.dt_x + 1j*df.dt_y), bins=36, range=[-np.pi, np.pi]) 38 | 39 | if args.output_img is not None: 40 | save_plot(args.output_img) 41 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/README.rst: -------------------------------------------------------------------------------- 1 | ================================ 2 | Stage 05 - Wave Characterization 3 | ================================ 4 | 5 | **This stage evaluates the detected waves by deriving characteristic wave-wise measures.** 6 | 7 | `config template `_ 8 | 9 | Input 10 | ===== 11 | A ``neo.Block`` and ``Segment`` object containing 12 | 13 | a ``neo.Event`` object named 
_'wavefronts'_, containing 14 | 15 | * *labels*: wave ids, 16 | * *array_annotations*: ``channels``, ``x_coords``, ``y_coords``. 17 | 18 | * Some blocks may require the additional ``AnalogSignal`` object called *'optical_flow'* but containing the complex-valued optical flow values. 19 | 20 | *should pass* |check_input|_ 21 | 22 | .. |check_input| replace:: *check_input.py* 23 | .. _check_input: https://github.com/NeuralEnsemble/cobrawap/blob/master/pipeline/stage05_wave_characterization/scripts/check_input.py 24 | 25 | Output 26 | ====== 27 | A table (``pandas.DataFrame``), containing 28 | 29 | * the wave-wise characteristic measures, their unit, and if applicable their uncertainty as determined by the selected blocks 30 | * any annotations as selected via ``INCLUDE_KEYS`` or ``IGNORE_KEYS`` 31 | 32 | Usage 33 | ===== 34 | In this stage, any number of blocks can be selected via the ``MEASURES`` parameter and are applied on the stage input (*choose any*). 35 | To include specific metadata in the output table, select the corresponding annotation keys with ``INCLUDE_KEYS``, or, to include all available metadata except some, specify only the corresponding annotation keys in ``IGNORE_KEYS``. 36 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/README.rst: -------------------------------------------------------------------------------- 1 | ======================================== 2 | Stage 05 - Channel Wave Characterization 3 | ======================================== 4 | 5 | **This stage evaluates the detected waves by deriving characteristic channel-wise measures.** 6 | 7 | `config template `_ 8 | 9 | Input 10 | ===== 11 | A ``neo.Block`` and ``Segment`` object containing 12 | 13 | a ``neo.Event`` object named *'wavefronts'*, containing 14 | 15 | * *labels*: wave ids, 16 | * *array_annotations*: ``channels``, ``x_coords``, ``y_coords``. 
17 | 18 | Some blocks may require the additional ``AnalogSignal`` object called *'optical_flow'* but containing the complex-valued optical flow values. 19 | 20 | *should pass* |check_input|_ 21 | 22 | .. |check_input| replace:: *check_input.py* 23 | .. _check_input: https://github.com/NeuralEnsemble/cobrawap/blob/master/pipeline/stage05_channel_wave_characterization/scripts/check_input.py 24 | 25 | Output 26 | ====== 27 | A table (``pandas.DataFrame``), containing 28 | * the characteristic measures per wave and channel, their unit, and if applicable their uncertainty as determined by the selected blocks 29 | * any annotations as selected via ``INCLUDE_KEYS`` or ``IGNORE_KEYS`` 30 | 31 | Usage 32 | ===== 33 | In this stage, any number of blocks can be selected via the ``MEASURES`` parameter and are applied on the stage input (*choose any*). 34 | To include specific metadata in the output table, select the corresponding annotation keys with ``INCLUDE_KEYS``, or, to include all available metadata except some, specify only the corresponding annotation keys in ``IGNORE_KEYS``. 35 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/scripts/number_of_triggers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate the number of triggers involved in each wave. 
3 | """ 4 | 5 | import numpy as np 6 | import argparse 7 | from pathlib import Path 8 | import pandas as pd 9 | from utils.io_utils import load_neo, save_plot 10 | from utils.parse import none_or_path 11 | 12 | CLI = argparse.ArgumentParser() 13 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 14 | help="path to input data in neo format") 15 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 16 | help="path of output file") 17 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 18 | help="path of output image file") 19 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 20 | help="name of neo.Event to analyze (must contain waves)") 21 | 22 | if __name__ == '__main__': 23 | args, unknown = CLI.parse_known_args() 24 | 25 | block = load_neo(args.data) 26 | 27 | asig = block.segments[0].analogsignals[0] 28 | evts = block.filter(name=args.event_name, objects="Event")[0] 29 | 30 | wave_ids = np.sort(np.unique(evts.labels).astype(int)) 31 | 32 | number_of_triggers = np.empty(len(wave_ids), dtype=float) 33 | 34 | for i, wave_id in enumerate(wave_ids): 35 | idx = np.where(evts.labels == str(wave_id))[0] 36 | number_of_triggers[i] = len(evts.times[idx]) 37 | 38 | # transform to DataFrame 39 | df = pd.DataFrame(number_of_triggers, columns=['number_of_triggers']) 40 | df[f'{args.event_name}_id'] = wave_ids 41 | df.to_csv(args.output) 42 | 43 | # ToDo 44 | if args.output_img is not None: 45 | save_plot(args.output_img) 46 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | # Config file for Stage 5 - Channel-wise Wave Characterization 2 | 3 | # Name of stage, must be identical with folder name 4 | STAGE_NAME: 'stage05_channel_wave_characterization' 5 | 6 | # The profile name is 
the key for this parameter configuration. Results are stored in output_path// (output_path is defined in settings.py) 7 | PROFILE: 'dataset_key' 8 | 9 | # Name of the output file 10 | STAGE_OUTPUT: 'channel-wise_measures.csv' 11 | 12 | # If True (default), the output file of a stage is created as symbolic link 13 | # to the last block output. If False, a duplicate is created (e.g. for cloud 14 | # application, where sym-links are not supported). 15 | USE_LINK_AS_STAGE_OUTPUT: True 16 | 17 | # Plotting parameters 18 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 19 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 20 | PLOT_CHANNELS: 'None' # int or None. default 'None' -> randomly selected 21 | PLOT_FORMAT: 'png' 22 | 23 | # Wave event name 24 | # 'wavefronts', 'wavemodes' 25 | EVENT_NAME: 'wavefronts' 26 | 27 | # Measures to compute 28 | # 'velocity_local', 'direction_local', 'inter_wave_interval_local', 'annotations' 29 | MEASURES: ['annotations', 'velocity_local', 'direction_local', 'inter_wave_interval_local'] 30 | 31 | # Velocity/Direction Local 32 | ################ 33 | # simple_3x3, prewitt_3x3, scharr_3x3, sobel_3x3, sobel_5x5, sobel_7x7 34 | KERNEL: 'scharr_3x3' 35 | INTERPOLATE: True 36 | # smoothing factor for the interpolation (0 = no smoothing) 37 | SMOOTHING: 0 38 | 39 | # Annotations 40 | ############# 41 | # Use include_keys, if they are empty apply ignore_keys 42 | INCLUDE_KEYS: [] 43 | IGNORE_KEYS: [] 44 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | # Config file for Stage 5 - Wave Characterization 2 | 3 | # Name of stage, must be identical with folder name 4 | STAGE_NAME: 'stage05_wave_characterization' 5 | 6 | # The profile name is the key for this parameter 
configuration. Results are stored in output_path// (output_path is defined in settings.py) 7 | PROFILE: 'dataset_key' 8 | 9 | # Name of the output file 10 | STAGE_OUTPUT: 'wave-wise_measures.csv' 11 | 12 | # If True (default), the output file of a stage is created as symbolic link 13 | # to the last block output. If False, a duplicate is created (e.g. for cloud 14 | # application, where sym-links are not supported). 15 | USE_LINK_AS_STAGE_OUTPUT: True 16 | 17 | # Plotting parameters 18 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 19 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 20 | PLOT_CHANNELS: 'None' # int or None. default 'None' -> randomly selected 21 | PLOT_FORMAT: 'png' 22 | 23 | # Wave event name 24 | # 'wavefronts', 'wavemodes' 25 | EVENT_NAME: 'wavefronts' 26 | 27 | # Measures to compute 28 | # 'label_planar', 'velocity_planar', 'direction_planar', 'inter_wave_interval', 29 | # 'time_stamp', 'duration', 'number_of_triggers', 'annotations' 30 | MEASURES: ['annotations', 'inter_wave_interval', 'label_planar', 'velocity_planar', 'direction_planar'] 31 | 32 | # Label Planar 33 | ############## 34 | ALIGNMENT_THRESHOLD: 0.9 35 | 36 | # Direction Planar 37 | ################## 38 | # trigger_interpolation, optical_flow  (spelling of the accepted value should be verified against the stage scripts) 39 | DIRECTION_METHOD: 'optical_flow' 40 | 41 | # Time Stamp 42 | ############ 43 | # start, middle, end 44 | TIME_STAMP_POINT: 'start' 45 | 46 | # Annotations 47 | ############# 48 | # Use include_keys, if they are empty apply ignore_keys 49 | INCLUDE_KEYS: [] 50 | IGNORE_KEYS: [] 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # workflow folder 2 | .snakemake/ 3 | temp* 4 | temp*/ 5 | .idea/ 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # 
Distribution / packaging 16 | .Python 17 | env/ 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | .hypothesis/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | doc/_build/ 72 | doc/build/ 73 | doc/index.html 74 | doc/source/_toctree/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # pyenv 83 | .python-version 84 | 85 | # celery beat schedule file 86 | celerybeat-schedule 87 | 88 | # SageMath parsed files 89 | *.sage.py 90 | 91 | # dotenv 92 | .env 93 | 94 | # virtualenv 95 | .venv 96 | venv/ 97 | ENV/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | .spyproject 102 | 103 | # Rope project settings 104 | .ropeproject 105 | 106 | # mkdocs documentation 107 | /site 108 | 109 | # mypy 110 | .mypy_cache/ 111 | 112 | # MacOS 113 | .DS_Store 114 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/scripts/check_input.py: -------------------------------------------------------------------------------- 1 | """ 2 | Check whether the input data representation adheres to the stage's requirements. 
3 | 4 | Additionally prints a short summary of the data attributes. 5 | """ 6 | 7 | import numpy as np 8 | import argparse 9 | from pathlib import Path 10 | import quantities as pq 11 | from utils.io_utils import load_neo 12 | from snakemake.logging import logger 13 | 14 | CLI = argparse.ArgumentParser() 15 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 16 | help="path to input data in neo format") 17 | 18 | if __name__ == '__main__': 19 | args, unknown = CLI.parse_known_args() 20 | 21 | block = load_neo(args.data) 22 | 23 | if len(block.segments) > 1: 24 | print("More than one Segment found; all except the first one " \ 25 | + "will be ignored.") 26 | if len(block.segments[0].analogsignals) > 1: 27 | print("More than one AnalogSignal found; all except the first one " \ 28 | + "will be ignored.") 29 | 30 | asig = block.segments[0].analogsignals[0] 31 | 32 | # print('Recording Time:\t\t', asig.t_stop - asig.t_start) 33 | # print('Sampling Rate:\t\t', asig.sampling_rate) 34 | # print('Spatial Scale:\t\t', asig.annotations['spatial_scale']) 35 | 36 | evts = block.filter(name='transitions', objects="Event") 37 | 38 | if not len(evts): 39 | raise ValueError("No 'transitions' events found!") 40 | evt = evts[0] 41 | 42 | if not 'UP' in evt.labels: 43 | logger.warning("No transitions labeled 'UP' found!") 44 | # raise KeyError("No transitions labeled 'UP' found!") 45 | 46 | up_channels = np.unique(evt.array_annotations['channels']) 47 | num_channels = np.count_nonzero(~np.isnan(np.sum(asig, axis=0))) 48 | print(f'{len(up_channels)} of {num_channels} channels show UP transitions.') 49 | 50 | evt.array_annotations['x_coords'] 51 | evt.array_annotations['y_coords'] 52 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/README.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Stage 03 - Trigger Detection 3 | 
Usage
=====
This stage offers alternative trigger detection methods (*choose one*), which can be selected via the ``DETECTION_BLOCK`` parameter.
There are additional filter blocks to post-process the detected triggers; they can be selected (*choose any*) via the ``TRIGGER_FILTER`` parameter.
"""
Calculate the time from the first to the last trigger in each wave.
"""

import numpy as np
import argparse
from pathlib import Path
import pandas as pd
from utils.io_utils import load_neo, save_plot
from utils.parse import none_or_path

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None,
                 help="path of output image file")
CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts',
                 help="name of neo.Event to analyze (must contain waves)")

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)

    asig = block.segments[0].analogsignals[0]
    evts = block.filter(name=args.event_name, objects="Event")[0]

    if not len(evts):
        raise ValueError(f"No triggers found in event '{args.event_name}'!")

    # Wave ids are stored as string labels; '-1' marks unassigned triggers
    # and is excluded. Guard against an empty id list before indexing [0].
    wave_ids = np.sort(np.unique(evts.labels).astype(int))
    if len(wave_ids) and wave_ids[0] == -1:
        wave_ids = np.delete(wave_ids, 0)

    durations = np.empty(len(wave_ids), dtype=float)

    t_unit = evts.times[0].dimensionality.string

    for i, wave_id in enumerate(wave_ids):
        idx = np.where(evts.labels == str(wave_id))[0]
        tmin, tmax = np.min(evts.times[idx]), np.max(evts.times[idx])
        # take the magnitude explicitly instead of relying on the implicit
        # (and deprecated) Quantity -> float cast on item assignment
        durations[i] = (tmax - tmin).magnitude

    # transform to DataFrame
    df = pd.DataFrame(durations, columns=['duration'])
    df['duration_unit'] = t_unit
    df[f'{args.event_name}_id'] = wave_ids

    df.to_csv(args.output)

    # ToDo: plot the duration distribution instead of the current figure
    if args.output_img is not None:
        save_plot(args.output_img)
"""
Combine the AnalogSignal and Event objects from different wave analysis blocks
into the same Neo Block.
"""

import argparse
from pathlib import Path
from utils.io_utils import load_neo, write_neo

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--properties", nargs='*', type=Path, default=[],
                 help="paths to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    waves_block = load_neo(args.data)

    asig_names = [asig.name for asig in waves_block.segments[0].analogsignals]
    event_names = [event.name for event in waves_block.segments[0].events]

    # Snakemake may pass a single empty path when no properties are selected.
    if not args.properties or not args.properties[0]:
        args.properties = []

    # 'property_path' instead of 'property' to avoid shadowing the builtin
    for property_path in args.properties:
        block = load_neo(property_path)

        # add signals that are not yet present in the waves block
        for asig in block.segments[0].analogsignals:
            if asig.name not in asig_names:
                waves_block.segments[0].analogsignals.append(asig)

        # for known events, only complement missing (array) annotations;
        # unknown events are appended as a whole
        for event in block.segments[0].events:
            if event.name in event_names:
                waves_evt = waves_block.filter(name=event.name, objects="Event")[0]
                for key, value in event.annotations.items():
                    if key not in waves_evt.annotations:
                        waves_evt.annotations[key] = value
                for key, value in event.array_annotations.items():
                    if key not in waves_evt.array_annotations:
                        waves_evt.array_annotations[key] = value
            else:
                waves_block.segments[0].events.append(event)

        del block

    write_neo(args.output, waves_block)
"""
Check whether the input data representation adheres to the stage's requirements.

Additionally prints a short summary of the data attributes.
"""

import numpy as np
import argparse
from pathlib import Path
from utils.io_utils import load_neo
from utils.neo_utils import analogsignal_to_imagesequence, imagesequence_to_analogsignal

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)

    # Only the first Segment / AnalogSignal is considered downstream.
    if len(block.segments) > 1:
        print("More than one Segment found; all except the first one " \
            + "will be ignored.")
    if len(block.segments[0].analogsignals) > 1:
        print("More than one AnalogSignal found; all except the first one " \
            + "will be ignored.")

    asig = block.segments[0].analogsignals[0]

    # Round-trip through an ImageSequence: a shape mismatch reveals that
    # empty grid sites are missing from the AnalogSignal.
    roundtrip_asig = imagesequence_to_analogsignal(analogsignal_to_imagesequence(asig))

    if asig.shape != roundtrip_asig.shape:
        raise ValueError("AnalogSignal doesn't include empty grid sites. "
                         + f"Reshape {asig.shape} to {roundtrip_asig.shape} according to "
                         "x/y_coords. You may use `add_empty_sites_to_analogsignal` "
                         "from the utils.neo_utils module.")

    print('Recording Time:\t\t', asig.t_stop - asig.t_start)
    print('Sampling Rate:\t\t', asig.sampling_rate)
    print('Spatial Scale:\t\t', asig.annotations['spatial_scale'])

    # channels that are all-NaN are treated as empty grid sites
    channel_count = np.count_nonzero(~np.isnan(np.sum(asig, axis=0)))
    print('Number of Channels:\t', channel_count)

    grid_width = np.max(asig.array_annotations['x_coords']) + 1
    grid_height = np.max(asig.array_annotations['y_coords']) + 1

    print('Grid Dimensions:\t', f'{grid_width} x {grid_height}')
    print('Empty Grid Sites:\t', grid_width*grid_height - channel_count)
def load_neo(filename, object='block', lazy=False, *args, **kwargs):
    """Load a neo Block (or its first AnalogSignal) from `filename`.

    Parameters
    ----------
    filename : str or Path
        Path to a file readable by one of neo's io classes.
    object : {'block', 'analogsignal'}
        Return the full Block, or only the first AnalogSignal of the
        first Segment.
    lazy : bool
        Use lazy loading if the io class supports it.
    *args, **kwargs
        Passed on to `neo.io.get_io`.

    Raises
    ------
    IOError
        If no Block could be read, or `object` is not recognized.
    """
    filename = Path(filename)
    # open nix files read-only to avoid modifying the source file
    if filename.suffix == '.nix':
        kwargs.update(mode='ro')

    # Create the io outside the try block: if get_io itself fails, the
    # original 'finally' would hit an unbound 'nio' and raise a NameError
    # that masks the real exception.
    nio = neo.io.get_io(str(filename), *args, **kwargs)
    try:
        if lazy and nio.support_lazy:
            block = nio.read_block(lazy=lazy)
        else:
            block = nio.read_block()
    finally:
        # keep the io open for lazy proxy objects; close otherwise
        if not lazy and hasattr(nio, 'close'):
            nio.close()

    if block is None:
        # include the offending path in the message (the f-string had
        # no placeholder before)
        raise IOError(f'{filename} does not exist!')

    if object == 'block':
        return block
    elif object == 'analogsignal':
        return block.segments[0].analogsignals[0]
    else:
        raise IOError(f"{object} not recognized! Choose 'block' or 'analogsignal'.")
/ 'utils' / 'Snakefile' 8 | 9 | #### Housekeeping #### 10 | 11 | def measures_output(wildcards): 12 | return [OUTPUT_DIR / measure / str(config.EVENT_NAME + "_" + measure + ".csv") 13 | for measure in config.MEASURES] 14 | 15 | if config.EVENT_NAME == 'wavemodes': 16 | config.MEASURES = [m for m in config.MEASURES if m not in 17 | ['inter_wave_interval_local', 'flow_direction_local']] 18 | 19 | def input(wildcards): 20 | spatial_measures = ['velocity_local', 'direction_local'] 21 | if wildcards.measure in spatial_measures: 22 | return rules.spatial_derivative.output 23 | else: 24 | return config.STAGE_INPUT 25 | 26 | ruleorder: spatial_derivative > compute_measure 27 | 28 | #### UTILITY BLOCKS #### 29 | 30 | use rule template as all with: 31 | input: 32 | check = OUTPUT_DIR / 'input.check', 33 | data = measures_output, 34 | script = SCRIPTS / 'merge_dataframes.py' 35 | params: 36 | params(merge_key = ['channel_id', str(config.EVENT_NAME + "_id")]) 37 | output: 38 | OUTPUT_DIR / config.STAGE_OUTPUT, 39 | output_img = OUTPUT_DIR / 'overview_measures.html' 40 | 41 | #### CHARACTERIZATION BLOCKS #### 42 | 43 | use rule template as compute_measure with: 44 | input: 45 | data = input, 46 | script = SCRIPTS / str('{measure}' + ".py") 47 | params: 48 | params(config.__dict__) 49 | output: 50 | Path('{dir}') / '{measure}' / str(config.EVENT_NAME + "_" + '{measure}' + ".csv"), 51 | output_img = Path('{dir}') / '{measure}' 52 | / str(config.EVENT_NAME + "_" + '{measure}' + "." + config.PLOT_FORMAT) 53 | 54 | 55 | use rule compute_measure as spatial_derivative with: 56 | input: 57 | data = config.STAGE_INPUT, 58 | script = SCRIPTS / 'spatial_derivative.py' 59 | output: 60 | Path('{dir}') / '{rule_name}' 61 | / str(config.EVENT_NAME + "_spatial_derivative.csv"), 62 | output_img = Path('{dir}') / '{rule_name}' 63 | / str(config.EVENT_NAME + "_spatial_derivative." 
"""
Calculate the wave directions per wave and channel,
based on the optical flow at wave trigger times and locations.
"""

import argparse
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils.io_utils import load_neo, save_plot
from utils.parse import none_or_path

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to neo object")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None,
                 help="path of output image file")
CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts',
                 help="name of neo.Event to analyze (must contain waves)")

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)
    evt = block.filter(name=args.event_name, objects="Event")[0]
    # discard triggers that were not assigned to any wave (label '-1')
    evt = evt[evt.labels.astype('str') != '-1']

    optical_flow = block.filter(name='optical_flow', objects="AnalogSignal")[0]

    df_dict = {f'{args.event_name}_id': evt.labels,
               'channel_id': evt.array_annotations['channels'],
               'flow_direction_local_x': np.empty(len(evt), dtype=float),
               'flow_direction_local_y': np.empty(len(evt), dtype=float),
               }

    for i, trigger in enumerate(evt):
        # first optical-flow sample at or after the trigger time
        t_idx = np.argmax(optical_flow.times >= trigger)
        # bug fix: a trailing comma previously turned channel_id into a
        # 1-tuple, so the indexing returned an array instead of a scalar
        channel_id = evt.array_annotations['channels'][i]
        direction = optical_flow[t_idx, channel_id]
        df_dict['flow_direction_local_x'][i] = np.real(direction)
        df_dict['flow_direction_local_y'][i] = np.imag(direction)

    df = pd.DataFrame(df_dict)
    df.to_csv(args.output)

    # polar histogram of the local flow directions
    fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
    ax.hist(np.angle(df.flow_direction_local_x + 1j*df.flow_direction_local_y),
            bins=36, range=[-np.pi, np.pi])

    if args.output_img is not None:
        save_plot(args.output_img)
+ config.PLOT_FORMAT) 39 | 40 | 41 | def curation_script(w): 42 | if CONFIG_PATH is None: 43 | prefix = Path() 44 | else: 45 | prefix = CONFIG_PATH / config.STAGE_NAME 46 | return prefix / 'scripts' / config.CURATION_SCRIPT 47 | 48 | 49 | def data_path(w): 50 | data_path = Path(config.DATA_SETS[config.DATA_NAME]).resolve() 51 | if not data_path.exists(): 52 | raise FileNotFoundError("Data file `" + data_path + "` not found!") 53 | return data_path 54 | 55 | 56 | use rule template as enter_data with: 57 | input: 58 | data = data_path, 59 | script = curation_script, 60 | params: 61 | params('spatial_scale', 'sampling_rate', 't_start', 't_stop', 62 | 'orientation_top', 'orientation_right', 'annotations', 63 | 'array_annotations', 'kwargs', 'trial', config=config) 64 | output: 65 | Path('{dir}') / '{data_name}' / str('{data_name}' + "." + config.NEO_FORMAT) 66 | -------------------------------------------------------------------------------- /doc/source/authors.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Authors and contributors 3 | ************************ 4 | 5 | The following people have contributed code and/or ideas to the current version 6 | of Cobrawap. The institutional affiliations are those at the time of the 7 | contribution, and may not be the current affiliation of a contributor. 8 | 9 | * Robin Gutzen (orcid: 0000-0001-7373-5962) [1, 2] 10 | * Giulia De Bonis (orcid: 0000-0001-7079-5724) [3] 11 | * Chiara De Luca (orcid: 0000-0003-3488-0088) [3,4] 12 | * Elena Pastorelli (orcid: 0000-0003-0682-1232) [3] 13 | * Cristiano Capone (orcid: 0000-0002-9958-2551) [3] 14 | * Anna Letizia Allegra Mascaro (orcid: 0000-0002-8489-0076) [5, 6] 15 | * Francesco Resta (orcid: 0000-0002-9605-5852) [5, 7] 16 | * Arnau Manasanch (orcid: 0000-0002-8306-0759) [8] 17 | * Francesco Saverio Pavone (orcid: 0000-0002-0675-3981) [5, 7, 9] 18 | * Maria V. 
Sanchez-Vives (orcid: 0000-0002-8437-9083) [8, 10] 19 | * Maurizio Mattia (orcid: 0000-0002-2356-4509) [11] 20 | * Sonja Grün (orcid: 0000-0003-2829-2220) [1, 2] 21 | * Pier Stanislao Paolucci (orcid: 0000-0003-1937-6086) [3] 22 | * Michael Denker (orcid: 0000-0003-1255-7300) [1] 23 | * Cosimo Lupo (orcid: 0000-0002-2651-1277) [3] 24 | * Federico Marmoreo (orcid: 0000-0002-5933-2873) [3] 25 | 26 | 1. Institute of Neuroscience and Medicine (INM-6) and Institute for Advanced Simulation (IAS-6) and JARA-Institute Brain Structure-Function Relationships (INM-10), Jülich Research Centre, Jülich, Germany 27 | 2. Theoretical Systems Neurobiology, RWTH Aachen University, Aachen, Germany 28 | 3. Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy 29 | 4. Institute of Neuroinformatics, University of Zürich and ETH Zürich, Zürich, Switzerland 30 | 5. European Laboratory for Non-linear Spectroscopy (LENS), University of Florence, Florence, Italy 31 | 6. Neuroscience Institute, National Research Council, Pisa, Italy 32 | 7. Department of Physics and Astronomy, University of Florence, Florence, Italy 33 | 8. Institut d’Investigacions Biomèdiques August Pi i Sunyer (IDIBAPS), Barcelona, Spain 34 | 9. National Institute of Optics, National Research Council, Sesto Fiorentino, Italy 35 | 10. Institució Catalana de Recerca i Estudis Avançats (ICREA), Barcelona, Spain 36 | 11. Natl. Center for Radiation Protection and Computational Physics, Istituto Superiore di Sanità (ISS), Rome, Italy 37 | 38 | If we've somehow missed you off the list we're very sorry - please let us know. 39 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/README.rst: -------------------------------------------------------------------------------- 1 | ===================== 2 | Stage 02 - Processing 3 | ===================== 4 | **This stage prepares the data for analysis. 
The user can select the required processing steps depending on the data and analysis objectives.** 5 | 6 | `config template `_ 7 | 8 | Input 9 | ===== 10 | Simultaneous neural activity recordings from electrodes/pixels, spatially arranged on a grid. 11 | 12 | A ``neo.Block`` and ``Segment`` object containing an ``AnalogSignal`` object containing all signal channels (additional ``AnalogSignal`` objects are ignored) with 13 | 14 | * *array_annotations*: ``x_coords`` and ``y_coords`` specifying the integer position on the channel grid; 15 | * *annotations*: ``spatial_scale`` specifying the distance between electrodes/pixels as ``quantities.Quantity`` object. 16 | 17 | *should pass* |check_input|_ 18 | 19 | .. |check_input| replace:: *check_input.py* 20 | .. _check_input: https://github.com/NeuralEnsemble/cobrawap/blob/master/pipeline/stage02_processing/scripts/check_input.py 21 | 22 | Output 23 | ====== 24 | * The same structured ``neo.Block`` object containing an ``AnalogSignal`` object. The channel signals in ``AnalogSignal`` are processed by the specified blocks and parameters. 25 | * The respective block parameters are added as metadata to the annotations of the ``AnalogSignal``. 26 | * The output ``neo.Block`` is stored in ``{output_path}/{profile}/stage02_processing/processed_data.{NEO_FORMAT}`` 27 | * The intermediate results and plots of each processing block are stored in the ``{output_path}/{profile}/stage02_processing/{block_name}/`` 28 | 29 | Usage 30 | ===== 31 | In this stage, all blocks can be selected and arranged in arbitrary order (*choose any*). The execution order is specified by the config parameter ``BLOCK_ORDER``. All blocks, generally, have the same output data representation as their input, just transforming the ``AnalogSignal`` and adding metadata, without adding data objects. 
When the block order is changed in-between runs, it may happen that not all the necessary blocks are re-executed correctly, because of Snakemake's time-stamp-based re-execution mechanism. Therefore, to be sure all blocks are re-executed, you can set ``RERUN_MODE`` to ``True``. However, when you are not changing the block order, setting it to ``False`` prevents unnecessary reruns.
Usage
=====
This stage offers alternative wave detection methods (*choose one*), which can be selected via the ``DETECTION_BLOCK`` parameter.
There are blocks to add additional properties, to be selected (*choose any*) via the ``ADDITIONAL_PROPERTIES`` parameter.
"""
Filter between a highpass and a lowpass frequency using a butterworth filter.
"""

import argparse
from pathlib import Path
import quantities as pq
import os
from elephant.signal_processing import butter
from utils.io_utils import load_neo, write_neo
from utils.parse import none_or_float

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--highpass_frequency", nargs='?', type=none_or_float,
                 default=None, help="lower bound of frequency band in Hz")
CLI.add_argument("--lowpass_frequency", nargs='?', type=none_or_float,
                 default=None, help="upper bound of frequency band in Hz")
CLI.add_argument("--order", nargs='?', type=int, default=2,
                 help="order of the filter function")
CLI.add_argument("--filter_function", nargs='?', type=str, default='filtfilt',
                 help="filter function used in the scipy backend")

def _as_frequency(value):
    """Attach Hz units to a cutoff value; pass None through unchanged.

    The CLI defaults are None (via none_or_float) for a one-sided filter,
    and `None * pq.Hz` would raise a TypeError.
    """
    return value if value is None else value * pq.Hz

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)

    highpass_frequency = _as_frequency(args.highpass_frequency)
    lowpass_frequency = _as_frequency(args.lowpass_frequency)

    asig = butter(block.segments[0].analogsignals[0],
                  highpass_frequency=highpass_frequency,
                  lowpass_frequency=lowpass_frequency,
                  order=args.order,
                  filter_function=args.filter_function)

    # butter returns a new signal; carry over the channel annotations and
    # record the filter settings as metadata
    asig.array_annotations = block.segments[0].analogsignals[0].array_annotations
    asig.annotate(highpass_frequency=highpass_frequency,
                  lowpass_frequency=lowpass_frequency,
                  filter_order=args.order)

    asig.description += "Frequency filtered with [{}, {}]Hz order {} "\
                        .format(args.highpass_frequency,
                                args.lowpass_frequency,
                                args.order)\
                      + " using {} scipy algorithm.({}). "\
                        .format(args.filter_function,
                                os.path.basename(__file__))
    block.segments[0].analogsignals[0] = asig

    write_neo(args.output, block)
| ax.set_zlabel('y-pixel') 48 | ax.view_init(45, -75) 49 | return ax, cmap 50 | 51 | 52 | if __name__ == '__main__': 53 | args, unknown = CLI.parse_known_args() 54 | 55 | block = load_neo(args.data) 56 | 57 | evts = block.filter(name='wavefronts', objects="Event")[0] 58 | 59 | if len(evts): 60 | 61 | if args.time_slice is not None: 62 | asig = block.segments[0].analogsignals[0] 63 | t_stop = asig.t_start.rescale('s') + args.time_slice*pq.s 64 | evts = time_slice(evts, t_start=asig.t_start, t_stop=t_stop) 65 | 66 | ax, cmap = plot_clustering(evts) 67 | 68 | save_plot(args.output) 69 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/background_subtraction.py: -------------------------------------------------------------------------------- 1 | """ 2 | Subtract the background of the input data by subtracting the mean of each channel. 3 | """ 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import argparse 8 | from pathlib import Path 9 | import os 10 | from utils.io_utils import load_neo, write_neo, save_plot 11 | from utils.parse import none_or_path 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to input data in neo format") 16 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 17 | help="path of output file") 18 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, 19 | help="path of output image", default=None) 20 | CLI.add_argument("--output_array", nargs='?', type=none_or_path, 21 | help="path of output numpy array", default=None) 22 | 23 | def shape_frame(value_array, xy_coords): 24 | dim_x = np.max(xy_coords[:,0]) + 1 25 | dim_y = np.max(xy_coords[:,1]) + 1 26 | frame = np.empty((dim_y, dim_x)) * np.nan 27 | for pixel, (x,y) in zip(value_array, xy_coords): 28 | frame[int(y), int(x)] = pixel 29 | return frame 30 | 31 | def plot_frame(frame): 32 | fig, ax = 
"""
Subtract the background of the input data by subtracting the mean of each channel.
"""

import numpy as np
import matplotlib.pyplot as plt
import argparse
from pathlib import Path
import os
from utils.io_utils import load_neo, write_neo, save_plot
from utils.parse import none_or_path

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--output_img", nargs='?', type=none_or_path,
                 help="path of output image", default=None)
CLI.add_argument("--output_array", nargs='?', type=none_or_path,
                 help="path of output numpy array", default=None)

def shape_frame(value_array, xy_coords):
    """Arrange a flat per-channel array into a 2d (y, x) frame.

    Pixels without a corresponding channel stay NaN.
    """
    dim_x = np.max(xy_coords[:,0]) + 1
    dim_y = np.max(xy_coords[:,1]) + 1
    frame = np.empty((dim_y, dim_x)) * np.nan
    for pixel, (x,y) in zip(value_array, xy_coords):
        frame[int(y), int(x)] = pixel
    return frame

def plot_frame(frame):
    """Show the background frame as a grayscale image without axis ticks."""
    fig, ax = plt.subplots()
    ax.imshow(frame, interpolation='nearest', cmap=plt.cm.gray, origin='lower')
    ax.axis('image')
    ax.set_xticks([])
    ax.set_yticks([])
    return ax

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)
    asig = block.segments[0].analogsignals[0]
    signal = asig.as_array()
    # the channel-wise temporal mean serves as the background estimate
    background = np.nanmean(signal, axis=0)
    signal -= background

    # Explicit None-tests on both options; the previous
    # `args.output_img or args.output_array is not None` mixed truthiness
    # with an identity test and was precedence-dependent.
    if args.output_img is not None or args.output_array is not None:
        xy_coords = np.array([(x,y) for x,y in
                              zip(asig.array_annotations['x_coords'],
                                  asig.array_annotations['y_coords'])],
                             dtype=int)
        frame = shape_frame(background, xy_coords)
        if args.output_array is not None:
            np.save(args.output_array, frame)
        if args.output_img is not None:
            plot_frame(frame)
            save_plot(args.output_img)

    new_asig = asig.duplicate_with_new_data(signal)
    new_asig.array_annotations = asig.array_annotations
    new_asig.description += "The mean of each channel was subtracted ({})."\
                            .format(os.path.basename(__file__))
    block.segments[0].analogsignals[0] = new_asig

    write_neo(args.output, block)
3 | """ 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import argparse 8 | from pathlib import Path 9 | import pandas as pd 10 | from utils.io_utils import load_neo, save_plot 11 | from utils.parse import none_or_path 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to input data in neo format") 16 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 17 | help="path of output file") 18 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 19 | help="path of output image") 20 | CLI.add_argument("--time_point", "--TIME_STAMP_POINT", nargs='?', type=str, default='start', 21 | help="when to register the time for a wave [start, middle, end]") 22 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 23 | help="name of neo.Event to analyze (must contain waves)") 24 | 25 | if __name__ == '__main__': 26 | args, unknown = CLI.parse_known_args() 27 | 28 | block = load_neo(args.data) 29 | 30 | if args.time_point == 'start': 31 | time_stamp_func = np.min 32 | elif args.time_point == 'end': 33 | time_stamp_func = np.max 34 | elif args.time_point == 'middle': 35 | time_stamp_func = np.mean 36 | else: 37 | raise InputError('') 38 | 39 | asig = block.segments[0].analogsignals[0] 40 | evts = block.filter(name=args.event_name, objects="Event")[0] 41 | evts = evts[evts.labels.astype('str') != '-1'] 42 | 43 | wave_ids = np.sort(np.unique(evts.labels).astype(int)) 44 | 45 | time_stamps = np.empty(len(wave_ids), dtype=float) 46 | 47 | t_unit = evts.times[0].dimensionality.string 48 | 49 | for i, wave_id in enumerate(wave_ids): 50 | idx = np.where(evts.labels == str(wave_id))[0] 51 | time_stamps[i] = time_stamp_func(evts.times[idx]) 52 | 53 | 54 | fig, ax = plt.subplots(figsize=(15,2)) 55 | for i, wave_id in enumerate(wave_ids): 56 | idx = np.where(evts.labels == str(wave_id))[0] 57 | t0, t1 = 
np.min(evts.times[idx]), np.max(evts.times[idx]) 58 | ax.plot([t0,t1], [1,1], marker='|', color='b') 59 | ax.set_ylim((0,2)) 60 | ax.set_xlabel(f'time [{t_unit}]') 61 | ax.set_title('wave occurences') 62 | if args.output_img is not None: 63 | save_plot(args.output_img) 64 | 65 | # transform to DataFrame 66 | df = pd.DataFrame(time_stamps, columns=['time_stamp']) 67 | df['time_stamp_unit'] = t_unit 68 | df[f'{args.event_name}_id'] = wave_ids 69 | 70 | df.to_csv(args.output) 71 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/plot_power_spectrum.py: -------------------------------------------------------------------------------- 1 | """ 2 | Create a plot of the channel-wise and average power spectrum density. 3 | """ 4 | 5 | import numpy as np 6 | import quantities as pq 7 | import argparse 8 | from pathlib import Path 9 | import matplotlib.pyplot as plt 10 | import seaborn as sns 11 | from elephant.spectral import welch_psd 12 | from utils.io_utils import load_neo, save_plot 13 | from utils.parse import none_or_float 14 | 15 | CLI = argparse.ArgumentParser() 16 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 17 | help="path to input data in neo format") 18 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 19 | help="path of output figure") 20 | CLI.add_argument("--highpass_frequency", nargs='?', type=none_or_float, 21 | default='None', help="lower bound of frequency band in Hz") 22 | CLI.add_argument("--lowpass_frequency", nargs='?', type=none_or_float, 23 | default='None', help="upper bound of frequency band in Hz") 24 | CLI.add_argument("--psd_frequency_resolution", nargs='?', type=float, default=5, 25 | help="frequency resolution of the power spectrum in Hz") 26 | CLI.add_argument("--psd_overlap", nargs='?', type=float, default=0.5, 27 | help="overlap parameter for Welch's algorithm [0-1]") 28 | 29 | def plot_psd(frequencies, psd, 
"""
Create a plot of the channel-wise and average power spectrum density.
"""

import numpy as np
import quantities as pq
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
from elephant.spectral import welch_psd
from utils.io_utils import load_neo, save_plot
from utils.parse import none_or_float

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output figure")
CLI.add_argument("--highpass_frequency", nargs='?', type=none_or_float,
                 default='None', help="lower bound of frequency band in Hz")
CLI.add_argument("--lowpass_frequency", nargs='?', type=none_or_float,
                 default='None', help="upper bound of frequency band in Hz")
CLI.add_argument("--psd_frequency_resolution", nargs='?', type=float, default=5,
                 help="frequency resolution of the power spectrum in Hz")
CLI.add_argument("--psd_overlap", nargs='?', type=float, default=0.5,
                 help="overlap parameter for Welch's algorithm [0-1]")

def plot_psd(frequencies, psd, highpass_frequency, lowpass_frequency):
    """Plot each channel's PSD and the channel average on a log scale.

    Parameters
    ----------
    frequencies : array-like
        Frequency bins of the PSD.
    psd : 2d array-like, shape (channels, frequencies)
        Power spectral density per channel.
    highpass_frequency, lowpass_frequency : float or None
        Filter band edges; the filtered band is shaded. Either edge may be
        None for an open-ended band.
    """
    sns.set(style='ticks', palette="deep", context="notebook")
    fig, ax = plt.subplots()
    for channel in psd:
        ax.semilogy(frequencies, channel, alpha=0.7)
    ax.semilogy(frequencies, np.mean(psd, axis=0), linewidth=2,
                color='k', label='channel average')
    ax.set_title('Power Spectrum')
    ax.set_xlabel('frequency [Hz]')
    ax.set_ylabel('power spectral density')

    # Shade the filtered band whenever at least one edge is set. The
    # previous `and` made the per-edge fallbacks below unreachable and
    # suppressed the shading when only one edge was given.
    if highpass_frequency is not None or lowpass_frequency is not None:
        left = highpass_frequency if highpass_frequency is not None else 0
        right = lowpass_frequency if lowpass_frequency is not None else ax.get_xlim()[1]
        ax.axvspan(left, right, alpha=0.2, color='k', label='filtered region')

    plt.legend()
    return fig


if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    asig = load_neo(args.data, 'analogsignal')

    freqs, psd = welch_psd(asig,
                           frequency_resolution=args.psd_frequency_resolution*pq.Hz,
                           window='hann',
                           overlap=args.psd_overlap)

    plot_psd(frequencies=freqs,
             psd=psd,
             highpass_frequency=args.highpass_frequency,
             lowpass_frequency=args.lowpass_frequency)

    save_plot(args.output)
5 | """ 6 | 7 | import numpy as np 8 | import argparse 9 | from pathlib import Path 10 | import warnings 11 | import re 12 | from utils.io_utils import load_neo 13 | from utils.parse import none_or_str 14 | 15 | CLI = argparse.ArgumentParser() 16 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 17 | help="path to input data in neo format") 18 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=none_or_str, default=None, 19 | help="name of neo.Event to analyze (must contain waves)") 20 | CLI.add_argument("--measures", "--MEASURES", nargs='+', type=none_or_str, default=None, 21 | help="list of measure names to apply") 22 | 23 | if __name__ == '__main__': 24 | args, unknown = CLI.parse_known_args() 25 | 26 | if args.measures is not None and args.event_name == 'wavemodes': 27 | mode_invalid = ['label_planar', 'inter_wave_interval', 28 | 'number_of_triggers', 'time_stamp'] 29 | args.measures = [re.sub(r"[\[\],\s]", "", measure) for measure in args.measures] 30 | invalid_measures = [measure for measure in args.measures \ 31 | if measure in mode_invalid] 32 | if len(invalid_measures): 33 | warnings.warn('The following selected measures are can not be ' 34 | 'calculated for wavemodes and will be skipped: ' 35 | f'{", ".join(invalid_measures)}.') 36 | 37 | block = load_neo(args.data) 38 | 39 | if len(block.segments) > 1: 40 | print("More than one Segment found; all except the first one " \ 41 | + "will be ignored.") 42 | 43 | evts = block.filter(name='wavefronts', objects="Event") 44 | if not len(evts): 45 | raise ValueError("No 'wavefronts' events found!") 46 | 47 | evt = evts[0] 48 | evt = evt[evt.labels != '-1'] 49 | num_waves = len(np.unique(evt.labels)) 50 | 51 | if num_waves: 52 | print(f'{num_waves} wavefronts found.') 53 | else: 54 | raise ValueError("There are no waves detected!") 55 | 56 | evt.array_annotations['x_coords'] 57 | evt.array_annotations['y_coords'] 58 | evt.annotations['spatial_scale'] 59 | 60 | evts = 
block.filter(name='optical_flow', objects="AnalogSignal") 61 | if not len(evts): 62 | warnings.warn('No Optical-Flow signal available!') 63 | 64 | evts = block.filter(name='wavemodes', objects="Event") 65 | if len(evts): 66 | print(f'{len(np.unique(evts[0].labels))} wavemodes found') 67 | else: 68 | warnings.warn("No 'wavemodes' events found!") 69 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/scripts/inter_wave_interval.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate the period between two consecutive waves for each wave. 3 | """ 4 | 5 | from pathlib import Path 6 | import neo 7 | import numpy as np 8 | import quantities as pq 9 | import matplotlib.pyplot as plt 10 | import seaborn as sns 11 | import os 12 | import argparse 13 | import scipy 14 | import pandas as pd 15 | from utils.io_utils import load_neo, save_plot 16 | from utils.parse import none_or_path 17 | 18 | CLI = argparse.ArgumentParser() 19 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 20 | help="path to input data in neo format") 21 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 22 | help="path of output file") 23 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 24 | help="path of output image file") 25 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 26 | help="name of neo.Event to analyze (must contain waves)") 27 | 28 | if __name__ == '__main__': 29 | args, unknown = CLI.parse_known_args() 30 | 31 | block = load_neo(args.data) 32 | 33 | asig = block.segments[0].analogsignals[0] 34 | evts = block.filter(name='wavefronts', objects="Event")[0] 35 | num_nonnan_channels = np.sum(np.isfinite(asig[0]).astype(int)) 36 | 37 | wave_ids = np.sort(np.unique(evts.labels).astype(int)) 38 | if wave_ids[0] == -1: 39 | wave_ids = np.delete(wave_ids, 0) 40 | 41 
"""
Calculate the period between two consecutive waves for each wave.
"""

from pathlib import Path
import neo
import numpy as np
import quantities as pq
import matplotlib.pyplot as plt
import seaborn as sns
import os
import argparse
import scipy
import pandas as pd
from utils.io_utils import load_neo, save_plot
from utils.parse import none_or_path

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None,
                 help="path of output image file")
CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts',
                 help="name of neo.Event to analyze (must contain waves)")

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)

    asig = block.segments[0].analogsignals[0]
    # respect the configurable event name (was hard-coded to 'wavefronts'
    # although the output column already used args.event_name)
    evts = block.filter(name=args.event_name, objects="Event")[0]

    wave_ids = np.sort(np.unique(evts.labels).astype(int))
    if wave_ids[0] == -1:
        # label '-1' marks triggers not assigned to any wave
        wave_ids = np.delete(wave_ids, 0)

    # per-channel trigger times of the current (row 0) and next (row 1) wave
    wave_times = np.empty((2, asig.shape[1]), dtype=float)

    # column 0: channel-averaged inter-wave interval, column 1: its std;
    # the last wave has no successor and keeps NaN
    IWIs = np.empty((len(wave_ids), 2))
    IWIs.fill(np.nan)

    t_unit = evts.times[0].dimensionality.string

    # Index IWIs by position and pair each wave with the next *existing* id.
    # (Previously rows were indexed by the raw wave id and paired with
    # id+1, which breaks when ids do not run contiguously from 0.)
    for i, (wave_id, next_id) in enumerate(zip(wave_ids[:-1], wave_ids[1:])):
        wave_times.fill(np.nan)
        for row, wi in enumerate([wave_id, next_id]):
            idx = np.where(evts.labels == str(wi))[0]
            trigger_channels = evts.array_annotations['channels'][idx]
            trigger_times = evts.times[idx]
            for channel, t in zip(trigger_channels, trigger_times):
                wave_times[row, int(channel)] = t
        # channel-wise interval between the two waves' trigger times
        inter_wave_intervals = wave_times[1] - wave_times[0]
        IWIs[i, 0] = np.nanmean(inter_wave_intervals)
        IWIs[i, 1] = np.nanstd(inter_wave_intervals)

    # transform to DataFrame
    df = pd.DataFrame(IWIs, columns=['inter_wave_interval', 'inter_wave_interval_std'])
    df['inter_wave_interval_unit'] = t_unit
    df[f'{args.event_name}_id'] = wave_ids
    df.to_csv(args.output)

    # ToDo
    fig, ax = plt.subplots()

    if np.isfinite(IWIs[:,0]).any():
        ax.hist(IWIs[:,0])

    ax.set_xlabel('inter-wave interval [s]')
    if args.output_img is not None:
        save_plot(args.output_img)
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import numpy as np 8 | from copy import copy 9 | import matplotlib.pyplot as plt 10 | from utils.io_utils import load_neo, save_plot 11 | from utils.neo_utils import analogsignal_to_imagesequence 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to input data in neo format") 16 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 17 | help="path of output file") 18 | CLI.add_argument("--skip_step", nargs='?', type=int, default=3, 19 | help="skipping every x vector for the plot") 20 | CLI.add_argument("--frame_id", nargs='?', type=int, default=0, 21 | help="number of the frame to plot") 22 | 23 | def plot_frame(frame, ax=None, skip_step=3): 24 | dim_y, dim_x = frame.shape 25 | 26 | if ax is None: 27 | fig, ax = plt.subplots() 28 | 29 | ax.quiver(np.arange(dim_x)[::skip_step], 30 | np.arange(dim_y)[::skip_step], 31 | np.real(frame[::skip_step,::skip_step]), 32 | np.imag(frame[::skip_step,::skip_step])) 33 | 34 | Y, X = np.meshgrid(np.arange(dim_y), np.arange(dim_x), indexing='ij') 35 | ZR = np.real(frame) 36 | ZI = np.imag(frame) 37 | contourR = ax.contour(X, Y, ZR, levels=[0], colors='b', label='x = 0') 38 | contourI = ax.contour(X, Y, ZI, levels=[0], colors='g', label='y = 0') 39 | return ax 40 | 41 | 42 | if __name__ == '__main__': 43 | args, unknown = CLI.parse_known_args() 44 | block = load_neo(args.data) 45 | 46 | asigs = block.filter(name='optical_flow', objects="AnalogSignal") 47 | 48 | if asigs: 49 | asig = asigs[0] 50 | else: 51 | raise ValueError("Input does not contain a signal with name " \ 52 | + "'optical_flow'!") 53 | 54 | imgseq = analogsignal_to_imagesequence(asig) 55 | 56 | crit_point_evt = [evt for evt in block.segments[0].events 57 | if evt.name == "critical_points"] 58 | if crit_point_evt: 59 | crit_point_evt = crit_point_evt[0] 60 | else: 61 | raise ValueError("Input does not contain an 
event with name " \ 62 | + "'critical_points'!") 63 | 64 | fig, ax = plt.subplots() 65 | 66 | ax = plot_frame(imgseq.as_array()[args.frame_id], 67 | skip_step=args.skip_step, 68 | ax=ax) 69 | 70 | start_id = np.argmax(crit_point_evt.times >= asig.times[args.frame_id]) 71 | stop_id = np.argmax(crit_point_evt.times >= asig.times[args.frame_id+1]) 72 | 73 | ax.scatter(crit_point_evt.array_annotations['x'][start_id:stop_id], 74 | crit_point_evt.array_annotations['y'][start_id:stop_id], 75 | color='r') 76 | 77 | ax.set_title(f"{asig.times[args.frame_id]:.2f}") 78 | ax.set_xticklabels([]) 79 | ax.set_yticklabels([]) 80 | 81 | save_plot(args.output) 82 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage01_data_entry/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | # Config file for Stage 1 - Data Entry 2 | 3 | # Name of stage, must be identical with folder name 4 | STAGE_NAME: 'stage01_data_entry' 5 | 6 | # The profile name is the key for this parameter configuration. Results are stored in output_path// (output_path is defined in settings.py) 7 | PROFILE: 'dataset_key' 8 | 9 | # Name of stage output file. 10 | STAGE_OUTPUT: 'data' 11 | 12 | # File format in which all intermediate neo objects are stored 13 | NEO_FORMAT: 'nix' 14 | 15 | # If True (default), the output file of a stage is created as symbolic link 16 | # to the last block output. If False, a duplicate is created (e.g. for cloud 17 | # application, where sym-links are not supported). 18 | USE_LINK_AS_STAGE_OUTPUT: True 19 | 20 | # Datasets which should be processed with identical settings. 21 | # To set as key-value pairs 22 | # Name_of_dataset: /path/to/data/files/ 23 | DATA_SETS: # Note that only the first entry will be used for the pipeline! 
24 | data_name: '/path/to/data/' 25 | 26 | # Name of script in scripts/ folder, handling the loading and curation 27 | CURATION_SCRIPT: 'curate_dataset_key.py' 28 | 29 | # Distance between electrodes (or pixel size) 30 | SPATIAL_SCALE: 0.05 # mm 31 | 32 | # Rate of acquisition of data points 33 | # Can be 'None' if information is provided in the data file 34 | SAMPLING_RATE: 25 # Hz 35 | 36 | # Time slice in s 37 | # if None, the entire recording is used 38 | T_START: None 39 | T_STOP: None 40 | 41 | # Orientation of the recorded cortical region 42 | # recommend top: ventral 43 | ORIENTATION_TOP: 'ventral' 44 | # recommended right: 'lateral' for right hemisphere, 'medial' for left hemisphere 45 | ORIENTATION_RIGHT: 'lateral' 46 | 47 | TRIAL: None 48 | 49 | # Metadata - general information (non-specific to individual channels) 50 | # This must include an entry 'grid_size' with an int tuple 51 | # (x-dim, y-dim). The size of this rectangual grid would thus be 52 | # (x-dim*SPATIAL_SCALE, y-dim*SPATIAL_SCALE). 53 | ANNOTATIONS: 54 | experimental_lab: 'Santa Corp, Antarctica' 55 | grid_size: [2, 2] 56 | anesthetic: 'Isoflurane' 57 | # 58 | # # Metadata - channel-wise information (e.g. coordinates, SNR, ...), can be 'None' 59 | # # Here, the entries must be lists of the same length as the number of channels 60 | ARRAY_ANNOTATIONS: 61 | x_coords: [0, 0, 1, 1] 62 | y_coords: [0, 1, 0, 1] 63 | 64 | # Script parameters 65 | # Here, additional parameters can be passed to the script, 66 | # to be used to determine additional annotations or array_annotations. Can be 'None' or empty. 
67 | KWARGS: 'None' 68 | 69 | # Plotting parameters 70 | # These parameters are used to create an example plot to get a first view 71 | # on the signals and ensure that the data was loaded and annotated correctly 72 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 73 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 74 | PLOT_CHANNELS: 'None' # int, or list of int, or 'None' -> randomly chosen channel 75 | PLOT_FORMAT: 'png' # file extension for storing the plot 76 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/plot_processed_trace.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot an example signal trace before and after application of some processing steps. 3 | """ 4 | 5 | import matplotlib.pyplot as plt 6 | import seaborn as sns 7 | import argparse 8 | from pathlib import Path 9 | import os 10 | from utils.io_utils import load_neo, save_plot 11 | from utils.neo_utils import time_slice 12 | from utils.parse import none_or_float 13 | 14 | CLI = argparse.ArgumentParser() 15 | CLI.add_argument("--original_data", nargs='?', type=Path, required=True, 16 | help="path to original input data in neo format") 17 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 18 | help="path to processed input data in neo format") 19 | CLI.add_argument("--img_dir", nargs='?', type=Path, required=True, 20 | help="path of output figure directory") 21 | CLI.add_argument("--img_name", nargs='?', type=str, 22 | default='processed_trace_channel0.png', 23 | help='example filename for channel 0') 24 | CLI.add_argument("--t_start", nargs='?', type=none_or_float, default=0, 25 | help="start time in seconds") 26 | CLI.add_argument("--t_stop", nargs='?', type=none_or_float, default=10, 27 | help="stop time in seconds") 28 | CLI.add_argument("--channels", nargs='+', type=int, 
"""
Plot an example signal trace before and after application of some processing steps.
"""

import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from pathlib import Path
import os
from utils.io_utils import load_neo, save_plot
from utils.neo_utils import time_slice
from utils.parse import none_or_float

CLI = argparse.ArgumentParser()
CLI.add_argument("--original_data", nargs='?', type=Path, required=True,
                 help="path to original input data in neo format")
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to processed input data in neo format")
CLI.add_argument("--img_dir", nargs='?', type=Path, required=True,
                 help="path of output figure directory")
CLI.add_argument("--img_name", nargs='?', type=str,
                 default='processed_trace_channel0.png',
                 help='example filename for channel 0')
CLI.add_argument("--t_start", nargs='?', type=none_or_float, default=0,
                 help="start time in seconds")
CLI.add_argument("--t_stop", nargs='?', type=none_or_float, default=10,
                 help="stop time in seconds")
# default must be a list: with the previous `default=0` the loop below
# (`for channel in args.channels`) raised a TypeError whenever the
# option was omitted
CLI.add_argument("--channels", nargs='+', type=int, default=[0],
                 help="channel to plot")

def plot_traces(original_asig, processed_asig, channel):
    """Overlay the original and processed trace of one channel on twin y-axes."""
    sns.set(style='ticks', palette="deep", context="notebook")
    fig, ax1 = plt.subplots()
    palette = sns.color_palette()

    ax1.plot(original_asig.times,
             original_asig.as_array()[:,channel],
             color=palette[0])
    ax1.set_ylabel('original signal', color=palette[0])
    ax1.tick_params('y', colors=palette[0])

    # second y-axis: the processed signal may live on a different scale
    ax2 = ax1.twinx()
    ax2.plot(processed_asig.times,
             processed_asig.as_array()[:,channel],
             color=palette[1])
    ax2.set_ylabel('processed signal', color=palette[1])
    ax2.tick_params('y', colors=palette[1])

    ax1.set_title('Channel {}'.format(channel))
    ax1.set_xlabel('time [{}]'.format(original_asig.times.units.dimensionality.string))

    return ax1, ax2


if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    orig_asig = load_neo(args.original_data, 'analogsignal', lazy=False)
    orig_asig = time_slice(orig_asig, t_start=args.t_start, t_stop=args.t_stop,
                           lazy=False, channel_indexes=args.channels)

    proc_asig = load_neo(args.data, 'analogsignal', lazy=False)
    proc_asig = time_slice(proc_asig, t_start=args.t_start, t_stop=args.t_stop,
                           lazy=False, channel_indexes=args.channels)

    # one figure per requested channel, named after the channel id
    for channel in args.channels:
        plot_traces(orig_asig, proc_asig, channel)
        output_path = os.path.join(args.img_dir,
                                   args.img_name.replace('_channel0', f'_channel{channel}'))
        save_plot(output_path)
autosummary:: 17 | :toctree: _toctree/stageXY_template/ 18 | 19 | offset_signal 20 | standard_rule 21 | calssic_rule 22 | """ 23 | 24 | from pathlib import Path 25 | configfile: Path('configs') / 'config_template.yaml' 26 | include: Path() / '..' / 'utils' / 'Snakefile' 27 | 28 | #### Housekeeping #### 29 | 30 | def _final_rule_output(wildcards, default_output=config.STAGE_INPUT): 31 | # you can use function of the wildcards and config 32 | # to select the output of a rule that 33 | # yields the, for example, the final output of the stage 34 | if hasattr(wildcards, 'measure'): 35 | return OUTPUT_DIR / wildcards.measure \ 36 | / f'{wildcards.measure}.{config.NEO_FORMAT}' 37 | elif config.OFFSET is None: 38 | return default_output 39 | else: 40 | return OUTPUT_DIR / 'offset_signal' / f'offset_signal.{config.NEO_FORMAT}' 41 | 42 | #### UTILITY BLOCKS #### 43 | 44 | use rule template_all as all with: 45 | input: 46 | check = OUTPUT_DIR / 'input.check', 47 | data = _final_rule_output, 48 | # img = OUTPUT_DIR / '' 49 | 50 | #### OTHER BLOCKS #### 51 | 52 | use rule template as standard_rule with: 53 | # use a standard template (recommended) 54 | # see utils/Snakefile and utils/scripts/snakefile.py for details 55 | input: 56 | data = config.STAGE_INPUT, 57 | script = SCRIPTS / '{measure}.py' 58 | output: 59 | Path('{dir}') / '{measure}' / f'output_data.{config.NEO_FORMAT}', 60 | params: 61 | params(a=config.A) 62 | # equivalen to params('a', config=config) 63 | 64 | use rule template as offset_signal with: 65 | # example for a specific rule 66 | input: 67 | data = config.STAGE_INPUT, 68 | script = SCRIPTS / 'script_template.py' 69 | output: 70 | Path('{dir}') / 'offset_signal' / f'offset_signal.{config.NEO_FORMAT}', 71 | img_dir = directory(OUTPUT_DIR / 'offset_signal') 72 | params: 73 | params('offset', 'plot_tstart', 'plot_tstop', 'plot_channels', 74 | config=config, 75 | img_name='offset_channel0'+config.PLOT_FORMAT) 76 | 77 | rule classic_rule: 78 | # legacy rule 
syntax without using a rule template 79 | input: 80 | data = config.STAGE_INPUT, 81 | script = SCRIPTS / '{measure}.py' 82 | params: 83 | a = config.A 84 | output: 85 | Path('{dir}') / '{measure}' / f'output_data.{config.NEO_FORMAT}', 86 | img = Path('{dir}') / '{measure}' / f'{{measure}}.{config.PLOT_FORMAT}' 87 | shell: 88 | """ 89 | {ADD_UTILS} 90 | python3 {input.script:q} --data {input.data:q} \ 91 | --output {output:q}" \ 92 | --output_img {output.img:q} \ 93 | --a {params.a} 94 | """ 95 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage01_data_entry/scripts/plot_traces.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plots excerpts of the input data with its corresponding metadata. 3 | """ 4 | 5 | import numpy as np 6 | import argparse 7 | from pathlib import Path 8 | import matplotlib.pyplot as plt 9 | import seaborn as sns 10 | from utils.io_utils import load_neo, save_plot 11 | from utils.neo_utils import time_slice 12 | from utils.parse import parse_plot_channels, none_or_int, none_or_float 13 | 14 | CLI = argparse.ArgumentParser() 15 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 16 | help="path to input data in neo format") 17 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 18 | help="path of output figure") 19 | CLI.add_argument("--t_start", nargs='?', type=none_or_float, default=0, 20 | help="start time in seconds") 21 | CLI.add_argument("--t_stop", nargs='?', type=none_or_float, default=10, 22 | help="stop time in seconds") 23 | CLI.add_argument("--channels", nargs='+', type=none_or_int, default=0, 24 | help="list of channels to plot") 25 | 26 | def plot_traces(asig, channels): 27 | sns.set(style='ticks', palette="deep", context="notebook") 28 | fig, ax = plt.subplots() 29 | 30 | offset = np.max(np.abs(asig.as_array()[:,channels])) 31 | for i, signal in enumerate(asig.as_array()[:,channels].T): 32 | 
ax.plot(asig.times, signal + i*offset) 33 | 34 | annotations = [f'{k}: {v}' for k,v in asig.annotations.items() 35 | if k not in ['nix_name', 'neo_name']] 36 | array_annotations = [f'{k}: {v[channels]}' 37 | for k,v in asig.array_annotations.items()] 38 | 39 | x_coords = asig.array_annotations['x_coords'] 40 | y_coords = asig.array_annotations['y_coords'] 41 | dim_x, dim_y = np.max(x_coords)+1, np.max(y_coords)+1 42 | 43 | ax.text(1.05, 0.5, 44 | f'ANNOTATIONS FOR CHANNEL(s): {channels}' + '\n' \ 45 | + '\n' \ 46 | + 'ANNOTATIONS:' + '\n' \ 47 | + ' - ' + '\n - '.join(annotations) + '\n' \ 48 | + '\n' \ 49 | + 'ARRAY ANNOTATIONS:' + '\n' \ 50 | + ' - ' + '\n - '.join(array_annotations) + '\n' \ 51 | + f' - t_start: {asig.t_start}; t_stop: {asig.t_stop}' + '\n' \ 52 | + f' - dimensions(x,y): {dim_x}, {dim_y}', 53 | ha='left', va='center', transform=ax.transAxes) 54 | 55 | ax.set_xlabel(f'time [{asig.times.units.dimensionality.string}]') 56 | ax.set_ylabel(f'channels [in {asig.units.dimensionality.string}]') 57 | ax.set_yticks([i*offset for i in range(len(channels))]) 58 | ax.set_yticklabels(channels) 59 | return ax 60 | 61 | 62 | if __name__ == '__main__': 63 | args, unknown = CLI.parse_known_args() 64 | 65 | asig = load_neo(args.data, 'analogsignal', lazy=True) 66 | 67 | channels = parse_plot_channels(args.channels, args.data) 68 | 69 | asig = time_slice(asig, t_start=args.t_start, t_stop=args.t_stop, 70 | lazy=True, channel_indexes=channels) 71 | 72 | ax = plot_traces(asig, channels) 73 | save_plot(args.output) 74 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/scripts/threshold.py: -------------------------------------------------------------------------------- 1 | """ 2 | Detect trigger times (i.e., state transition / local wavefronts onsets) 3 | by applying a threshold to each channel signal. 
4 | """ 5 | 6 | import neo 7 | import numpy as np 8 | import argparse 9 | from pathlib import Path 10 | from utils.io_utils import load_neo, write_neo 11 | from utils.neo_utils import remove_annotations 12 | 13 | CLI = argparse.ArgumentParser() 14 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 15 | help="path to input data in neo format") 16 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 17 | help="path of output file") 18 | CLI.add_argument("--thresholds", nargs='?', type=str, required=True, 19 | help="path of thresholds (numpy array)") 20 | 21 | def threshold(asig, threshold_array): 22 | dim_t, channel_num = asig.shape 23 | th_signal = asig.as_array()\ 24 | - np.repeat(threshold_array[np.newaxis, :], dim_t, axis=0) 25 | state_array = th_signal > 0 26 | rolled_state_array = np.roll(state_array, 1, axis=0) 27 | 28 | all_times = np.array([]) 29 | all_channels = np.array([], dtype=int) 30 | all_labels = np.array([]) 31 | for label, func in zip(['UP', 'DOWN'], 32 | [lambda x: x, lambda x: np.bitwise_not(x)]): 33 | trans = np.where(func(np.bitwise_not(rolled_state_array))\ 34 | * func(state_array)) 35 | channels = trans[1] 36 | times = asig.times[trans[0]] 37 | 38 | if not len(times): 39 | raise ValueError("The chosen threshold lies not within the range "\ 40 | + "of the signal values!") 41 | 42 | all_channels = np.append(all_channels, channels) 43 | all_times = np.append(all_times, times) 44 | all_labels = np.append(all_labels, np.array([label for _ in times])) 45 | 46 | sort_idx = np.argsort(all_times) 47 | 48 | evt = neo.Event(times=all_times[sort_idx]*asig.times.units, 49 | labels=all_labels[sort_idx], 50 | name='transitions', 51 | trigger_detection='threshold', 52 | array_annotations={'channels':all_channels[sort_idx]}, 53 | threshold=threshold_array, 54 | description='Transitions between down and up states with '\ 55 | +'labels "UP" and "DOWN". 
'\ 56 | +'Annotated with the channel id ("channels").') 57 | 58 | for key in asig.array_annotations.keys(): 59 | evt_ann = {key : asig.array_annotations[key][all_channels[sort_idx]]} 60 | evt.array_annotations.update(evt_ann) 61 | 62 | remove_annotations(asig, del_keys=['nix_name', 'neo_name']) 63 | evt.annotations.update(asig.annotations) 64 | return evt 65 | 66 | 67 | if __name__ == '__main__': 68 | args, unknown = CLI.parse_known_args() 69 | 70 | block = load_neo(args.data) 71 | 72 | asig = block.segments[0].analogsignals[0] 73 | 74 | transition_event = threshold(asig, np.load(args.thresholds)) 75 | 76 | block.segments[0].events.append(transition_event) 77 | 78 | write_neo(args.output, block) 79 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/scripts/spatial_downsampling.py: -------------------------------------------------------------------------------- 1 | """ 2 | Downsample the input data by combining and averaging neighboring channels. 
3 | """ 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import argparse 8 | from pathlib import Path 9 | import os 10 | import neo 11 | from skimage import measure 12 | from utils.io_utils import load_neo, write_neo, save_plot 13 | from utils.parse import none_or_path 14 | from utils.neo_utils import analogsignal_to_imagesequence, imagesequence_to_analogsignal 15 | 16 | CLI = argparse.ArgumentParser() 17 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 18 | help="path to input data in neo format") 19 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 20 | help="path of output file") 21 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, 22 | help="path of output image", default=None) 23 | CLI.add_argument("--macro_pixel_dim", nargs='?', type=int, 24 | help="smoothing factor", default=2) 25 | 26 | def spatial_smoothing(imgseq, macro_pixel_dim): 27 | images_reduced = measure.block_reduce(imgseq.as_array(), 28 | block_size=(1, macro_pixel_dim, macro_pixel_dim), 29 | func=np.nanmean, 30 | cval=np.nan) #np.nanmedian(imgseq.as_array())) 31 | 32 | imgseq_reduced = neo.ImageSequence(images_reduced, 33 | units=imgseq.units, 34 | spatial_scale=imgseq.spatial_scale * macro_pixel_dim, 35 | macro_pixel_dim=macro_pixel_dim, 36 | sampling_rate=imgseq.sampling_rate, 37 | file_origin=imgseq.file_origin, 38 | t_start=imgseq.t_start) 39 | 40 | if 'array_annotations' in imgseq.annotations: 41 | del imgseq.annotations['array_annotations'] 42 | 43 | imgseq_reduced.annotations.update(imgseq.annotations) 44 | 45 | if imgseq.name: 46 | imgseq_reduced.name = imgseq.name 47 | imgseq_reduced.annotations.update(macro_pixel_dim=macro_pixel_dim) 48 | imgseq_reduced.description = imgseq.description + \ 49 | "spatially downsampled ({}).".format(os.path.basename(__file__)) 50 | 51 | return imgseq_reduced 52 | 53 | def plot_downsampled_image(image, output_path): 54 | plt.figure() 55 | plt.imshow(image, interpolation='nearest', 
if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    # Load the recording and switch to the (time, x, y) image representation.
    block = load_neo(args.data)
    asig = block.segments[0].analogsignals[0]
    imgseq = analogsignal_to_imagesequence(asig)

    # Average neighboring pixels into macro-pixels.
    reduced = spatial_smoothing(imgseq, args.macro_pixel_dim)

    # Optional visual check: plot the first downsampled frame.
    if args.output_img is not None:
        plot_downsampled_image(reduced.as_array()[0], args.output_img)

    # Convert back and swap the signal into the loaded block, so the
    # remaining block content is preserved in the output file.
    block.segments[0].analogsignals[0] = imagesequence_to_analogsignal(reduced)

    write_neo(args.output, block)
default 'None' -> randomly selected 24 | PLOT_FORMAT: 'png' 25 | 26 | # DETECTION BLOCK 27 | ################# 28 | # Available Blocks: 'threshold', 'hilbert_phase', 'minima' 29 | DETECTION_BLOCK: 'threshold' 30 | 31 | # TRIGGER FILTER 32 | ################# 33 | # Available Blocks: 'remove_short_states' 34 | TRIGGER_FILTER: ['remove_short_states'] 35 | 36 | # BLOCK - Threshold 37 | ################### 38 | # Threshold method: 'fixed', 'fitted' 39 | THRESHOLD_METHOD: 'fixed' 40 | 41 | # Fitting Parameters 42 | # available fit functions: 'HalfGaussian', 'DoubleGaussian' 43 | # HalfGaussian: detects main peak (DOWN), fits Gaussian to left half, set threshold to mean + sigma*SIGMA_FACTOR 44 | # DoubleGaussian: Fits two Gaussians, threshold = central minima, or if no second peak mean1 + sigma1*SIGMA_FACTOR 45 | FIT_FUNCTION: 'DoubleGaussian' 46 | BIN_NUM: 100 47 | # Factor to multiply with the standard deviation 48 | # to determine threshold 49 | SIGMA_FACTOR: 2 50 | 51 | # Fixed Parameters 52 | FIXED_THRESHOLD: 0 53 | 54 | # BLOCK - Hilbert_phase 55 | ####################### 56 | # Phase at which to define the upward transition [-pi, 0] 57 | TRANSITION_PHASE: -1.570796 58 | 59 | # BLOCK - Minima 60 | ################ 61 | # Number of points to be used in the parabolic interpolation 62 | # 0 skips the interpolation (recommended) 63 | NUM_INTERPOLATION_POINTS: 0 64 | # minimum distance between two peaks (s) 65 | MIN_PEAK_DISTANCE: 0.28 66 | # amplitude fraction to set the threshold detecting local maxima 67 | MAXIMA_THRESHOLD_FRACTION: 0.5 68 | # time window to use to set the threshold detecting local maxima (s) 69 | # default value 'None' is meant to set the time window equal to the entire signal length 70 | MAXIMA_THRESHOLD_WINDOW: 'None' 71 | # minimum time the signal must be increasing after a minima candidate (s) 72 | MINIMA_PERSISTENCE: 0.16 73 | 74 | # BLOCK - Remove Short States 75 | ############################# 76 | # minimum duration of UP and DOWN states
def detrend(asig, order):
    """Remove a constant (order 0) or linear (order 1) trend per channel.

    Channels containing any NaN sample are skipped and come out as
    all-NaN columns (presumably because detrending would propagate the
    NaNs anyway — e.g. pixels outside a region of interest).

    Parameters
    ----------
    asig : neo.AnalogSignal
        Input signal of shape (time, channels).
    order : int
        0 for constant (mean) detrending, 1 for linear detrending.
        Any other value issues a warning and returns the input unchanged.

    Returns
    -------
    neo.AnalogSignal
        Detrended copy with the original array annotations.
    """
    # Bug fix: the file only does `import scipy`, which does not guarantee
    # that the `signal` subpackage is loaded on all scipy versions.
    import scipy.signal

    if order not in (0, 1):
        warnings.warn("Detrending order must be either 0 (constant) or 1 (linear)! Skip.")
        return asig

    dtrend = 'linear' if order else 'constant'
    detrended_signals = np.empty(asig.shape)
    detrended_signals.fill(np.nan)

    for channel in range(asig.shape[1]):
        channel_signal = asig.as_array()[:,channel]
        # Skip NaN-containing channels; they stay all-NaN in the output.
        if np.isnan(channel_signal).any():
            continue
        detrended = scipy.signal.detrend(channel_signal, type=dtrend, axis=0)
        detrended_signals[:,channel] = detrended
    detrend_asig = asig.duplicate_with_new_data(detrended_signals)
    detrend_asig.array_annotate(**asig.array_annotations)
    return detrend_asig
"\ 80 | .format(args.order, os.path.basename(__file__)) 81 | block.segments[0].analogsignals[0] = detrend_asig 82 | 83 | write_neo(args.output, block) 84 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stageXY_template/scripts/script_template.py: -------------------------------------------------------------------------------- 1 | """ 2 | Offsets the signal in all channels by a fixed value. 3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import matplotlib.pyplot as plt 8 | from utils.io_utils import load_neo, write_neo, save_plot 9 | from utils.parse import none_or_int, none_or_float, none_or_path 10 | from utils.neo_utils import time_slice 11 | 12 | CLI = argparse.ArgumentParser() 13 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 14 | help="path to input data in neo format") 15 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 16 | help="path of output file") 17 | CLI.add_argument("--offset", nargs='?', type=none_or_float, default=None, 18 | help="offset the signal by some value") 19 | CLI.add_argument("--img_dir", nargs='?', type=none_or_path, default=None, 20 | help="path of figure directory") 21 | CLI.add_argument("--img_name", nargs='?', type=str, default='offset_channel0.png', 22 | help='example image filename for channel 0') 23 | CLI.add_argument("--plot_channels", nargs='+', type=none_or_int, default=None, 24 | help="list of channels to plot") 25 | CLI.add_argument("--plot_tstart", nargs='?', type=none_or_float, default=0, 26 | help="plotting start time in seconds") 27 | CLI.add_argument("--plot_tstop", nargs='?', type=none_or_float, default=10, 28 | help="plotting stop time in seconds") 29 | 30 | def offset_signal(asig, offset=None): 31 | if offset is None: 32 | offset = 0 33 | 34 | new_signal = asig.as_array() + offset 35 | 36 | new_asig = asig.duplicate_with_new_data(new_signal) 37 | 38 | new_asig.array_annotate(**asig.array_annotations) 39 | 
new_asig.annotate(offset=offset) 40 | new_asig.description += f"Offset by {offset_signal} ({__file__}). " 41 | 42 | return new_asig 43 | 44 | 45 | def plot_signal(asig, new_asig, channel=0, t_start=None, t_stop=None): 46 | fig, ax = plt.subplots(figsize=(17,8)) 47 | 48 | asig = time_slice(asig, t_start=t_start, t_stop=t_stop) 49 | ax.plot(asig.times, asig.as_array()[:,channel], color='b', 50 | linewidth=1, label='original signal') 51 | 52 | new_asig = time_slice(new_asig, t_start=t_start, t_stop=t_stop) 53 | ax.plot(new_asig.times, new_asig.as_array()[:,channel], color='g', 54 | linewidth=1, label='offset signal') 55 | 56 | ax.set_xlabel(f'time [{asig.times.dimensionality.string}]') 57 | ax.set_ylabel('signal') 58 | plt.legend() 59 | return None 60 | 61 | 62 | if __name__ == '__main__': 63 | args, unknown = CLI.parse_known_args() 64 | 65 | # LOADING 66 | block = load_neo(args.data) 67 | asig = block.segments[0].analogsignals[0] 68 | 69 | # PERFORMING METHOD 70 | new_asig = offset_signal(asig, offset=args.offset) 71 | block.segments[0].analogsignals[0] = new_asig 72 | 73 | # PLOTTING 74 | if args.plot_channels[0] is not None: 75 | if args.img_dir is None: 76 | args.img_dir = args.output.parent 77 | for channel in args.plot_channels: 78 | plot_signal(asig, new_asig, channel=channel, 79 | t_start=args.plot_tstart, t_stop=args.plot_tstop) 80 | output_path = args.img_dir \ 81 | / args.img_name.replace('_channel0', f'_channel{channel}') 82 | save_plot(output_path) 83 | 84 | # SAVING 85 | write_neo(args.output, block) 86 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/scripts/inter_wave_interval_local.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate the period between two consecutive waves for each wave and channel. 
3 | """ 4 | 5 | import argparse 6 | from pathlib import Path 7 | import numpy as np 8 | import pandas as pd 9 | import matplotlib.pyplot as plt 10 | from utils.io_utils import load_neo, save_plot 11 | from utils.parse import none_or_path, none_or_str 12 | from utils.neo_utils import analogsignal_to_imagesequence 13 | 14 | CLI = argparse.ArgumentParser() 15 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 16 | help="path to input data in neo format") 17 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 18 | help="path of output file") 19 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 20 | help="path of output image file") 21 | CLI.add_argument("--kernel", "--KERNEL", nargs='?', type=none_or_str, default=None, 22 | help="derivative kernel") 23 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 24 | help="name of neo.Event to analyze (must contain waves)") 25 | 26 | def calc_local_wave_intervals(evts): 27 | wave_labels = evts.labels.astype(int) 28 | unique_labels = np.sort(np.unique(wave_labels)) 29 | unique_channels = np.sort(np.unique(evts.array_annotations['channels'].astype(int))) 30 | 31 | channel_idx_map = np.empty(np.max(unique_channels)+1) * np.nan 32 | for i, channel in enumerate(unique_channels): 33 | channel_idx_map[channel] = i 34 | 35 | trigger_collection = np.empty((len(unique_labels), len(unique_channels)), 36 | dtype=float) * np.nan 37 | 38 | for (i, wave_id) in enumerate(unique_labels): 39 | wave_trigger_evts = evts[wave_labels == wave_id] 40 | 41 | channels = wave_trigger_evts.array_annotations['channels'].astype(int) 42 | 43 | channel_idx = channel_idx_map[channels].astype(int) 44 | trigger_collection[i, channel_idx] = wave_trigger_evts.times 45 | 46 | intervals = np.diff(trigger_collection, axis=0) 47 | intervals = intervals.reshape((len(unique_labels)-1)*len(unique_channels)) 48 | 49 | mask = np.isfinite(intervals) 50 | intervals = 
intervals[mask] 51 | 52 | channel_ids = np.tile(unique_channels, len(unique_labels)-1)[mask] 53 | wave_ids = np.repeat(unique_labels[:-1], len(unique_channels))[mask] 54 | 55 | return wave_ids, channel_ids, intervals*evts.times.units 56 | 57 | 58 | 59 | if __name__ == '__main__': 60 | args, unknown = CLI.parse_known_args() 61 | 62 | block = load_neo(args.data) 63 | asig = block.segments[0].analogsignals[0] 64 | imgseq = analogsignal_to_imagesequence(asig) 65 | 66 | evts = block.filter(name=args.event_name, objects="Event")[0] 67 | evts = evts[evts.labels != '-1'] 68 | 69 | wave_ids, channel_ids, intervals = calc_local_wave_intervals(evts) 70 | 71 | # transform to DataFrame 72 | df = pd.DataFrame(intervals.magnitude, columns=['inter_wave_interval_local']) 73 | df['inter_wave_interval_local_unit'] = [intervals.dimensionality.string]*len(channel_ids) 74 | df['channel_id'] = channel_ids 75 | df[f'{args.event_name}_id'] = wave_ids 76 | 77 | df.to_csv(args.output) 78 | 79 | fig, ax = plt.subplots() 80 | ax.hist(1./intervals.magnitude[np.where(np.isfinite(1./intervals))[0]], 81 | bins=100, range=[0, 8]) 82 | plt.xlabel('local rate of waves (Hz)', fontsize=7.) 83 | if args.output_img is not None: 84 | save_plot(args.output_img) 85 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_channel_wave_characterization/scripts/annotations.py: -------------------------------------------------------------------------------- 1 | """ 2 | Extract the annotations of Neo objects and structure them in a DataFrame 3 | to complement a wave characterization. 
4 | """ 5 | 6 | import argparse 7 | from pathlib import Path 8 | import numpy as np 9 | import pandas as pd 10 | import quantities as pq 11 | import re 12 | from utils.io_utils import load_neo, save_plot 13 | from utils.parse import none_or_path, none_or_str 14 | from utils.neo_utils import remove_annotations 15 | 16 | CLI = argparse.ArgumentParser() 17 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 18 | help="path to input data in neo format") 19 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 20 | help="path of output file") 21 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 22 | help="path of output image file") 23 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 24 | help="name of neo.Event to analyze (must contain waves)") 25 | CLI.add_argument("--ignore_keys", "--IGNORE_KEYS", nargs='*', type=str, default=[], 26 | help="neo object annotations keys to not include in dataframe") 27 | CLI.add_argument("--include_keys", "--INCLUDE_KEYS", nargs='*', type=str, default=[], 28 | help="neo object annotations keys to include in dataframe") 29 | CLI.add_argument("--profile", "--PROFILE", nargs='?', type=none_or_str, default=None, 30 | help="profile name") 31 | 32 | def add_annotations_to_df(df, annotations, include_keys=[]): 33 | use_all_keys = not bool(len(include_keys)) 34 | 35 | for key, value in annotations.items(): 36 | key_is_relevant = use_all_keys or key in include_keys 37 | 38 | if key_is_relevant and key not in df.columns: 39 | if type(value) == pq.Quantity: 40 | df[f'{key}_unit'] = value.dimensionality.string 41 | value = value.magnitude 42 | df[key] = value 43 | 44 | return df 45 | 46 | if __name__ == '__main__': 47 | args, unknown = CLI.parse_known_args() 48 | args.ignore_keys = [re.sub(r"[\[\],\s]", "", key) for key in args.ignore_keys] 49 | args.include_keys = [re.sub(r"[\[\],\s]", "", key) for key in args.include_keys] 50 | if 
len(args.include_keys): 51 | args.ignore_keys = [] 52 | 53 | block = load_neo(args.data) 54 | 55 | asig = block.segments[0].analogsignals[0] 56 | evts = block.filter(name=args.event_name, objects="Event")[0] 57 | evts = evts[evts.labels.astype(str) != '-1'] 58 | 59 | df = pd.DataFrame(evts.labels, columns=[f'{args.event_name}_id']) 60 | df['channel_id'] = evts.array_annotations['channels'] 61 | args.ignore_keys += ['channels'] 62 | 63 | remove_annotations(evts, del_keys=['nix_name', 'neo_name']+args.ignore_keys) 64 | remove_annotations(asig, del_keys=['nix_name', 'neo_name']+args.ignore_keys) 65 | 66 | for annotations in [evts.annotations, evts.array_annotations, 67 | asig.annotations]: 68 | df = add_annotations_to_df(df, annotations, args.include_keys) 69 | 70 | df['profile'] = [args.profile] * len(df.index) 71 | df['sampling_rate'] = asig.sampling_rate.magnitude 72 | df['sampling_rate_unit'] = asig.sampling_rate.dimensionality.string 73 | df['recording_length'] = (asig.t_stop - asig.t_start).magnitude 74 | df['recording_length_unit'] = asig.t_start.dimensionality.string 75 | df['dim_x'] = int(max(asig.array_annotations['x_coords']))+1 76 | df['dim_y'] = int(max(asig.array_annotations['y_coords']))+1 77 | 78 | df.to_csv(args.output) 79 | 80 | # ToDo 81 | if args.output_img is not None: 82 | save_plot(args.output_img) 83 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | # Config file for Stage 4 - Wave Detection 2 | 3 | # Name of stage, must be identical with folder name 4 | STAGE_NAME: 'stage04_wave_detection' 5 | 6 | # The profile name is the key for this parameter configuration. 
Results are stored in output_path// (output_path is defined in settings.py) 7 | PROFILE: 'dataset_key' 8 | 9 | # Name of the output file 10 | STAGE_OUTPUT: "waves" 11 | 12 | # File format in which all intermediate neo objects are stored 13 | NEO_FORMAT: 'nix' 14 | 15 | # If True (default), the output file of a stage is created as symbolic link 16 | # to the last block output. If False, a duplicate is created (e.g. for cloud 17 | # application, where sym-links are not supported). 18 | USE_LINK_AS_STAGE_OUTPUT: True 19 | 20 | # Plotting parameters 21 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 22 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 23 | PLOT_CHANNELS: 'None' # int or None. default 'None' -> randomly selected 24 | PLOT_FORMAT: 'png' 25 | 26 | 27 | # DETECTION BLOCK 28 | ################## 29 | # Available Blocks: 'trigger_clustering' 30 | DETECTION_BLOCK: 'trigger_clustering' 31 | 32 | # ADDITIONAL PROPERTIES 33 | ####################### 34 | # Available Blocks: 'optical_flow', 'critical_points', 'wave_mode_clustering' 35 | # use empty list [] for selecting none 36 | ADDITIONAL_PROPERTIES: ['wave_mode_clustering', 'optical_flow'] 37 | 38 | # Trigger Clustering 39 | ###################### 40 | # Using sklearn.cluster.DBSCAN 41 | METRIC: 'euclidean' 42 | # eps, maximum distance between points to be neighbours 43 | NEIGHBOUR_DISTANCE: 15 44 | MIN_SAMPLES_PER_WAVE: 30 45 | # Factor from time dimension to space dimension in sampling_rate*spatial_scale 46 | # i.e. 
distance between 2 frames corresponds to X pixel 47 | TIME_SPACE_RATIO: 1 48 | 49 | # Optical Flow (Horn-Schunck algorithm) 50 | ############## 51 | USE_PHASES: True 52 | # weight of the smoothness constraint over the brightness constancy constraint 53 | ALPHA: 0.1 54 | # maximum number of iterations optimizing the vector field 55 | MAX_NITER: 100 56 | # the optimization end either after MAX_NITER iteration or when the 57 | # maximal change between iterations is smaller than the CONVERGENCE_LIMIT 58 | CONVERGENCE_LIMIT: 0.0001 59 | # standard deviations for the Gaussian filter applied on the vector field 60 | # [t_std, x_std, y_std]. [0,0,0] for no filter 61 | GAUSSIAN_SIGMA: [0,3,3] 62 | # Kernel filter to use to calculate the spatial derivatives. 63 | # simple_3x3, prewitt_3x3, scharr_3x3, sobel_3x3, sobel_5x5, sobel_7x7 64 | DERIVATIVE_FILTER: 'scharr_3x3' 65 | 66 | # Critical Point Clustering 67 | ########################### 68 | 69 | # Wave Mode Clustering 70 | ###################### 71 | # fraction of channels that need to be involved in a wave to be included 72 | MIN_TRIGGER_FRACTION: 0.5 73 | # number of similar waves to use to extrapolate missing trigger from 74 | NUM_WAVE_NEIGHBOURS: 5 75 | # percentage of wave similarity to keep for the clustering 76 | WAVE_OUTLIER_QUANTILE: 0.95 77 | # number of pca dims to project the trigger patterns onto before clustering 78 | # None -> no dimensionality reduction 79 | PCA_DIMS: 10 80 | # number of clusters for the kmeans algorithm 81 | NUM_KMEANS_CLUSTER: 4 82 | # grid spacing for the interpolation [0,1] 83 | INTERPOLATION_STEP_SIZE: 0.2 84 | # smoothing factor (0: no smoothing) 85 | INTERPOLATION_SMOOTHING: 0 86 | 87 | # VIDEO SETTINGS 88 | ################ 89 | QUALITY: 5 # 0(good) - 31(bad) 90 | SCALE_X: 720 91 | SCALE_Y: 720 92 | FPS: 10 93 | BITRATE: 20M 94 | # displayed sampling rate, the data will be stretched or compressed to. 95 | # If None, the inherent sampling rate is used. 
96 | FRAME_RATE: None 97 | # 'gray', 'viridis' (sequential), 'coolwarm' (diverging), 'twilight' (cyclic) 98 | COLORMAP: 'twilight' 99 | PLOT_EVENT: 'wavefronts' # name of neo event to plot, default is None 100 | MARKER_COLOR: 'r' 101 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage02_processing/configs/config_template.yaml: -------------------------------------------------------------------------------- 1 | # Config file for Stage 2 - Processing 2 | 3 | # Name of stage, must be identical with folder name 4 | STAGE_NAME: 'stage02_processing' 5 | 6 | # The profile name is the key for this parameter configuration. Results are stored in output_path// (output_path is defined in settings.py) 7 | PROFILE: 'dataset_key' 8 | 9 | # Name of the output file 10 | STAGE_OUTPUT: "processed_data" 11 | 12 | # File format in which all intermediate neo objects are stored 13 | NEO_FORMAT: 'nix' 14 | 15 | # If True (default), the output file of a stage is created as symbolic link 16 | # to the last block output. If False, a duplicate is created (e.g. for cloud 17 | # application, where sym-links are not supported). 18 | USE_LINK_AS_STAGE_OUTPUT: True 19 | 20 | # Plotting parameters 21 | PLOT_TSTART: 0 # float (in s) or 'None' -> starting time of the input signal is used 22 | PLOT_TSTOP: 10 # float (in s) or 'None' -> stopping time of the input signal is used 23 | PLOT_CHANNELS: 'None' # int or None. default 'None' -> randomly selected 24 | PLOT_FORMAT: 'png' 25 | 26 | # The block order determines which processing steps (blocks) will be applied 27 | # and in which order. Execution order is from first to last entry. 
28 | # The available blocks are (in alphabetical order): 29 | # "background_subtraction", "detrending", "frequency_filter", 30 | # "logMUA_estimation", "normalization", "phase_transform", "roi_selection", 31 | # "spatial_downsampling", "subsampling", "zscore" 32 | # Default: empty list [] -> it skips any processing blocks and returns the same 33 | # as input but, different from plot obtained from stage01, here the values on 34 | # the y-scale are correctly reported 35 | BLOCK_ORDER: [] 36 | 37 | # To make sure that the processing blocks are always executed in the correct 38 | # order that results from previous runs don't confound the workflow, all blocks 39 | # are rerun upon each execution. To turn this off, e.g., because the block order 40 | # didn't change, set to False (do with care!). 41 | RERUN_MODE: True 42 | 43 | # BLOCK - background_subtraction 44 | ################################# 45 | # No parameters needed 46 | 47 | # BLOCK - spatial_smoothing 48 | ################################# 49 | MACRO_PIXEL_DIM: 2 50 | 51 | # BLOCK - normalization 52 | ####################### 53 | # Normalize the data (divide channels-wise) by either: 54 | # 'mean', 'median', 'max' 55 | NORMALIZE_BY: 'max' 56 | 57 | # BLOCK - frequency_filter 58 | ########################## 59 | # parameters to be passed to the butterworth frequency filter 60 | # function by elephant 61 | HIGHPASS_FREQUENCY: 0 # in Hz 62 | LOWPASS_FREQUENCY: 150 # in Hz 63 | FILTER_ORDER: 2 64 | # filter function used in scipy backend. 65 | # options: ‘filtfilt’, 'lfilter’, ‘sosfiltfilt’ 66 | FILTER_FUNCTION: 'sosfiltfilt' 67 | # Plotting parameters for the power spectrum 68 | PSD_FREQUENCY_RESOLUTION: 5 # in Hz 69 | PSD_OVERLAP: 0.5 70 | 71 | # BLOCK - detrending 72 | #################### 73 | # Detrending: 0 - mean detrending; 74 | # 1 - mean and slope detrending (linear): this should be the default. 
75 | DETRENDING_ORDER: 1 76 | 77 | # BLOCK - subsampling 78 | ##################### 79 | TARGET_RATE: 200 # in Hz 80 | 81 | # BLOCK - img_roi_selection 82 | ########################### 83 | # Threshold below which the pixels are discarded (set to nan). 84 | # Given in percent of the range between minimum and maximum intensity. 85 | INTENSITY_THRESHOLD: 0.5 86 | CROP_TO_SELECTION: True 87 | 88 | # BLOCK - logMUA_estimation 89 | ######################## 90 | MUA_HIGHPASS_FREQUENCY: 50 # in Hz 91 | MUA_LOWPASS_FREQUENCY: 1200 # in Hz 92 | # Rate of the logMUA signal. Must be <= the original sampling rate 93 | # in Hz (default: 'None', takes highpass_frequency) 94 | logMUA_RATE: 70 95 | # Length of time slice (in s) to estimate the local power spectrum 96 | # default 'None', takes minimum number of samples determined 97 | # by the lower bound of the frequency band 98 | FFT_SLICE: 0.1 99 | # PSD_OVERLAP: determined by setting in block frequency_filter 100 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
#
import inspect
import sys
from pathlib import Path
from datetime import date
# Make the repository root and the pipeline package importable for autodoc.
sys.path.insert(0, str(Path(__file__).parents[2]))
sys.path.insert(0, str(Path(__file__).parents[2] / 'cobrawap' / 'pipeline'))
# -- Project information -----------------------------------------------------


# The master toctree document.
master_doc = 'index'


# General information about the project.
project = 'Collaborative Brain Wave Analysis Pipeline (Cobrawap)'
authors = u'Cobrawap authors and contributors'
copyright = u"2017-{this_year}, {authors}".format(this_year=date.today().year,
                                                 authors=authors)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# inspect.getfile on a local lambda resolves this file's location, from which
# the repository root (two levels up) is derived.
root_dir = Path(inspect.getfile(lambda: None)).parents[2]
with open(root_dir / 'cobrawap' / 'VERSION') as version_file:
    # The full version, including alpha/beta/rc tags.
    release = version_file.read().strip()

# The short X.Y version (e.g. '0.2' from '0.2.3': the patch part is dropped).
version = '.'.join(release.split('.')[:-1])


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinxarg.ext',
    # 'myst_parser',
]

source_suffix = ['.rst', '.md']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '_templates/autosummary']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# Required to automatically create a summary page for each function listed in
# the autosummary fields of each module.
autosummary_generate = True

# Set to False to not overwrite the custom _toctree/*.rst
autosummary_generate_overwrite = True


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'font_family': 'Arial',
    'page_width': '1200px', # default is 940
    'sidebar_width': '280px', # default is 220
    'logo': 'cobrawap_logo.png', # add logo to sidebar
    'fixed_sidebar': 'true'
}

html_favicon = '../images/cobrawap_icon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '../images']

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
"""
Plot an excerpt of the input data and corresponding trigger times.
"""

import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from pathlib import Path
import os
from utils.io_utils import load_neo, save_plot
from utils.parse import none_or_int, none_or_float
from utils.neo_utils import time_slice

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path,
                 required=True, help="path of output directory")
CLI.add_argument("--filename", nargs='?', type=str,
                 default='trigger_times_channel0.png',
                 help='example filename for channel 0')
CLI.add_argument("--plot_tstart", nargs='?', type=none_or_float, default=0,
                 help="start time in seconds")
CLI.add_argument("--plot_tstop", nargs='?', type=none_or_float, default=10,
                 help="stop time in seconds")
CLI.add_argument("--plot_channels", nargs='+', type=none_or_int, default=None,
                 help="list of channels to plot")

def plot_trigger_times(asig, event, channel):
    """
    Plot the signal of one channel together with its transition triggers.

    If both UP and DOWN transitions are present, UP states are shaded;
    if only UP transitions are present, they are drawn as vertical lines.
    Returns the matplotlib figure.
    """
    sns.set(style='ticks', palette="deep", context="notebook")
    fig, ax = plt.subplots()

    ax.plot(asig.times, asig.as_array()[:, channel], label='signal')

    # select the trigger times/labels belonging to this channel
    times = [time for i, time in enumerate(event.times)
             if event.array_annotations['channels'][i] == channel]
    labels = [label for i, label in enumerate(event.labels)
              if event.array_annotations['channels'][i] == channel]

    # labels may be stored as bytes or str depending on the IO backend
    if 'DOWN'.encode('UTF-8') in labels or 'DOWN' in labels:
        # plot up states
        plot_states(times, labels, ax,
                    t_start=asig.t_start, t_stop=asig.t_stop, label='UP states')
    elif 'UP'.encode('UTF-8') in labels or 'UP' in labels:
        # plot only up transitions
        for i, trans_time in enumerate(times):
            ax.axvline(trans_time, c='k',
                       label='UP transitions' if not i else '')
    else:
        print("Warning: No 'UP' (or 'DOWN') transition events "
              + f"in channel {channel} found!")

    ax.set_title(f'Channel {channel}')
    ax.set_xlabel(f'time [{asig.times.units.dimensionality.string}]')
    ax.set_ylabel(f'signal [{asig.units.dimensionality.string}]')

    plt.legend()
    return fig


def plot_states(times, labels, ax, t_start, t_stop, label=''):
    """
    Shade the UP states (intervals from an 'UP' to the following trigger)
    on axis `ax`. `label` is the legend entry for the shaded spans; when
    empty, 'UP' is used. (Fix: `label` was previously shadowed by the loop
    variable and therefore silently ignored.)
    """
    # shade states that extend beyond the plotted window at either border
    if labels[0] == 'DOWN'.encode('UTF-8') or labels[0] == 'DOWN':
        ax.axvspan(t_start, times[0], alpha=0.5, color='red')
    if labels[-1] == 'UP'.encode('UTF-8') or labels[-1] == 'UP':
        ax.axvspan(times[-1], t_stop, alpha=0.5, color='red')

    for i, (time, lbl) in enumerate(zip(times, labels)):
        if (lbl == 'UP'.encode('UTF-8') or lbl == 'UP') \
           and i < len(times) - 1:
            ax.axvspan(time, times[i+1], alpha=0.5, color='red',
                       label=(label or 'UP') if not i else '')
    return None


if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)
    asig = block.segments[0].analogsignals[0]

    args.plot_tstart = asig.t_start if args.plot_tstart is None else args.plot_tstart
    args.plot_tstop = asig.t_stop if args.plot_tstop is None else args.plot_tstop
    # slice signals
    asig = time_slice(asig, args.plot_tstart, args.plot_tstop)

    # get transition events
    event = block.filter(name='transitions', objects="Event")[0]
    event = event.time_slice(args.plot_tstart, args.plot_tstop)

    # fall back to the example channel when no channels were specified
    # (fix: iterating over the None default raised a TypeError)
    channels = args.plot_channels if args.plot_channels is not None else [0]

    for channel in channels:
        plot_trigger_times(asig=asig,
                           event=event,
                           channel=channel)
        output_path = os.path.join(args.output,
                                   args.filename.replace('_channel0', f'_channel{channel}'))
        save_plot(output_path)
"""
Loads a dataset and brings it into the required data representation (using Neo).
"""

import argparse
import quantities as pq
from pathlib import Path
import neo
from utils.parse import parse_string2dict, none_or_float, none_or_int, none_or_str
from utils.neo_utils import imagesequence_to_analogsignal, merge_analogsignals
from utils.neo_utils import flip_image, rotate_image, time_slice
from utils.io_utils import load_neo, write_neo


CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--data_name", nargs='?', type=str, default='None',
                 help="chosen name of the dataset")
CLI.add_argument("--sampling_rate", nargs='?', type=none_or_float,
                 default=None, help="sampling rate in Hz")
CLI.add_argument("--spatial_scale", nargs='?', type=float, required=True,
                 help="distance between electrodes or pixels in mm")
CLI.add_argument("--t_start", nargs='?', type=none_or_float, default=None,
                 help="start time, in s, delimits the interval of recordings to be analyzed")
CLI.add_argument("--t_stop", nargs='?', type=none_or_float, default=None,
                 help="stop time, in s, delimits the interval of recordings to be analyzed")
CLI.add_argument("--orientation_top", nargs='?', type=str, required=True,
                 help="upward orientation of the recorded cortical region")
CLI.add_argument("--orientation_right", nargs='?', type=str, required=True,
                 help="right-facing orientation of the recorded cortical region")
CLI.add_argument("--annotations", nargs='+', type=none_or_str, default=None,
                 help="metadata of the dataset")
CLI.add_argument("--array_annotations", nargs='+', type=none_or_str,
                 default=None, help="channel-wise metadata")
CLI.add_argument("--kwargs", nargs='+', type=none_or_str, default=None,
                 help="additional optional arguments")

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    # Load data with a Neo IO or a custom loading routine.
    # If no Neo IO exists for the data type, load the data conventionally
    # and add it to a newly constructed Neo Block; see the Neo documentation
    # for building Neo objects: https://neo.readthedocs.io/
    block = load_neo(args.data)

    # Imaging data stored as an ImageSequence object must be transformed
    # into an AnalogSignal object; use imagesequence_to_analogsignal
    # from utils/neo_utils.py for this.
    asig = block.segments[0].analogsignals[0]

    # Restrict the signal to the requested analysis interval.
    asig = time_slice(asig, args.t_start, args.t_stop)

    # Attach dataset-level metadata (ANNOTATIONS) to the signal.
    asig.annotations.update(parse_string2dict(args.annotations))
    asig.annotations.update(spatial_scale=args.spatial_scale*pq.mm,
                            orientation_top=args.orientation_top,
                            orientation_right=args.orientation_right)

    # Attach channel-wise metadata (ARRAY_ANNOTATIONS).
    asig.array_annotations.update(parse_string2dict(args.array_annotations))

    # Do custom metadata processing from KWARGS dict (optional)
    # kwargs = parse_string2dict(args.kwargs)
    # ... do something

    # Add descriptions to the Neo objects.
    block.name = args.data_name
    block.segments[0].name = 'Segment 1'
    block.segments[0].description = f'Loaded with neo.io version {neo.__version__}'
    if asig.description is None:
        asig.description = ''
    asig.description += 'some signal. '

    # Update the annotated AnalogSignal object in the Neo Block.
    block.segments[0].analogsignals[0] = asig

    # Save data to file.
    write_neo(args.output, block)
' 80 | 81 | # Update the annotated AnalogSignal object in the Neo Block 82 | block.segments[0].analogsignals[0] = asig 83 | 84 | # Save data to file 85 | write_neo(args.output, block) 86 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-pypi-and-testpypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python 🐍 distribution 📦 to PyPI and TestPyPI 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | workflow_dispatch: 7 | 8 | 9 | jobs: 10 | build: 11 | name: Build distribution 📦 12 | runs-on: ubuntu-latest 13 | if: ${{ github.repository_owner == 'NeuralEnsemble' }} 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Set up Python 18 | uses: actions/setup-python@v4 19 | with: 20 | python-version: "3.x" 21 | - name: Install pypa/build 22 | run: >- 23 | python3 -m 24 | pip install 25 | build 26 | --user 27 | - name: Build a binary wheel and a source tarball 28 | run: python3 -m build 29 | - name: Store the distribution packages 30 | uses: actions/upload-artifact@v4 31 | with: 32 | name: python-package-distributions 33 | path: dist/ 34 | 35 | 36 | publish-to-pypi: 37 | name: Publish Python 🐍 distribution 📦 to PyPI 38 | # only publish to PyPI on tag pushes 39 | if: ${{ github.repository_owner == 'NeuralEnsemble' && startsWith(github.ref, 'refs/tags/v') }} 40 | needs: 41 | - build 42 | runs-on: ubuntu-latest 43 | 44 | environment: 45 | name: pypi 46 | url: https://pypi.org/p/cobrawap 47 | 48 | permissions: 49 | id-token: write # IMPORTANT: mandatory for trusted publishing 50 | 51 | steps: 52 | - name: Download all the dists 53 | uses: actions/download-artifact@v4.1.7 54 | with: 55 | name: python-package-distributions 56 | path: dist/ 57 | - name: Publish distribution 📦 to PyPI 58 | uses: pypa/gh-action-pypi-publish@release/v1 59 | 60 | 61 | github-release: 62 | name: >- 63 | Sign the Python 🐍 distribution 📦 with Sigstore 64 | and upload them to 
GitHub Release 65 | needs: 66 | - publish-to-pypi 67 | runs-on: ubuntu-latest 68 | if: ${{ github.repository_owner == 'NeuralEnsemble' }} 69 | 70 | permissions: 71 | contents: write # IMPORTANT: mandatory for making GitHub Releases 72 | id-token: write # IMPORTANT: mandatory for sigstore 73 | 74 | steps: 75 | - name: Download all the dists 76 | uses: actions/download-artifact@v4.1.7 77 | with: 78 | name: python-package-distributions 79 | path: dist/ 80 | - name: Sign the dists with Sigstore 81 | uses: sigstore/gh-action-sigstore-python@v1.2.3 82 | with: 83 | inputs: >- 84 | ./dist/*.tar.gz 85 | ./dist/*.whl 86 | - name: Create GitHub Release 87 | env: 88 | GITHUB_TOKEN: ${{ github.token }} 89 | run: >- 90 | gh release create 91 | '${{ github.ref_name }}' 92 | --repo '${{ github.repository }}' 93 | --notes "" 94 | - name: Upload artifact signatures to GitHub Release 95 | env: 96 | GITHUB_TOKEN: ${{ github.token }} 97 | # Upload to GitHub Release using the `gh` CLI. 98 | # `dist/` contains the built packages, and the 99 | # sigstore-produced signatures and certificates. 
100 | run: >- 101 | gh release upload 102 | '${{ github.ref_name }}' dist/** 103 | --repo '${{ github.repository }}' 104 | 105 | publish-to-testpypi: 106 | name: Publish Python 🐍 distribution 📦 to TestPyPI 107 | needs: 108 | - build 109 | runs-on: ubuntu-latest 110 | if: ${{ github.repository_owner == 'NeuralEnsemble' }} 111 | 112 | environment: 113 | name: testpypi 114 | url: https://test.pypi.org/p/cobrawap 115 | 116 | permissions: 117 | id-token: write # IMPORTANT: mandatory for trusted publishing 118 | 119 | steps: 120 | - name: Download all the dists 121 | uses: actions/download-artifact@v4.1.7 122 | with: 123 | name: python-package-distributions 124 | path: dist/ 125 | - name: Publish distribution 📦 to TestPyPI 126 | uses: pypa/gh-action-pypi-publish@release/v1 127 | with: 128 | repository-url: https://test.pypi.org/legacy/ 129 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage03_trigger_detection/Snakefile: -------------------------------------------------------------------------------- 1 | """ 2 | # Stage 03 Trigger Detection 3 | """ 4 | 5 | from pathlib import Path 6 | configfile: Path('configs') / 'config_template.yaml' 7 | include: Path() / '..' 
/ 'utils' / 'Snakefile' 8 | 9 | #### Housekeeping #### 10 | 11 | def filtered_triggers(wildcards): 12 | default_input = OUTPUT_DIR / config.DETECTION_BLOCK / config.STAGE_OUTPUT 13 | return prev_rule_output(wildcards, rule_list=config.TRIGGER_FILTER, 14 | default_input=default_input) 15 | 16 | #### UTILITY BLOCKS #### 17 | 18 | use rule template_all as all with: 19 | input: 20 | check = OUTPUT_DIR / 'input.check', 21 | data = filtered_triggers, 22 | img = OUTPUT_DIR / 'plot_trigger_times' 23 | 24 | 25 | use rule template as plot_trigger_times with: 26 | input: 27 | data = filtered_triggers, 28 | script = SCRIPTS / 'plot_trigger_times.py' 29 | output: 30 | directory(OUTPUT_DIR / 'plot_trigger_times') 31 | params: 32 | params('plot_tstart', 'plot_tstop', 'plot_channels', 33 | filename='trigger_times_channel0.'+config.PLOT_FORMAT, 34 | config=config) 35 | 36 | #### DETECTION BLOCKS (choose one) #### 37 | 38 | rule threshold: 39 | input: 40 | data = config.STAGE_INPUT, 41 | thresholds = Path('{dir}') / 'threshold' / str(config.THRESHOLD_METHOD + "_thresholds.npy"), 42 | script = SCRIPTS / 'threshold.py' 43 | output: 44 | data = Path('{dir}') / 'threshold' / config.STAGE_OUTPUT 45 | shell: 46 | """ 47 | {ADD_UTILS} 48 | python3 {input.script:q} --data {input.data:q} \ 49 | --output {output.data:q} \ 50 | --thresholds {input.thresholds:q} 51 | """ 52 | 53 | 54 | use rule template as calc_threshold_fixed with: 55 | # subrule of threshold 56 | input: 57 | data = config.STAGE_INPUT, 58 | script = SCRIPTS / 'calc_thresholds_fixed.py' 59 | output: 60 | Path('{dir}') / 'threshold' / 'fixed_thresholds.npy', 61 | params: 62 | params(threshold=config.FIXED_THRESHOLD) 63 | 64 | 65 | use rule template as calc_threshold_fitted with: 66 | # subrule of threshold 67 | input: 68 | data = config.STAGE_INPUT, 69 | script = SCRIPTS / 'calc_thresholds_fitted.py' 70 | output: 71 | Path('{dir}') / 'threshold' / 'fitted_thresholds.npy', 72 | img_dir = directory(Path('{dir}') / 'threshold' / 
'fitted_thresholds') 73 | params: 74 | params('sigma_factor', 'fit_function', 'bin_num', 'plot_channels', 75 | img_name="amplitudes_channel0." + config.PLOT_FORMAT, config=config) 76 | 77 | 78 | use rule template as hilbert_phase with: 79 | input: 80 | data = config.STAGE_INPUT, 81 | script = SCRIPTS / 'hilbert_phase.py' 82 | output: 83 | Path('{dir}') / 'hilbert_phase' / config.STAGE_OUTPUT, 84 | img_dir = directory(Path('{dir}') / 'hilbert_phase' / 'hilbert_phase_plots') 85 | params: 86 | params('transition_phase', 'plot_channels', 'plot_tstart', 87 | 'plot_tstop', config=config, 88 | img_name="hilbert_phase_channel0." + config.PLOT_FORMAT) 89 | 90 | 91 | use rule template as minima with: 92 | input: 93 | data = config.STAGE_INPUT, 94 | script = SCRIPTS / 'minima.py' 95 | output: 96 | Path('{dir}') / 'minima' / config.STAGE_OUTPUT, 97 | img_dir = directory(Path('{dir}') / 'minima' / 'minima_plots') 98 | params: 99 | params('minima_persistence', 'min_peak_distance', 'maxima_threshold_fraction', 100 | 'maxima_threshold_window', 'num_interpolation_points', 101 | 'plot_channels', 'plot_tstart', 'plot_tstop', 102 | img_name="minima_channel0." + config.PLOT_FORMAT, config=config) 103 | 104 | 105 | #### FILTER BLOCKS (choose any) #### 106 | 107 | use rule template as remove_short_states with: 108 | input: 109 | data = filtered_triggers, 110 | script = SCRIPTS / 'remove_short_states.py' 111 | output: 112 | Path('{dir}') / '{rule_name}' / str("remove_short_states." + config.NEO_FORMAT) 113 | params: 114 | params('min_up_duration', 'min_down_duration', 'remove_down_first', 115 | config=config) 116 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/scripts/trigger_clustering.py: -------------------------------------------------------------------------------- 1 | """ 2 | Detect waves by clustering triggers that are close to each other in time and space. 
"""
Detect waves by clustering triggers that are close to each other in time and space.
"""

import neo
import numpy as np
import quantities as pq
import argparse
from pathlib import Path
from sklearn.cluster import DBSCAN
from utils.io_utils import load_neo, write_neo
from utils.neo_utils import remove_annotations

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path of output file")
CLI.add_argument("--metric", nargs='?', type=str, default='euclidean',
                 help="parameter for sklearn.cluster.DBSCAN")
CLI.add_argument("--time_space_ratio", nargs='?', type=float, default=1,
                 help="factor to apply to time values")
CLI.add_argument("--neighbour_distance", nargs='?', type=float, default=30,
                 help="eps parameter in sklearn.cluster.DBSCAN")
CLI.add_argument("--min_samples", nargs='?', type=int, default=10,
                 help="minimum number of trigger times to form a wavefront")

def cluster_triggers(event, metric, neighbour_distance, min_samples,
                     time_space_ratio, sampling_rate):
    """
    Cluster the 'UP' triggers of `event` into wavefronts with DBSCAN in
    (x, y, scaled time) space and return a new neo.Event named 'wavefronts'
    whose labels are the wavefront ids.

    Raises ValueError when no triggers exist or every trigger is classified
    as noise.
    """
    up_idx = np.where(event.labels == 'UP')[0]
    # build 3D array of trigger coordinates: x, y, and time expressed in
    # sample units, weighted against space by time_space_ratio
    triggers = np.zeros((len(up_idx), 3))
    triggers[:,0] = event.array_annotations['x_coords'][up_idx]
    triggers[:,1] = event.array_annotations['y_coords'][up_idx]
    triggers[:,2] = event.times[up_idx].rescale('s') \
                  * sampling_rate.rescale('Hz') * time_space_ratio

    clustering = DBSCAN(eps=neighbour_distance,
                        min_samples=min_samples,
                        metric=metric)
    clustering.fit(triggers)

    # fix: the previous check `len(np.unique(labels_)) < 1` could never
    # trigger for non-empty input
    if not len(clustering.labels_):
        raise ValueError("No clusters found, please adapt the parameters!")

    # remove unclassified trigger points; DBSCAN marks noise with label -1
    # (fix: the previous comparison against -2 kept noise points)
    cluster_idx = np.where(clustering.labels_ != -1)[0]
    if not len(cluster_idx):
        raise ValueError("Clusters couldn't be classified, please adapt the parameters!")

    wave_idx = up_idx[cluster_idx]

    evt = neo.Event(times=event.times[wave_idx],
                    labels=clustering.labels_[cluster_idx].astype(str),
                    name='wavefronts',
                    array_annotations={'channels':event.array_annotations['channels'][wave_idx],
                                       'x_coords':triggers[:,0][cluster_idx].astype(int),
                                       'y_coords':triggers[:,1][cluster_idx].astype(int)},
                    description='transitions from down to up states. '\
                               +'Labels are ids of wavefronts. '
                               +'Annotated with the channel id ("channels") and '\
                               +'its position ("x_coords", "y_coords").',
                    cluster_algorithm='sklearn.cluster.DBSCAN',
                    cluster_eps=neighbour_distance,
                    cluster_metric=metric,
                    cluster_min_samples=min_samples)

    # carry over the original annotations, minus IO-internal bookkeeping keys
    remove_annotations(event, del_keys=['nix_name', 'neo_name'])
    evt.annotations.update(event.annotations)
    return evt

if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)
    asig = block.segments[0].analogsignals[0]

    evts = block.filter(name='transitions', objects="Event")[0]

    if len(evts):
        wave_evt = cluster_triggers(event=evts,
                                    metric=args.metric,
                                    neighbour_distance=args.neighbour_distance,
                                    min_samples=args.min_samples,
                                    time_space_ratio=args.time_space_ratio,
                                    sampling_rate=asig.sampling_rate)
    else:
        # no transitions detected upstream: emit an empty wavefronts event
        wave_evt = neo.Event(name='wavefronts',
                             times=np.array([])*pq.s, labels=[])

    block.segments[0].events.append(wave_evt)

    write_neo(args.output, block)
"""
Remove detected triggers (state transitions) when the corresponding Up and Down
states are shorter than a minimum duration.
"""

import numpy as np
import neo
import argparse
from pathlib import Path
import quantities as pq
from utils.io_utils import load_neo, write_neo
from utils.parse import str_to_bool

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output", nargs='?', type=Path, required=True,
                 help="path to output data in neo format")
CLI.add_argument("--min_up_duration", nargs='?', type=float, default=0.005,
                 help="minimum duration of UP states in seconds")
CLI.add_argument("--min_down_duration", nargs='?', type=float, default=0.005,
                 help="minimum duration of DOWN states in seconds")
CLI.add_argument("--remove_down_first", nargs='?', type=str_to_bool, default=True,
                 help="If True, remove short down states first")


def remove_short_states(evt, min_duration, start_label='UP', stop_label='DOWN'):
    """
    Return a copy of the transitions Event `evt` in which all states
    (intervals from a `start_label` trigger to the next `stop_label`
    trigger on the same channel) shorter than `min_duration` (a quantities
    time) have their trigger pairs removed.

    Assumes event times to be sorted.
    """
    del_idx = np.array([], dtype=int)

    for channel in np.unique(evt.array_annotations['channels']):
        # select triggers of this channel
        c_idx = np.where(channel == evt.array_annotations['channels'])[0]
        c_times = evt.times[c_idx]
        c_labels = evt.labels[c_idx]

        # separate start and stop times
        start_idx = np.where(start_label == c_labels)[0]
        stop_idx = np.where(stop_label == c_labels)[0]
        start_times = c_times[start_idx]
        stop_times = c_times[stop_idx]

        # nothing to pair up in this channel
        # (fix: indexing below raised IndexError on empty arrays)
        if not len(start_times) or not len(stop_times):
            continue

        # clean borders: drop stop triggers that precede the first start
        after_first_start = stop_times > start_times[0]
        if not after_first_start.any():
            continue
        leading_stops = np.argmax(after_first_start)
        stop_idx = stop_idx[leading_stops:]
        stop_times = stop_times[leading_stops:]
        start_times = start_times[:len(stop_times)]

        # find short states
        short_state_idx = np.where((stop_times-start_times).rescale('s')
                                   < min_duration.rescale('s'))[0]

        # remove end points of short states
        del_idx = np.append(del_idx, c_idx[stop_idx[short_state_idx]])
        if not start_label == stop_label:
            # remove start points of short states
            del_idx = np.append(del_idx, c_idx[start_idx[short_state_idx]])

    cleaned_evt = neo.Event(times=np.delete(evt.times.rescale('s'), del_idx)*pq.s,
                            labels=np.delete(evt.labels, del_idx),
                            name=evt.name,
                            description=evt.description)
    cleaned_evt.annotations = evt.annotations
    for key in evt.array_annotations:
        cleaned_evt.array_annotations[key] = np.delete(evt.array_annotations[key],
                                                       del_idx)
    return cleaned_evt


if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)

    evt_idx, evt = [(i, ev) for i, ev in enumerate(block.segments[0].events)
                    if ev.name == 'transitions'][0]

    if 'DOWN' in evt.labels:
        # remove the shorter state type first so its removal can merge
        # neighboring states of the other type
        if args.remove_down_first:
            evt = remove_short_states(evt, args.min_down_duration*pq.s,
                                      start_label='DOWN', stop_label='UP')
            evt = remove_short_states(evt, args.min_up_duration*pq.s,
                                      start_label='UP', stop_label='DOWN')
        else:
            evt = remove_short_states(evt, args.min_up_duration*pq.s,
                                      start_label='UP', stop_label='DOWN')
            evt = remove_short_states(evt, args.min_down_duration*pq.s,
                                      start_label='DOWN', stop_label='UP')
    else:
        # only UP triggers present: treat consecutive UPs as state borders
        # (fix: the returned Event was previously discarded)
        evt = remove_short_states(evt,
                                  (args.min_down_duration+args.min_up_duration)*pq.s,
                                  start_label='UP', stop_label='UP')

    evt.annotations.update(min_up_duration=args.min_up_duration*pq.s)
    evt.annotations.update(min_down_duration=args.min_down_duration*pq.s)

    block.segments[0].events[evt_idx] = evt

    write_neo(args.output, block)
-------------------------------------------------------------------------------- /doc/source/release_notes.rst: -------------------------------------------------------------------------------- 1 | ************* 2 | Release Notes 3 | ************* 4 | 5 | 6 | Release 0.2.3 7 | ============= 8 | Bug fixes 9 | --------- 10 | * Fixed bug in `--version` handling of installed versions (`#113 `_) 11 | * Fixed breaking syntax error in stage 2 (`#115 `_) 12 | 13 | Other changes 14 | ------------- 15 | * Housekeeping (`#114 `_) 16 | 17 | 18 | Release 0.2.2 19 | ============= 20 | Bug fixes 21 | --------- 22 | * Fixed `--version` argument of `cobrawap` command (`#95 `_) 23 | * Fixed path handling issues (`#92 `_), (`#101 `_) 24 | 25 | Other changes 26 | ------------- 27 | * Fixed path issue in automated documentation builds (`#102 `_) 28 | * Fixed Python 3.12 compatibility (`#105 `_) 29 | * Improved figure outputs (`#98 `_) 30 | * Added `--force-overwrite` flag (`#110 `_) 31 | * Various maintenance fixes (`#86 `_), (`#89 `_), (`#90 `_), (`#93 `_), (`#97 `_), (`#102 `_), (`#107 `_), (`#108 `_) 32 | 33 | 34 | Release 0.2.1 35 | ============= 36 | Other changes 37 | ------------- 38 | * Improved internal handling of pathnames (`#79 `_) 39 | * Maintenance fixes, including dependency adjustments (`#80 `_), (`#83 `_) 40 | 41 | 42 | Release 0.2.0 43 | ============= 44 | New functionality and features 45 | ------------------------------ 46 | * Ability to plot complete signal ranges using `TSTART` and `TSTOP` set to `None` (`#48 `_) 47 | * New default value `None` for `MAXIMA_THRESHOLD_WINDOW` to indicate that the complete signal duration is considered (`#49 `_) 48 | * Added additional keyword arguments to `cobrawap` command (`#76 `_) 49 | 50 | Bug fixes 51 | --------- 52 | * Fixed bug related to updating of AnalogSignal names (`#67 `_) 53 | * Fixed issue where `roi_selection` and `spatial_derivative` incorrectly handled boolean arguments (`#65 `_) 54 | * Fixed issue related to 
directly specifying a stage from the cobrawap interface (`#70 `_) 55 | * Fixed issue with cyclic boundary conditions during phase convolution (`#66 `_) 56 | 57 | Documentation 58 | ------------- 59 | * Updated `README` information (`#59 `_), (`#74 `_), (`#77 `_) 60 | 61 | Other changes 62 | ------------- 63 | * Automated package distribution to PyPI (`#62 `_) 64 | 65 | 66 | Release 0.1.1 67 | ============= 68 | Documentation 69 | ------------- 70 | * Added help statement for CLI client 71 | 72 | Bug fixes 73 | --------- 74 | * Fixed install by disallowing Snakemake versions >=8.0.0, which are missing subworkflow support 75 | 76 | Selected dependency changes 77 | --------------------------- 78 | * snakemake >= 7.10.0, < 8.0.0 79 | 80 | 81 | Release 0.1.0 82 | ============= 83 | Initial release of Cobrawap accompanying the manuscript 84 | 85 | Gutzen, R., De Bonis, G., De Luca, C., Pastorelli, E., Capone, C., Allegra Mascaro, A. L., Resta, F., Manasanch, A., Pavone, F. S., Sanchez-Vives, M. V., Mattia, M., Grün, S., Paolucci, P. S., & Denker, M. (2022). *A modular and adaptable analysis pipeline to compare slow cerebral rhythms across heterogeneous datasets*. Cell Reports Methods 4, 100681. `https://doi.org/10.1016/j.crmeth.2023.100681 `_ 86 | 87 | 88 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage04_wave_detection/scripts/plot_waves.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot snapshots of the input data showing the detected waves. 
"""
Plot snapshots of the input data showing the detected waves.
"""

import os
import numpy as np
import quantities as pq
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
from utils.io_utils import load_neo, save_plot
from utils.neo_utils import analogsignal_to_imagesequence

CLI = argparse.ArgumentParser()
CLI.add_argument("--data", nargs='?', type=Path, required=True,
                 help="path to input data in neo format")
CLI.add_argument("--output_dir", nargs='?', type=Path, required=True,
                 help="path to output directory")
CLI.add_argument("--img_name", nargs='?', type=str,
                 help="")
CLI.add_argument("--time_window", nargs='?', type=float, default=0.4,
                 help="size of the plotted window in seconds.")
CLI.add_argument("--colormap", nargs='?', type=str, default='viridis',
                 help="")

def plot_wave(wave_id, waves_event, asig, frames, vec_frames,
              time_window=0.4*pq.s, cmap='viridis'):
    """
    Plot one detected wave: per involved time step, the channel signal
    around the trigger (top row) and the corresponding frame with the
    trigger pixel and optical-flow vector field (bottom row).
    Returns the array of axes.

    Fix: the default colormap was misspelled 'virids', which matplotlib
    rejects; it is now the valid name 'viridis'.
    """
    idx = np.where(waves_event.labels == str(wave_id))[0]
    x_coords = waves_event.array_annotations['x_coords'][idx]
    y_coords = waves_event.array_annotations['y_coords'][idx]

    t = waves_event.times[idx]
    time_steps = np.unique(t)

    dim_t, dim_y, dim_x = frames.shape
    y_idx, x_idx = np.meshgrid(np.arange(dim_y), np.arange(dim_x), indexing='ij')
    # scale marker size and quiver density to the frame resolution
    markersize = 50 / max([dim_x, dim_y])
    skip_step = int(min([dim_x, dim_y]) / 50) + 1

    vmin, vmax = np.nanmin(frames), np.nanmax(frames)

    # half of the signal excerpt (in samples) shown around each trigger
    time_window_steps = asig.sampling_rate.rescale('Hz') * time_window.rescale('s')
    half_window = int(time_window_steps.magnitude / 2)

    fig, axes = plt.subplots(nrows=2, ncols=len(time_steps),
                             figsize=(2*len(time_steps), 5), sharey='row')
    if len(time_steps) < 2:
        # normalize to the two-row indexing used below
        axes = [[axes[0]], [axes[1]]]

    axes[0][0].set_ylabel(f"Wave {wave_id}", fontsize=20)

    for i, ax in enumerate(axes[0]):
        ax.set_title('{:.3f} '.format(time_steps[i].magnitude)
                     + f'{time_steps[i].dimensionality}')

    for i in idx:
        x = waves_event.array_annotations['x_coords'][i]
        y = waves_event.array_annotations['y_coords'][i]
        t = waves_event.times[i]
        # column of this trigger's time step
        ax_i = np.where(time_steps == t)[0][0]

        channel = waves_event.array_annotations['channels'][i]
        t_i = np.argmax(asig.times >= t)
        i_start = np.max([0, t_i-half_window])
        i_stop = np.min([t_i+half_window, len(asig.times)-1])
        axes[0][ax_i].plot(asig.times[i_start : i_stop],
                           asig.as_array()[i_start : i_stop, channel])
        axes[0][ax_i].axvline(t, color='r')

        axes[1][ax_i].imshow(frames[t_i], origin='lower', cmap=cmap,
                             vmin=vmin, vmax=vmax)
        axes[1][ax_i].plot(x, y, linestyle='None', marker='D',
                           markersize=markersize, color='r')
        axes[1][ax_i].set_axis_off()

        # overlay the optical-flow vector field (complex-valued frames)
        axes[1][ax_i].quiver(x_idx[::skip_step,::skip_step],
                             y_idx[::skip_step,::skip_step],
                             np.real(vec_frames[t_i])[::skip_step,::skip_step],
                             np.imag(vec_frames[t_i])[::skip_step,::skip_step],
                             zorder=5)
    return axes


if __name__ == '__main__':
    args, unknown = CLI.parse_known_args()

    block = load_neo(args.data)

    asig = block.segments[0].analogsignals[0]
    vec_asig = block.filter(name='optical_flow', objects="AnalogSignal")[0]

    frames = analogsignal_to_imagesequence(asig).as_array()
    vec_frames = analogsignal_to_imagesequence(vec_asig).as_array()

    waves_event = block.filter(name='wavefronts', objects="Event")[0]

    cmap = plt.get_cmap(args.colormap)

    for wave_id in np.unique(waves_event.labels):
        if int(wave_id) != -1:  # -1 is the collection of not-clustered triggers
            ax = plot_wave(wave_id=wave_id,
                           waves_event=waves_event,
                           asig=asig,
                           frames=frames,
                           vec_frames=vec_frames,
                           time_window=args.time_window*pq.s,
                           cmap=cmap)

            output_path = os.path.join(args.output_dir,
                                       args.img_name.replace('id0', f'id{wave_id}'))
            save_plot(output_path)
            plt.close()
def center_points(x, y):
    """Shift coordinate arrays *x* and *y* to zero mean."""
    return x - np.mean(x), y - np.mean(y)


def linregress(times, locations):
    """Linear fit of *locations* against *times* after centering both.

    Returns
    -------
    slope, stderr, offset : float
        Fitted slope, standard error of the slope, and intercept of the
        centered fit.
    """
    times, locations = center_points(times, locations)
    slope, offset, _, _, stderr = scipy.stats.linregress(times, locations)
    return slope, stderr, offset


def calc_planar_velocities(evts):
    """Estimate a planar propagation velocity for each wave.

    For every wave label in *evts*, the x and y trigger positions are
    fitted linearly against the trigger times; the planar speed is the
    Euclidean norm of the two fitted slopes. Also draws one fit panel
    per wave plus a summary panel of all velocities.

    Parameters
    ----------
    evts : neo.Event
        Wave triggers; must carry the 'spatial_scale' annotation and the
        'x_coords'/'y_coords' array annotations.

    Returns
    -------
    pd.DataFrame
        Columns 'velocity_planar', 'velocity_planar_std',
        'velocity_unit'; one row per wave id (NaN when all triggers of a
        wave are simultaneous).
    """
    spatial_scale = evts.annotations['spatial_scale']
    v_unit = (spatial_scale.units/evts.times.units).dimensionality.string

    wave_ids = np.unique(evts.labels)

    velocities = np.zeros((len(wave_ids), 2)) * np.nan

    # near-square panel grid: one panel per wave plus one summary panel
    ncols = int(np.round(np.sqrt(len(wave_ids)+1)))
    nrows = int(np.ceil((len(wave_ids)+1)/ncols))
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols,
                           figsize=(3*nrows, 3*ncols))

    # loop over waves
    for i, wave_i in enumerate(wave_ids):
        # Fit wave displacement
        idx = np.where(evts.labels == wave_i)[0]
        times = evts.times[idx].magnitude
        if (times == times[0]).all():
            # all triggers are simultaneous; no displacement to fit
            continue
        x_times, x_locations = center_points(times,
                                evts.array_annotations['x_coords'][idx]
                                * spatial_scale.magnitude)
        y_times, y_locations = center_points(times,
                                evts.array_annotations['y_coords'][idx]
                                * spatial_scale.magnitude)
        vx, vx_err, dx = linregress(x_times, x_locations)
        vy, vy_err, dy = linregress(y_times, y_locations)
        v = np.sqrt(vx**2 + vy**2)
        # Standard error propagation for v = sqrt(vx^2 + vy^2):
        #   dv = sqrt((vx*dvx)^2 + (vy*dvy)^2) / v
        # Fixes the original `(vy+vy_err)**2`, which summed the fitted
        # value and its error instead of multiplying them.
        v_err = 1/v * np.sqrt((vx*vx_err)**2 + (vy*vy_err)**2)
        velocities[i] = np.array([v, v_err])

        # Plot fit
        row = int(i/ncols)
        if ncols == 1:
            cax = ax[row]
            col = 0
        else:
            col = i % ncols
            cax = ax[row][col]
        cax.plot(x_times, x_locations,
                 color='b', label='x coords', linestyle='', marker='.', alpha=0.5)
        cax.plot(x_times, [vx*t + dx for t in x_times], color='b')
        cax.plot(y_times, y_locations,
                 color='r', label='y coords', linestyle='', marker='.', alpha=0.5)
        cax.plot(y_times, [vy*t + dy for t in y_times], color='r')
        if not col:
            cax.set_ylabel('x/y position [{}]'\
                           .format(spatial_scale.dimensionality.string))
        if row == nrows-1:
            cax.set_xlabel('time [{}]'\
                           .format(evts.times[idx].dimensionality.string))
        cax.set_title('wave {}'.format(wave_i))

    # plot total velocities in the last panel; switch off unused panels
    if ncols == 1:
        cax = ax[-1]
    else:
        cax = ax[-1][-1]
    for i in range(len(wave_ids), nrows*ncols-1):
        row = int(i/ncols)
        col = i % ncols
        ax[row][col].set_axis_off()

    cax.errorbar(wave_ids, velocities[:,0], yerr=velocities[:,1],
                 linestyle='', marker='+')
    cax.set_xlabel(f'{evts.name} id')
    cax.set_title('velocities [{}]'.format(v_unit))

    # transform to DataFrame
    df = pd.DataFrame(velocities,
                      columns=['velocity_planar', 'velocity_planar_std'])
    df['velocity_unit'] = v_unit
    return df
def get_stage(num):
    """Return the name of stage *num*, or '' when *num* is out of range."""
    if num < len(STAGES):
        return STAGES[num]
    else:
        # temporary solution: subworkflows beyond the configured stages
        # resolve to an empty path component
        return ''

def stage_input(wildcards):
    """Input of a stage: the '.done' flag of the preceding stage.

    The first stage (index 0) has no predecessor and gets no input.
    """
    stage_num = [i for i, stage in enumerate(STAGES)
                 if stage == wildcards.stage][0]
    if stage_num:
        return output_path / f'{STAGES[stage_num-1]}.done'
    else:
        return []

def stage_output(wildcards):
    """Resolve the configured output file of a stage via its subworkflow."""
    # setup for maximal 5 stages
    subworkflows=[stage01, stage02, stage03, stage04, stage05]
    if wildcards.stage not in STAGES:
        # `InputError` was undefined (raising it would itself fail with a
        # NameError); use the builtin ValueError instead.
        raise ValueError(f"Don't recognize stage {wildcards.stage}!")
    for i, subworkflow in enumerate(subworkflows):
        if wildcards.stage == get_stage(i):
            output_name = read_stage_output(wildcards.stage,
                                            config_dir=output_path,
                                            config_name=temp_config)
            output = output_path / wildcards.stage / output_name
            return subworkflow(output)
'pipeline.done' 109 | shell: 110 | """ 111 | touch "{output}" 112 | """ 113 | 114 | rule execute_stage: 115 | input: 116 | stage_input = stage_input, 117 | stage_output = stage_output, 118 | config = output_path / '{stage}' / temp_config 119 | output: 120 | temp(output_path / '{stage}.done') 121 | shell: 122 | """ 123 | touch "{output}" 124 | """ 125 | 126 | rule create_report: 127 | input: 128 | output_path / '{subworkflow_name}' / 'report.clear', 129 | configfile = output_path / '{subworkflow_name}' / temp_config 130 | output: 131 | report = output_path / '{subworkflow_name}' / 'report.html' 132 | shell: 133 | """ 134 | cd {wildcards.subworkflow_name} 135 | snakemake --configfile {input.configfile} \ 136 | --report "{output.report}" || \ 137 | echo "Creation of report for {wildcards.subworkflow_name} failed." 138 | """ 139 | 140 | rule clear_report: 141 | output: 142 | temp(Path('{path}') / 'report.clear') 143 | params: 144 | report_path = Path('{path}') / 'report.html' 145 | shell: 146 | """ 147 | rm -f "{params.report_path}" 148 | touch "{output}" 149 | """ 150 | -------------------------------------------------------------------------------- /cobrawap/pipeline/stage05_wave_characterization/scripts/annotations.py: -------------------------------------------------------------------------------- 1 | """ 2 | Extract the annotations of Neo objects and structure them in a DataFrame 3 | to complement a wave characterization. 
4 | """ 5 | 6 | import argparse 7 | from pathlib import Path 8 | import numpy as np 9 | import pandas as pd 10 | import quantities as pq 11 | import re 12 | from utils.io_utils import load_neo, save_plot 13 | from utils.parse import none_or_path, none_or_str 14 | from utils.neo_utils import remove_annotations 15 | 16 | CLI = argparse.ArgumentParser() 17 | CLI.add_argument("--data", nargs='?', type=Path, required=True, 18 | help="path to input data in neo format") 19 | CLI.add_argument("--output", nargs='?', type=Path, required=True, 20 | help="path of output file") 21 | CLI.add_argument("--output_img", nargs='?', type=none_or_path, default=None, 22 | help="path of output image file") 23 | CLI.add_argument("--event_name", "--EVENT_NAME", nargs='?', type=str, default='wavefronts', 24 | help="name of neo.Event to analyze (must contain waves)") 25 | CLI.add_argument("--ignore_keys", "--IGNORE_KEYS", nargs='*', type=str, default=[], 26 | help="neo.Event annotations keys to not include in dataframe") 27 | CLI.add_argument("--include_keys", "--INCLUDE_KEYS", nargs='*', type=str, default=[], 28 | help="neo object annotations keys to include in dataframe") 29 | CLI.add_argument("--profile", "--PROFILE", nargs='?', type=none_or_str, default=None, 30 | help="profile name") 31 | 32 | def add_annotations_to_df(df, annotations, include_keys=[]): 33 | use_all_keys = not bool(len(include_keys)) 34 | 35 | for key, value in annotations.items(): 36 | key_is_relevant = use_all_keys or key in include_keys 37 | 38 | if key_is_relevant and key not in df.columns: 39 | if type(value) == pq.Quantity: 40 | df[f'{key}_unit'] = value.dimensionality.string 41 | value = value.magnitude 42 | df[key] = value 43 | 44 | return df 45 | 46 | def get_corresponding_array_values(a, b): 47 | ''' 48 | a -> b 49 | ''' 50 | a_labels = np.unique(a) 51 | mapping = {} 52 | for i, a_label in enumerate(a_labels): 53 | idx = np.where(a_label == a)[0] 54 | b_label_values = b[idx] 55 | if (b_label_values == 
def add_array_annotations_to_df(df, array_annotations, labels, index_name,
                                include_keys=[]):
    """Merge per-label-constant array annotations into *df*.

    Every array annotation whose values are identical within each label
    group becomes a new column *key*, merged into *df* on *index_name*.
    Annotations that vary within any label group are skipped.
    """
    take_everything = len(include_keys) == 0

    for key, values in array_annotations.items():
        if key in df.columns:
            continue
        if not (take_everything or key in include_keys):
            continue

        label_to_value = get_corresponding_array_values(labels, values)
        if not label_to_value:
            # annotation is not constant per label -> cannot be tabulated
            continue

        extra = pd.DataFrame(label_to_value.items(),
                             columns=[index_name, key])
        df = df.merge(extra, how='outer', on=index_name)

    return df
asig.sampling_rate.dimensionality.string 107 | df['recording_length'] = (asig.t_stop - asig.t_start).magnitude 108 | df['recording_length_unit'] = asig.t_start.dimensionality.string 109 | df['dim_x'] = int(max(asig.array_annotations['x_coords']))+1 110 | df['dim_y'] = int(max(asig.array_annotations['y_coords']))+1 111 | 112 | df.to_csv(args.output) 113 | 114 | # ToDo 115 | if args.output_img is not None: 116 | save_plot(args.output_img) 117 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "creators": [ 3 | { 4 | "orcid": "0000-0001-7373-5962", 5 | "affiliation": "Department of Psychology and Center for Data Science, New York University, New York, USA", 6 | "name": "Gutzen, Robin", 7 | "type": "ProjectMember" 8 | }, 9 | { 10 | "orcid": "0000-0002-2651-1277", 11 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy", 12 | "name": "Lupo, Cosimo", 13 | "type": "ProjectMember" 14 | }, 15 | { 16 | "orcid": "0000-0002-5933-2873", 17 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy", 18 | "name": "Marmoreo, Federico", 19 | "type": "ProjectMember" 20 | }, 21 | { 22 | "orcid": "0000-0001-7079-5724", 23 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy", 24 | "name": "De Bonis, Giulia", 25 | "type": "ProjectMember" 26 | } 27 | ], 28 | 29 | "contributors": [ 30 | { 31 | "orcid": "0000-0003-3488-0088", 32 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy; Institute of Neuroinformatics, University of Zürich and ETH Zürich, Zürich, Switzerland", 33 | "name": "De Luca, Chiara", 34 | "type": "ProjectMember" 35 | }, 36 | { 37 | "orcid": "0000-0003-0682-1232", 38 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy", 39 | "name": 
"Pastorelli, Elena", 40 | "type": "ProjectMember" 41 | }, 42 | { 43 | "orcid": "0000-0002-9958-2551", 44 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy", 45 | "name": "Capone, Cristiano", 46 | "type": "ProjectMember" 47 | }, 48 | { 49 | "orcid": "0000-0002-8489-0076", 50 | "affiliation": "European Laboratory for Non-linear Spectroscopy (LENS), University of Florence, Florence, Italy; Neuroscience Institute, National Research Council, Pisa, Italy", 51 | "name": "Allegra Mascaro, Anna Letizia", 52 | "type": "ProjectMember" 53 | }, 54 | { 55 | "orcid": "0000-0002-9605-5852", 56 | "affiliation": "European Laboratory for Non-linear Spectroscopy (LENS), University of Florence, Florence, Italy; Department of Physics and Astronomy, University of Florence, Florence, Italy", 57 | "name": "Resta, Francesco", 58 | "type": "ProjectMember" 59 | }, 60 | { 61 | "orcid": "0000-0002-8306-0759", 62 | "affiliation": "Institut d’Investigacions Biomèdiques August Pi i Sunyer (IDIBAPS), Barcelona, Spain", 63 | "name": "Manasanch, Arnau", 64 | "type": "ProjectMember" 65 | }, 66 | { 67 | "orcid": "0000-0002-0675-3981", 68 | "affiliation": "European Laboratory for Non-linear Spectroscopy (LENS), University of Florence, Florence, Italy; Department of Physics and Astronomy, University of Florence, Florence, Italy; National Institute of Optics, National Research Council, Sesto Fiorentino, Italy", 69 | "name": "Pavone, Francesco Saverio", 70 | "type": "ProjectMember" 71 | }, 72 | { 73 | "orcid": "0000-0002-8437-9083", 74 | "affiliation": "Institut d’Investigacions Biomèdiques August Pi i Sunyer (IDIBAPS), Barcelona, Spain; Institució Catalana de Recerca i Estudis Avançats (ICREA), Barcelona, Spain", 75 | "name": "Sanchez-Vives, Maria V.", 76 | "type": "ProjectMember" 77 | }, 78 | { 79 | "orcid": "0000-0002-2356-4509", 80 | "affiliation": "Natl. 
Center for Radiation Protection and Computational Physics, Istituto Superiore di Sanità (ISS), Rome, Italy", 81 | "name": "Mattia, Maurizio", 82 | "type": "ProjectMember" 83 | }, 84 | { 85 | "orcid": "0000-0003-2829-2220", 86 | "affiliation": "Institute for Advanced Simulation (IAS-6), Jülich Research Centre, Jülich, Germany; Theoretical Systems Neurobiology, RWTH Aachen University, Aachen, Germany", 87 | "name": "Grün, Sonja", 88 | "type": "ProjectMember" 89 | }, 90 | { 91 | "orcid": "0000-0003-1255-7300", 92 | "affiliation": "Institute for Advanced Simulation (IAS-6), Jülich Research Centre, Jülich, Germany", 93 | "name": "Denker, Michael", 94 | "type": "ProjectLeader" 95 | }, 96 | { 97 | "orcid": "0000-0003-1937-6086", 98 | "affiliation": "Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Roma, Rome, Italy", 99 | "name": "Paolucci, Pier Stanislao", 100 | "type": "ProjectLeader" 101 | } 102 | ], 103 | 104 | "title": "Cobrawap 0.2.3", 105 | 106 | "keywords": [ 107 | "neuroscience", 108 | "neurophysiology", 109 | "statistics", 110 | "data-analysis" 111 | ], 112 | 113 | "license": { 114 | "id": "GPL-3.0-or-later" 115 | }, 116 | 117 | "related_identifiers": [ 118 | { 119 | "scheme": "doi", 120 | "identifier": "10.5281/zenodo.10198748", 121 | "relation": "isVersionOf", 122 | "resource_type": "software" 123 | }, 124 | 125 | { 126 | "scheme": "doi", 127 | "identifier": "10.5281/zenodo.17048329", 128 | "relation": "isNewVersionOf", 129 | "resource_type": "software" 130 | }, 131 | 132 | { 133 | "scheme": "doi", 134 | "identifier": "10.48550/arXiv.2211.08527", 135 | "relation": "isDocumentedBy", 136 | "resource_type": "publication-preprint" 137 | }, 138 | 139 | { 140 | "scheme": "doi", 141 | "identifier": "10.1016/j.crmeth.2023.100681", 142 | "relation": "isDocumentedBy", 143 | "resource_type": "publication-article" 144 | } 145 | ], 146 | 147 | "grants": [ 148 | {"id": "785907"}, 149 | {"id": "945539"}, 150 | {"id": "101147319"} 151 | ], 152 | 153 | "upload_type": 
"software" 154 | } 155 | --------------------------------------------------------------------------------