├── .github └── workflows │ └── osl-cicd-actions.yml ├── .gitignore ├── .readthedocs.yaml ├── CITATION.cff ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── doc └── source │ ├── Makefile │ ├── conf.py │ ├── documentation.rst │ ├── faq.rst │ ├── index.rst │ ├── install.rst │ └── tutorials │ ├── README.rst │ ├── glm_sensor.py │ ├── glm_sensor_solution.py │ ├── glm_simulation.py │ ├── glm_simulation_solution.py │ ├── preprocessing_automatic.py │ ├── preprocessing_batch.py │ ├── preprocessing_manual.py │ ├── source-recon_batch.py │ ├── source-recon_coreg.py │ ├── source-recon_deleting-headshape-points.py │ └── source-recon_subject.py ├── envs ├── README.md ├── bmrc.yml ├── hbaws.yml └── osle.yml ├── examples ├── README.md ├── ctf │ ├── ctf_with_headshape_pos │ │ ├── 1_preprocess.py │ │ ├── 2_coregister.py │ │ ├── 3_source_reconstruct.py │ │ ├── 4_sign_flip.py │ │ ├── 5_save_npy.py │ │ └── README.md │ └── ctf_with_smri_fid │ │ ├── 1_preprocess.py │ │ ├── 2_coregister.py │ │ ├── 3_source_reconstruct.py │ │ ├── 4_sign_flip.py │ │ └── README.md ├── eeg │ ├── 1_preprocess.py │ ├── 2_coregister.py │ ├── 3_move_sensors.py │ ├── 4_source_reconstruct.py │ ├── 5_sign_flip.py │ └── README.md ├── elekta │ ├── 1_maxfilter.py │ ├── 2_preprocess.py │ ├── 3_coregister.py │ ├── 4_source_reconstruct.py │ ├── 5_sign_flip.py │ └── README.md ├── misc │ ├── beamformer_comparison_paper.py │ ├── delete_headshape_points.py │ ├── fix_smri_files.py │ ├── freesurfer_source_recon.py │ ├── sign_flipping_matlab_files.py │ └── spectrum_analysis_walkthrough.py ├── opm │ ├── 0_convert_files.py │ ├── 1_preprocess.py │ ├── 2_coregister.py │ ├── 3_source_reconstruct.py │ ├── 4_sign_flip.py │ └── README.md ├── oxford │ └── README.md ├── parallelisation │ ├── README.md │ ├── parallel_preprocess.py │ ├── parallel_source_reconstruct.py │ ├── serial_preprocess.py │ └── serial_source_reconstruct.py └── toolbox-paper │ ├── 1_preprocessing.py │ ├── 2_source-reconstruct.py │ ├── 3_sign-flip.py │ ├── 4_stats.py │ ├── README.md │ └── osl-toolbox-paper.yml ├── osl_ephys ├── README.md ├── __init__.py ├── glm │ ├── README.md │ ├── __init__.py │ ├── glm_base.py │ ├── glm_epochs.py │ └── glm_spectrum.py ├── maxfilter │ ├── README.md │ ├── __init__.py │ └── maxfilter.py ├── preprocessing │ ├── README.md │ ├── __init__.py │ ├── batch.py │ ├── ica_label.py │ ├── mne_wrappers.py │ ├── osl_wrappers.py │ └── plot_ica.py ├── report │ ├── README.md │ ├── __init__.py │ ├── preproc_report.py │ ├── src_report.py │ └── templates │ │ ├── raw_subject_panel.html │ │ ├── raw_summary_panel.html │ │ ├── src_subject_panel.html │ │ ├── src_summary_panel.html │ │ ├── subject_report.html │ │ └── summary_report.html ├── source_recon │ ├── README.md │ ├── __init__.py │ ├── batch.py │ ├── beamforming.py │ ├── files │ │ ├── Glasser50_space-MNI152NLin6_res-8x8x8.nii.gz │ │ ├── Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz │ │ ├── HarvOxf-sub-Schaefer100-combined-2mm.nii.gz │ │ ├── HarvOxf-sub-Schaefer100-combined-2mm_4d.nii.gz │ │ ├── HarvOxf-sub-Schaefer100-combined-2mm_4d_ds8.nii.gz │ │ ├── HarvardOxford-sub-prob-bin-2mm.nii.gz │ │ ├── MNI152_T1_2mm_brain.nii.gz │ │ ├── MNI152_T1_8mm_brain.nii.gz │ │ ├── Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm.nii.gz │ │ ├── Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d.nii.gz │ │ ├── Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d_ds8.nii.gz │ │ ├── WTA_fMRI_parcellation_ds2mm.nii.gz │ │ ├── WTA_fMRI_parcellation_ds8mm.nii.gz │ │ ├── aal_cortical_merged_8mm_stacked.nii.gz │ │ ├── 
dk_cortical.nii.gz │ │ ├── dk_full.nii.gz │ │ ├── fMRI_parcellation_ds2mm.nii.gz │ │ ├── fMRI_parcellation_ds8mm.nii.gz │ │ ├── fmri_d100_parcellation_with_PCC_reduced_2mm.nii.gz │ │ ├── fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz │ │ ├── fmri_d100_parcellation_with_PCC_tighterMay15_v2_2mm.nii.gz │ │ ├── fmri_d100_parcellation_with_PCC_tighterMay15_v2_6mm_exclusive.nii.gz │ │ ├── fmri_d100_parcellation_with_PCC_tighterMay15_v2_8mm.nii.gz │ │ ├── giles_39_binary.nii.gz │ │ ├── reduced_hcp-mmp_2mm.nii.gz │ │ └── reduced_hcp-mmp_8mm.nii.gz │ ├── freesurfer_utils.py │ ├── minimum_norm.py │ ├── nii.py │ ├── parcellation.py │ ├── rhino │ │ ├── __init__.py │ │ ├── coreg.py │ │ ├── forward_model.py │ │ ├── fsl_utils.py │ │ ├── polhemus.py │ │ ├── surfaces.py │ │ └── utils.py │ ├── sign_flipping.py │ └── wrappers.py ├── tests │ ├── __init__.py │ ├── test_00_package_canary.py │ ├── test_batch_api.py │ ├── test_batch_preproc.py │ ├── test_file_handling.py │ ├── test_glm.py │ └── test_parallel.py └── utils │ ├── README.md │ ├── __init__.py │ ├── create_neuromag306_info.py │ ├── file_handling.py │ ├── logger.py │ ├── misc.py │ ├── neuromag306_info.yml │ ├── opm.py │ ├── package.py │ ├── parallel.py │ ├── run_func.py │ ├── simulate.py │ ├── simulation_config │ ├── __init__.py │ ├── megin_template_info.fif │ ├── reduced_mvar_params_grad.npy │ ├── reduced_mvar_params_mag.npy │ ├── reduced_mvar_pcacomp_grad.npy │ ├── reduced_mvar_pcacomp_mag.npy │ ├── reduced_mvar_residcov_grad.npy │ ├── reduced_mvar_residcov_mag.npy │ └── simulate.py │ ├── spmio │ ├── README.md │ ├── __init__.py │ ├── _data.py │ ├── _events.py │ ├── _spmmeeg_utils.py │ └── spmmeeg.py │ ├── study.py │ ├── trees │ ├── mrc_meguk.tree │ ├── mrc_meguk_bti.tree │ ├── mrc_meguk_ctf.tree │ ├── mrc_meguk_megin.tree │ ├── mrc_meguk_processed.tree │ └── ohba_meg.tree │ └── version_utils.py ├── release_notes.md ├── requirements.txt ├── setup.cfg └── setup.py /.github/workflows/osl-cicd-actions.yml: -------------------------------------------------------------------------------- 1 | name: osl-cicd-actions 2 | on: [push] 3 | jobs: 4 | osl-cicd: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - name: Check out repository code 8 | uses: actions/checkout@v2 9 | 10 | # Setup Python (faster than using Python container) 11 | - name: Setup Python 12 | uses: actions/setup-python@v2 13 | with: 14 | python-version: "3.8.16" 15 | 16 | - name: Install pipenv 17 | run: | 18 | python -m pip install --upgrade pipenv wheel 19 | 20 | - name: Install dependencies 21 | run: | 22 | python -m pip install --upgrade pip 23 | pip install -e .[full] 24 | 25 | - name: Run test suite 26 | run: | 27 | pytest 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swo 3 | *.swp 4 | *.egg-info 5 | .vscode 6 | .coverage 7 | .DS_Store 8 | __pycache__ 9 | dist 10 | build 11 | doc/build 12 | doc/source/tutorials_build 13 | doc/source/autoapi 14 | doc/source/sg_execution_times.rst 15 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 
11 | tools:
12 |     python: "3.8"
13 |     # You can also specify other tool versions:
14 |     # nodejs: "16"
15 |     # rust: "1.55"
16 |     # golang: "1.17"
17 | 
18 | # Build documentation in the docs/ directory with Sphinx
19 | sphinx:
20 |    configuration: doc/source/conf.py
21 | 
22 | # If using Sphinx, optionally build your docs in additional formats such as PDF
23 | # formats:
24 | #    - pdf
25 | 
26 | # Optionally declare the Python requirements required to build your docs
27 | python:
28 |    install:
29 |    - requirements: requirements.txt
30 | 
-------------------------------------------------------------------------------- /CITATION.cff: --------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite the software itself, as well as the paper listed in the preferred-citation field. osl-ephys is based on MNE-Python and the Source Recon module on FSL/FreeSurfer. Please cite these packages appropriately."
3 | title: "osl-ephys"
4 | version: 2.3.1
5 | date-released: "2025-05-08"
6 | commit: 2201385db8ed1f238b36361b6ea864d342c2293f
7 | doi: 10.5281/zenodo.6875060
8 | keywords:
9 |   - MEG
10 |   - magnetoencephalography
11 |   - EEG
12 |   - electroencephalography
13 |   - electrophysiology
14 |   - neuroimaging
15 |   - data analysis
16 | authors:
17 |   - family-names: Quinn
18 |     given-names: Andrew J.
19 |     orcid: https://orcid.org/0000-0003-2267-9897
20 |   - family-names: van Es
21 |     given-names: Mats W.J.
22 |     orcid: https://orcid.org/0000-0002-7133-509X
23 |   - family-names: Gohil
24 |     given-names: Chetan
25 |     orcid: https://orcid.org/0000-0002-0888-1207
26 |   - family-names: Woolrich
27 |     given-names: Mark W.
28 |     orcid: https://orcid.org/0000-0001-8460-8854
29 | preferred-citation:
30 |   title: "osl-ephys: a Python toolbox for the analysis of electrophysiology data"
31 |   journal: "Frontiers in Neuroscience"
32 |   type: article
33 |   year: 2025
34 |   volume: 19
35 |   issue:
36 |   start: 1522675
37 |   end:
38 |   doi: 10.3389/fnins.2025.1522675
39 |   authors:
40 |     - family-names: van Es
41 |       given-names: Mats W.J.
42 |       orcid: https://orcid.org/0000-0002-7133-509X
43 |     - family-names: Gohil
44 |       given-names: Chetan
45 |       orcid: https://orcid.org/0000-0002-0888-1207
46 |     - family-names: Quinn
47 |       given-names: Andrew J.
48 |       orcid: https://orcid.org/0000-0003-2267-9897
49 |     - family-names: Woolrich
50 |       given-names: Mark W.
51 |       orcid: https://orcid.org/0000-0001-8460-8854
52 | 
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
1 | Copyright 2021-2024 University of Oxford, Oxford, UK.
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | 15 | 16 | 17 | This software includes code from MNE-Python, which is licensed under the BSD-3-Clause license: 18 | https://github.com/mne-tools/mne-python/blob/main/LICENSE.txt 19 | 20 | This software includes code from FSL-Python, which is licensed under the Apache License: 21 | https://git.fmrib.ox.ac.uk/fsl/fslpy/-/blob/main/LICENSE 22 | 23 | For the full text of these licenses, please see the LICENSE file in the root directory of these projects. 24 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # vim: set noexpandtab ts=4 sw=4: 2 | # 3 | 4 | clean: 5 | python setup.py clean 6 | 7 | clean-all: 8 | python setup.py clean --all 9 | 10 | # Remove autogenerated python bytecode 11 | cleanpy: 12 | find . -name \*.pyc -delete 13 | find . -name \*__pycache__ -delete 14 | 15 | doc: clean-all 16 | python3 setup.py build_sphinx 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OSL: Electrophysiological Data Analysis Toolbox 2 | 3 | Tools for analysing electrophysiological (M/EEG) data. 4 | 5 | Documentation: https://osl-ephys.readthedocs.io/en/latest/. 6 | 7 | ## Installation 8 | 9 | We recommend installing osl-ephys in a conda environment. 10 | 11 | ### Conda / mamba 12 | 13 | Miniforge (`conda`) can be installed with: 14 | ``` 15 | wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" 16 | bash Miniforge3-$(uname)-$(uname -m).sh 17 | rm Miniforge3-$(uname)-$(uname -m).sh 18 | ``` 19 | 20 | Mamba (`mamba`) can be installed with: 21 | ``` 22 | conda install -n base -c conda-forge mamba 23 | ``` 24 | 25 | ### osl-ephys 26 | 27 | osl-ephys can be installed from source code in a conda environment using the following. 28 | 29 | ``` 30 | git clone https://github.com/OHBA-analysis/osl-ephys.git 31 | cd osl-ephys 32 | mamba env create -f envs/osle.yml 33 | conda activate osle 34 | pip install -e . 35 | ``` 36 | 37 | Note, on a headless server you may need to set the following environment variable: 38 | ``` 39 | export PYVISTA_OFF_SCREEN=true 40 | ``` 41 | 42 | ### Oxford-specific computers 43 | 44 | If you are installing on an OHBA workstation computer (hbaws) use: 45 | ``` 46 | git clone https://github.com/OHBA-analysis/osl-ephys.git 47 | cd osl-ephys 48 | mamba env create -f envs/hbaws.yml 49 | conda activate osle 50 | pip install -e . 51 | ``` 52 | 53 | Or on the BMRC cluster: 54 | ``` 55 | git clone https://github.com/OHBA-analysis/osl-ephys.git 56 | cd osl-ephys 57 | mamba env create -f envs/bmrc.yml 58 | conda activate osle 59 | pip install -e . 
60 | ```
61 | 
62 | Remember to set the following environment variable:
63 | ```
64 | export PYVISTA_OFF_SCREEN=true
65 | ```
66 | 
67 | ## Removing osl-ephys
68 | 
69 | Simply remove the conda environment and delete the repository:
70 | ```
71 | conda env remove -n osle
72 | rm -rf osl-ephys
73 | ```
74 | 
75 | ## For developers
76 | 
77 | Install all the requirements:
78 | ```
79 | pip install -r requirements.txt
80 | ```
81 | 
82 | Run tests:
83 | ```
84 | cd osl_ephys
85 | pytest tests
86 | ```
87 | or to run a specific test:
88 | ```
89 | cd osl_ephys/tests
90 | pytest test_file_handling.py
91 | ```
92 | 
93 | Build documentation locally:
94 | ```
95 | sphinx-build -b html doc/source doc/build/html
96 | ```
97 | Compiled docs can be found in `doc/build/html/index.html`.
98 | 
-------------------------------------------------------------------------------- /doc/source/conf.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # This file is execfile()d with the current directory set to its containing dir.
4 | #
5 | # Note that not all possible configuration values are present in this
6 | # autogenerated file.
7 | #
8 | # All configuration values have a default; values that are commented out
9 | # serve to show the default.
10 | 
11 | import inspect
12 | import os
13 | import shutil
14 | import sys
15 | 
16 | __location__ = os.path.join(
17 |     os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))
18 | )
19 | 
20 | # If extensions (or modules to document with autodoc) are in another directory,
21 | # add these directories to sys.path here. If the directory is relative to the
22 | # documentation root, use os.path.abspath to make it absolute, like shown here.
23 | sys.path.insert(0, os.path.join(__location__, "../.."))
24 | 
25 | # -- General configuration -----------------------------------------------------
26 | 
27 | # If your documentation needs a minimal Sphinx version, state it here.
28 | # needs_sphinx = '1.0'
29 | 
30 | # Add any Sphinx extension module names here, as strings. They can be extensions
31 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
32 | extensions = [
33 |     #"sphinx.ext.autosummary",
34 |     #"sphinx.ext.autodoc",
35 |     "sphinx.ext.intersphinx",
36 |     "sphinx.ext.todo",
37 |     "sphinx.ext.viewcode",
38 |     "sphinx.ext.coverage",
39 |     "sphinx.ext.doctest",
40 |     "sphinx.ext.ifconfig",
41 |     "sphinx.ext.mathjax",
42 |     "sphinx.ext.napoleon",
43 |     "sphinx_gallery.gen_gallery",
44 |     "autoapi.extension",
45 | ]
46 | autoapi_type = "python"
47 | autoapi_keep_files = True
48 | autoapi_add_toctree_entry = False
49 | autoapi_dirs = ["../../osl_ephys"]
50 | 
51 | sphinx_gallery_conf = {
52 |     "examples_dirs": "tutorials",  # path to your example (tutorial) scripts
53 |     "gallery_dirs": "tutorials_build",  # path to where to save gallery generated output
54 | }
55 | 
56 | # Add any paths that contain templates here, relative to this directory.
57 | # templates_path = ["_templates"]
58 | 
59 | # The suffix of source filenames.
60 | source_suffix = ".rst"
61 | 
62 | # The encoding of source files.
63 | # source_encoding = 'utf-8-sig'
64 | 
65 | # The master toctree document.
66 | master_doc = "index"
67 | 
68 | # General information about the project.
69 | project = "osl-ephys"
70 | copyright = "2022, OHBA Methods Group, University of Oxford"
71 | author = "OHBA Methods Group, University of Oxford"
72 | 
73 | # The version info for the project you're documenting, acts as replacement for
74 | # |version| and |release|, also used in various other places throughout the
75 | # built documents.
76 | #
77 | # The short X.Y version.
78 | version = "2.3.1"
79 | # The full version, including alpha/beta/rc tags.
80 | release = ""
81 | 
82 | # The language for content autogenerated by Sphinx. Refer to documentation
83 | # for a list of supported languages.
84 | # language = None
85 | 
86 | # There are two options for replacing |today|: either, you set today to some
87 | # non-false value, then it is used:
88 | # today = ''
89 | # Else, today_fmt is used as the format for a strftime call.
90 | # today_fmt = '%B %d, %Y'
91 | 
92 | # List of patterns, relative to source directory, that match files and
93 | # directories to ignore when looking for source files.
94 | # exclude_patterns = ["_build"]
95 | 
96 | # The reST default role (used for this markup: `text`) to use for all documents.
97 | # default_role = None
98 | 
99 | # If true, '()' will be appended to :func: etc. cross-reference text.
100 | # add_function_parentheses = True
101 | 
102 | # If true, the current module name will be prepended to all description
103 | # unit titles (such as .. function::).
104 | # add_module_names = True
105 | 
106 | # If true, sectionauthor and moduleauthor directives will be shown in the
107 | # output. They are ignored by default.
108 | # show_authors = False
109 | 
110 | # The name of the Pygments (syntax highlighting) style to use.
111 | pygments_style = "sphinx"
112 | 
113 | # A list of ignored prefixes for module index sorting.
114 | # modindex_common_prefix = []
115 | 
116 | # If true, keep warnings as "system message" paragraphs in the built documents.
117 | # keep_warnings = False
118 | 
119 | 
120 | # -- Options for HTML output ---------------------------------------------------
121 | 
122 | # The theme to use for HTML and HTML Help pages. See the documentation for
123 | # a list of builtin themes.
124 | html_theme = "pydata_sphinx_theme"
125 | 
126 | # Theme options are theme-specific and customize the look and feel of a theme
127 | # further. For a list of options available for each theme, see the
128 | # documentation.
129 | html_theme_options = {
130 |     "github_url": "https://github.com/OHBA-analysis/osl-ephys",
131 |     "logo": {"text": "osl-ephys"},
132 | }
133 | 
134 | # Add any paths that contain custom themes here, relative to this directory.
135 | # html_theme_path = []
136 | 
137 | # The name for this set of Sphinx documents. If None, it defaults to
138 | # "<project> v<release> documentation".
139 | # try:
140 | #     from osl_ephys import __version__ as version
141 | # except ImportError:
142 | #     pass
143 | # else:
144 | #     release = version
145 | html_title = "osl-ephys"
146 | 
147 | # A shorter title for the navigation bar. Default is the same as html_title.
148 | html_short_title = "osl-ephys"
149 | 
150 | # The name of an image file (relative to this directory) to place at the top
151 | # of the sidebar.
152 | html_logo = "https://avatars.githubusercontent.com/u/15248840?s=200&v=4"
153 | 
154 | # The name of an image file (within the static path) to use as favicon of the
155 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
156 | # pixels large.
157 | # html_favicon = None 158 | 159 | # Add any paths that contain custom static files (such as style sheets) here, 160 | # relative to this directory. They are copied after the builtin static files, 161 | # so a file named "default.css" will overwrite the builtin "default.css". 162 | # html_static_path = ["_static"] 163 | 164 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 165 | # using the given strftime format. 166 | # html_last_updated_fmt = '%b %d, %Y' 167 | 168 | # If true, SmartyPants will be used to convert quotes and dashes to 169 | # typographically correct entities. 170 | # html_use_smartypants = True 171 | 172 | # Custom sidebar templates, maps document names to template names. 173 | # html_sidebars = {} 174 | 175 | # Additional templates that should be rendered to pages, maps page names to 176 | # template names. 177 | # html_additional_pages = {} 178 | 179 | # If false, no module index is generated. 180 | # html_domain_indices = True 181 | 182 | # If false, no index is generated. 183 | # html_use_index = True 184 | 185 | # If true, the index is split into individual pages for each letter. 186 | # html_split_index = False 187 | 188 | # If true, links to the reST sources are added to the pages. 189 | # html_show_sourcelink = True 190 | 191 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 192 | # html_show_sphinx = False 193 | 194 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 195 | # html_show_copyright = False 196 | 197 | # If true, an OpenSearch description file will be output, and all pages will 198 | # contain a tag referring to it. The value of this option must be the 199 | # base URL from which the finished HTML is served. 200 | # html_use_opensearch = '' 201 | 202 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 203 | # html_file_suffix = None 204 | 205 | # Output file base name for HTML help builder. 206 | htmlhelp_basename = "osl-doc" 207 | intersphinx_mapping = { 208 | 'mne': ('https://mne.tools/stable/', None), 209 | 'osl-ephys': ('https://osl-ephys.readthedocs.io/en/stable/', None), 210 | 'dask': ('https://distributed.dask.org/en/stable/', None), 211 | 'sails': ('https://sails.readthedocs.io/en/stable/', None), 212 | 'matplotlib': ('https://matplotlib.org/stable/', None), 213 | } 214 | -------------------------------------------------------------------------------- /doc/source/documentation.rst: -------------------------------------------------------------------------------- 1 | Documentation 2 | ============= 3 | 4 | Welcome to the osl-ephys documentation! 5 | 6 | The :doc:`API reference ` provides documentation for the classes, methods and functions in osl-ephys. New users may find the :doc:`FAQ ` useful. 7 | 8 | 9 | Tutorials 10 | --------- 11 | 12 | The following tutorials illustrate basic usage and analysis that can be done with osl-ephys. 13 | 14 | **Preprocessing**: 15 | 16 | - :doc:`tutorials_build/preprocessing_manual`. 17 | - :doc:`tutorials_build/preprocessing_automatic`. 18 | - :doc:`tutorials_build/preprocessing_batch`. 19 | 20 | 21 | **Source reconstruction**: 22 | 23 | - :doc:`tutorials_build/source-recon_coreg`. 24 | - :doc:`tutorials_build/source-recon_deleting-headshape-points`. 25 | - :doc:`tutorials_build/source-recon_subject`. 26 | - :doc:`tutorials_build/source-recon_batch`. 27 | 28 | 29 | **Statistics (General Linear Modelling)**: 30 | 31 | - :doc:`tutorials_build/glm_simulation`. 32 | - :doc:`tutorials_build/glm_sensor`. 
34 | 
35 | 
36 | More example scripts can be found in the `examples directory <https://github.com/OHBA-analysis/osl-ephys/tree/main/examples>`_ of the repo.
37 | 
38 | Workshops
39 | ---------
40 | 
41 | - `2023 OHBA Software Library (OSL) workshop `_.
42 | 
-------------------------------------------------------------------------------- /doc/source/index.rst: --------------------------------------------------------------------------------
1 | OSL Electrophysiological Data Analysis Toolbox
2 | ==============================================
3 | 
4 | This package contains tools for analysing electrophysiology data. It builds on top of the widely used MNE-Python package and adds unique analysis tools for M/EEG sensor and source space analysis. Specifically, it contains tools for:
5 | 
6 | * Multi-call MaxFilter processing.
7 | * (Pre-)processing of M/EEG data using a concise config API.
8 | * Batch parallel processing using Dask.
9 | * Coregistration and volumetric source reconstruction using FSL.
10 | * Quality assurance of M/EEG processing using HTML reports.
11 | * Statistical significance testing (using GLM permutation testing).
12 | * And much more!
13 | 
14 | 
15 | For more information on how to use osl-ephys see the :doc:`documentation <documentation>`.
16 | 
17 | This package was developed by the Oxford Centre for Human Brain Activity (OHBA) Methods Group at the University of Oxford. Our group website is `here <https://www.win.ox.ac.uk/research/our-locations/OHBA>`_.
18 | 
19 | If you find this toolbox useful, please cite the following:
20 | 
21 | 
22 | **van Es, M. W., Gohil, C., Quinn, A. J., & Woolrich, M. W. (2025). osl-ephys: A Python toolbox for the analysis of electrophysiology data. Frontiers in Neuroscience, 19, 1522675.**
23 | 
24 | 
25 | **Quinn, A.J., Van Es, M.W.J., Gohil, C., & Woolrich, M.W. (2022). OHBA Software Library in Python (OSL) (0.1.1). Zenodo. https://doi.org/10.5281/zenodo.6875060**
26 | 
27 | 
28 | 
29 | The package heavily builds on MNE-Python, and for the source recon module, on FSL. Please also cite these packages if you use them:
30 | 
31 | **Gramfort, A., Luessi, M., Larson, E., Engemann, D.A., Strohmeier, D., Brodbeck, C., Goj, R., Jas, M., Brooks, T., Parkkonen, L., Hämäläinen, M.S. (2013). MEG and EEG data analysis with MNE-Python. Frontiers in Neuroscience, 7(267):1–13. doi:10.3389/fnins.2013.00267.**
32 | 
33 | 
34 | **S.M. Smith, M. Jenkinson, M.W. Woolrich, C.F. Beckmann, T.E.J. Behrens, H. Johansen-Berg, P.R. Bannister, M. De Luca, I. Drobnjak, D.E. Flitney, R. Niazy, J. Saunders, J. Vickers, Y. Zhang, N. De Stefano, J.M. Brady, and P.M. Matthews. Advances in functional and structural MR image analysis and implementation as FSL. NeuroImage, 23(S1):208-19, 2004**
35 | 
36 | If you would like to request new features or if you're confident that you have found a bug, please create a new issue on the `GitHub issues <https://github.com/OHBA-analysis/osl-ephys/issues>`_ page.
37 | 
38 | .. |logo1| image:: https://avatars.githubusercontent.com/u/15248840?s=200&v=4
39 |    :height: 125px
40 |    :target: https://www.win.ox.ac.uk/research/our-locations/OHBA
41 | 
42 | .. |logo2| image:: https://www.win.ox.ac.uk/images/site-logos/oxcin_logo.png
43 |    :height: 125px
44 |    :target: https://www.win.ox.ac.uk/
45 | 
46 | |logo2| |logo1|
47 | 
48 | -----------------------
49 | 
50 | Contents
51 | ========
52 | 
53 | .. toctree::
54 |    :maxdepth: 2
55 | 
56 |    Install <install>
57 |    Documentation <documentation>
58 |    FAQ <faq>
59 |    API Reference
60 | 
61 | 
62 | Indices and tables
63 | ==================
64 | 
65 | * :ref:`genindex`
66 | * :ref:`modindex`
67 | * :ref:`search`
68 | 
-------------------------------------------------------------------------------- /doc/source/install.rst: --------------------------------------------------------------------------------
1 | Installation
2 | ============
3 | 
4 | A full installation of the osl-ephys toolbox includes:
5 | 
6 | - `FSL `_ (FMRIB Software Library) - only needed if you want to do volumetric source reconstruction.
7 | - `FreeSurfer `_ - only needed if you want to do surface-based source reconstruction.
8 | - `Miniforge `_ (or `Miniconda `_ / `Anaconda `_).
9 | - `osl-ephys <https://github.com/OHBA-analysis/osl-ephys>`_ (OSL Ephys Toolbox).
10 | 
11 | Instructions
12 | ------------
13 | 
14 | 1. Install FSL using the instructions `here `_.
15 | 
16 | If you're using a Windows machine, you will need to install the above in `Ubuntu `_ using the Windows Subsystem for Linux (WSL). Make sure to set up XLaunch for visualisations.
17 | 
18 | 2. Install FreeSurfer using the instructions `here `_.
19 | 
20 | 3. Install Miniforge3 with::
21 | 
22 |     wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh"
23 |     bash Miniforge3-$(uname)-$(uname -m).sh
24 |     rm Miniforge3-$(uname)-$(uname -m).sh
25 | 
26 | and install :code:`mamba` with::
27 | 
28 |     conda install -n base -c conda-forge mamba
29 | 
30 | Note, if you're using a Windows computer, you will need to do this in the WSL Ubuntu terminal that was used to install FSL (step 1).
31 | 
32 | 4. Install osl-ephys::
33 | 
34 |     curl https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/main/envs/osle.yml > osle.yml
35 |     mamba env create -f osle.yml
36 |     rm osle.yml
37 | 
38 | This will create a conda environment called :code:`osle`.
39 | 
40 | Loading the packages
41 | --------------------
42 | 
43 | To use osl-ephys you need to activate the conda environment::
44 | 
45 |     conda activate osle
46 | 
47 | **You need to do this every time you open a new terminal.** You know the :code:`osle` environment is activated if it says :code:`(osle)[...]` at the start of your terminal command line.
48 | 
49 | Note, if you get a :code:`conda init` error when activating the :code:`osle` environment during a job on an HPC cluster, you can resolve this by replacing::
50 | 
51 |     conda activate osle
52 | 
53 | with::
54 | 
55 |     source activate osle
56 | 
57 | Integrated Development Environments (IDEs)
58 | ------------------------------------------
59 | 
60 | The osl-ephys installation comes with `Jupyter Notebook `_. To open Jupyter Notebook use::
61 | 
62 |     conda activate osle
63 |     jupyter notebook
64 | 
65 | Test the installation
66 | ---------------------
67 | 
68 | The following should not raise any errors::
69 | 
70 |     conda activate osle
71 |     python
72 |     >>> import osl_ephys
73 | 
74 | Get the latest source code (optional)
75 | -------------------------------------
76 | 
77 | If you want the very latest code you can clone the GitHub repo. This is only necessary if you want recent changes to the package that haven't been released yet.
78 | 
79 | First install osl-ephys using the instructions above. Then clone the repo and install locally from source::
80 | 
81 |     conda activate osle
82 | 
83 |     git clone https://github.com/OHBA-analysis/osl-ephys.git
84 |     cd osl-ephys
85 |     pip install -e .
86 |     cd ..
87 | 
88 | After you install from source, you can run the code with local changes.
You can update the source code using::
89 | 
90 |     git pull
91 | 
92 | within the :code:`osl-ephys` directory.
93 | 
94 | Getting help
95 | ------------
96 | 
97 | If you run into problems while installing osl-ephys, please open an issue on the `GitHub repository <https://github.com/OHBA-analysis/osl-ephys>`_.
98 | 
-------------------------------------------------------------------------------- /doc/source/tutorials/README.rst: --------------------------------------------------------------------------------
1 | Welcome to the osl tutorials
2 | ============================
3 | 
4 | The following tutorials illustrate basic usage and analysis that can be done with osl.
5 | For the tutorials we will use the publicly available multimodal dataset (Wakeman & Henson, 2015).
6 | This dataset contains simultaneous MEG, EEG, (and fMRI) data from 19 subjects (6 sessions each)
7 | performing a simple visual task. Subjects were watching pictures of famous, unfamiliar, and scrambled
8 | faces. They had to indicate whether the picture was symmetrical or not.
9 | 
10 | 
11 | Preprocessing
12 | =============
-------------------------------------------------------------------------------- /doc/source/tutorials/glm_sensor_solution.py: --------------------------------------------------------------------------------
1 | """
2 | Challenge
3 | =========
4 | 
5 | Which parts of the spectrum show a statistically significant group average for the first level effect of the VEOG channel?
6 | 
7 | Let's run the permutations for this contrast:
8 | """
9 | 
10 | import matplotlib.pyplot as plt
11 | 
12 | import osl_ephys
13 | 
14 | # Note: `gglmsp` is the group GLM-Spectrum fitted earlier in the GLM sensor tutorial.
15 | group_contrast = 0  # Group mean
16 | firstlevel_contrast = 3  # VEOG
17 | 
18 | P = osl_ephys.glm.MaxStatPermuteGLMSpectrum(gglmsp, group_contrast, firstlevel_contrast, nperms=50, nprocesses=1)
19 | 
20 | critical_value = P.perms.get_thresh(100 - 5)
21 | print(critical_value)
22 | 
23 | plt.figure(figsize=(9, 9))
24 | ax = plt.subplot(111)
25 | 
26 | osl_ephys.glm.plot_sensor_spectrum(gglmsp.f, gglmsp.model.tstats[group_contrast, firstlevel_contrast, :, :].T,
27 |                                    gglmsp.info, base=0.5, ax=ax, sensor_proj=True)
28 | 
29 | xl = ax.get_xlim()
30 | 
31 | critical_value = P.perms.get_thresh(100 - 5)
32 | ax.hlines(critical_value, xl[0], xl[1], 'k')
33 | ax.hlines(-critical_value, xl[0], xl[1], 'k')
34 | 
35 | critical_value = P.perms.get_thresh(100 - 1)
36 | ax.hlines(critical_value, xl[0], xl[1], 'r')
37 | ax.hlines(-critical_value, xl[0], xl[1], 'r')
38 | 
39 | ax.set_ylim(-20, 20)
40 | 
41 | # There are very widespread changes - the peak is in frontal sensors at frequencies below around 15 Hz
42 | 
-------------------------------------------------------------------------------- /doc/source/tutorials/glm_simulation_solution.py: --------------------------------------------------------------------------------
1 | """
2 | Challenge 2 - SOLUTION!
3 | =======================
4 | 
5 | As we now have a regression model, we can compute the spectrum that the GLM-Spectrum would predict for different combinations of the predictor variables.
6 | 
7 | This is a linear prediction with a classic standard form. For the specific model we have been using, the equation looks like this.
8 | 
9 | spectrum = InterceptParameter * InterceptValue + LinearParameter * LinearValue
10 | 
11 | We can see the predictor values by looking at the design matrix.
12 | """
13 | 
14 | import matplotlib.pyplot as plt
15 | 
16 | # Note: `glmsp` is the fitted GLM-Spectrum from earlier in this tutorial.
17 | print(glmsp.design.design_matrix)
18 | 
19 | # The intercept value is always the same with a value of 1. This is as we expect as the intercept does not vary with the values of our other predictor variables.
20 | # The Linear predictor values vary between around -1.64 and +1.64; these slightly odd values are what we get when we z-transform a straight line.
21 | #
22 | # We can see the regression parameter estimates in the fitted model; let's look specifically at the betas at 22 Hz. Remember we have vectors of coefficients for every frequency.
23 | 
24 | print(glmsp.model.betas)
25 | 
26 | # Now the challenge: can you use the information above to write some code to plot the GLM model predicted spectrum for the start, middle and end of the data?
27 | 
28 | # Solution - we start with the intercept (glmsp.model.betas[0, :]) and add the slope effect for time (glmsp.model.betas[1, :]) multiplied by some predictor value.
29 | # One tricky part here is that we have standardised our time predictor - as far as the GLM knows time starts at -1.64 and ends at +1.64 - we can't use the 'real' time values.
30 | 
31 | 
32 | start_spec = glmsp.model.betas[0, :] + glmsp.model.betas[1, :] * -1.64
33 | middle_spec = glmsp.model.betas[0, :] + glmsp.model.betas[1, :] * 0
34 | end_spec = glmsp.model.betas[0, :] + glmsp.model.betas[1, :] * +1.64
35 | 
36 | plt.figure()
37 | plt.plot(glmsp.f, start_spec)
38 | plt.plot(glmsp.f, middle_spec)
39 | plt.plot(glmsp.f, end_spec)
40 | plt.legend(['Start', 'Middle', 'End'])
41 | plt.title('GLM predicted spectra')
42 | 
-------------------------------------------------------------------------------- /envs/README.md: --------------------------------------------------------------------------------
1 | # Conda Environments
2 | 
3 | - `osle.yml`: for Linux or MacOS computers.
4 | - `hbaws.yml`: for Oxford OHBA workstation computers.
5 | - `bmrc.yml`: for the Oxford BMRC cluster.
6 | 
7 | These can be installed with (replacing `<env>` with one of the files listed above):
8 | ```
9 | git clone https://github.com/OHBA-analysis/osl-ephys.git
10 | cd osl-ephys
11 | conda env create -f envs/<env>.yml
12 | conda activate osle
13 | pip install -e .
14 | ```
15 | 
16 | All environments come with Jupyter Notebook.
17 | 
-------------------------------------------------------------------------------- /envs/osle.yml: --------------------------------------------------------------------------------
1 | name: osle
2 | channels:
3 |   - conda-forge
4 | dependencies:
5 |   - python=3.12
6 |   - pip
7 |   - vtk
8 |   - pyvistaqt
9 |   - pip:
10 |     - jupyter
11 |     - ipympl
12 |     - ipywidgets
13 |     - ipyevents
14 |     - ipyvtklink
15 |     - jupyter-client
16 |     - numpy
17 |     - scipy
18 |     - matplotlib
19 |     - mne
20 |     - scikit-learn
21 |     - fslpy
22 |     - sails
23 |     - tabulate
24 |     - pyyaml
25 |     - neurokit2
26 |     - jinja2
27 |     - glmtools
28 |     - numba
29 |     - nilearn
30 |     - dask
31 |     - distributed
32 |     - parse
33 |     - opencv-python
34 |     - h5io
35 |     - mat73
36 |     - nibabel
37 |     - pandas
38 |     - panel
39 |     - pqdm
40 |     - seaborn
41 |     - tqdm
42 |     - osfclient
43 |     - pyvista[jupyter]
44 |     - osl-ephys
45 | 
-------------------------------------------------------------------------------- /examples/README.md: --------------------------------------------------------------------------------
1 | Example Scripts
2 | ---------------
3 | 
4 | This directory contains generic scripts for preprocessing/source reconstructing MEG data collected with different scanners.
5 | 
6 | For example scripts for various public M/EEG datasets, see https://github.com/OHBA-analysis/OHBA-Examples.
7 | 
-------------------------------------------------------------------------------- /examples/ctf/ctf_with_headshape_pos/1_preprocess.py: --------------------------------------------------------------------------------
1 | """Preprocess CTF data.
2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from osl_ephys import preprocessing 8 | 9 | # Settings 10 | config = """ 11 | preproc: 12 | - filter: {l_freq: 1, h_freq: 125, method: iir, iir_params: {order: 5, ftype: butter}} 13 | - notch_filter: {freqs: 50 100} 14 | - resample: {sfreq: 250} 15 | - bad_segments: {segment_len: 500, picks: mag} 16 | - bad_segments: {segment_len: 500, picks: mag, mode: diff} 17 | - bad_channels: {picks: mag} 18 | - interpolate_bads: {} 19 | """ 20 | 21 | # Create a list of paths to files to preprocess 22 | inputs = [ 23 | "data/raw/Nottingham/sub-not001/meg/sub-not001_task-resteyesopen_meg.ds", 24 | "data/raw/Nottingham/sub-not002/meg/sub-not002_task-resteyesopen_meg.ds", 25 | ] 26 | 27 | # Subject IDs 28 | subjects = [ 29 | "sub-not001_task-resteyesopen", 30 | "sub-not002_task-resteyesopen", 31 | ] 32 | 33 | # Directory to save output to 34 | outdir = "data" 35 | 36 | # Do preprocessing 37 | preprocessing.run_proc_batch( 38 | config, 39 | inputs, 40 | subjects=subjects, 41 | outdir=outdir, 42 | overwrite=True, 43 | ) 44 | -------------------------------------------------------------------------------- /examples/ctf/ctf_with_headshape_pos/2_coregister.py: -------------------------------------------------------------------------------- 1 | """Coregistration and forward modelling. 2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | import numpy as np 8 | import pandas as pd 9 | 10 | from osl_ephys import source_recon, utils 11 | 12 | def save_polhemus_from_pos(outdir, subject): 13 | """Saves fiducials/headshape from a pos file.""" 14 | 15 | # Get path to pos file 16 | pos_file = f"data/raw/Nottingham/{subject}/meg/{subject}_headshape.pos" 17 | utils.logger.log_or_print(f"Saving polhemus info from {pos_file}") 18 | 19 | # Get coreg filenames 20 | filenames = source_recon.rhino.get_coreg_filenames(outdir, subject) 21 | 22 | # Load in txt file, these values are in cm in polhemus space: 23 | num_headshape_pnts = int(pd.read_csv(pos_file, header=None).to_numpy()[0]) 24 | data = pd.read_csv(pos_file, header=None, skiprows=[0], delim_whitespace=True) 25 | 26 | # RHINO is going to work with distances in mm 27 | # So convert to mm from cm, note that these are in polhemus space 28 | data.iloc[:, 1:4] = data.iloc[:, 1:4] * 10 29 | 30 | # Polhemus fiducial points in polhemus space 31 | polhemus_nasion = ( 32 | data[data.iloc[:, 0].str.match("nasion")] 33 | .iloc[0, 1:4].to_numpy().astype("float64").T 34 | ) 35 | polhemus_rpa = ( 36 | data[data.iloc[:, 0].str.match("right")] 37 | .iloc[0, 1:4].to_numpy().astype("float64").T 38 | ) 39 | polhemus_lpa = ( 40 | data[data.iloc[:, 0].str.match("left")] 41 | .iloc[0, 1:4].to_numpy().astype("float64").T 42 | ) 43 | 44 | # Polhemus headshape points in polhemus space in mm 45 | polhemus_headshape = ( 46 | data[0:num_headshape_pnts] 47 | .iloc[:, 1:4].to_numpy().astype("float64").T 48 | ) 49 | 50 | # Save 51 | np.savetxt(filenames["polhemus_nasion_file"], polhemus_nasion) 52 | np.savetxt(filenames["polhemus_rpa_file"], polhemus_rpa) 53 | np.savetxt(filenames["polhemus_lpa_file"], polhemus_lpa) 54 | np.savetxt(filenames["polhemus_headshape_file"], polhemus_headshape) 55 | 56 | def fix_headshape_points(outdir, subject, preproc_file, smri_file, epoch_file): 57 | """Remove headshape points on the nose and neck.""" 58 | 59 | # Load saved headshape and nasion files 60 | filenames = source_recon.rhino.get_coreg_filenames(outdir, subject) 61 | hs = np.loadtxt(filenames["polhemus_headshape_file"]) 62 | nas = 
np.loadtxt(filenames["polhemus_nasion_file"])
63 |     lpa = np.loadtxt(filenames["polhemus_lpa_file"])
64 |     rpa = np.loadtxt(filenames["polhemus_rpa_file"])
65 | 
66 |     # Remove headshape points on the nose
67 |     remove = np.logical_and(hs[1] > max(lpa[1], rpa[1]), hs[2] < nas[2])
68 |     hs = hs[:, ~remove]
69 | 
70 |     # Remove headshape points on the neck
71 |     remove = hs[2] < min(lpa[2], rpa[2]) - 4
72 |     hs = hs[:, ~remove]
73 | 
74 |     # Remove headshape points far from the head in any direction
75 |     remove = np.logical_or(
76 |         hs[0] < lpa[0] - 5,
77 |         np.logical_or(
78 |             hs[0] > rpa[0] + 5,
79 |             hs[1] > nas[1] + 5,
80 |         ),
81 |     )
82 |     hs = hs[:, ~remove]
83 | 
84 |     # Overwrite headshape file
85 |     utils.logger.log_or_print(f"overwriting {filenames['polhemus_headshape_file']}")
86 |     np.savetxt(filenames["polhemus_headshape_file"], hs)
87 | 
88 | # Settings
89 | config = """
90 | source_recon:
91 |     - save_polhemus_from_pos: {}
92 |     - fix_headshape_points: {}
93 |     - compute_surfaces:
94 |         include_nose: False
95 |     - coregister:
96 |         use_nose: False
97 |         use_headshape: True
98 |     - forward_model:
99 |         model: Single Layer
100 | """
101 | 
102 | # Subject IDs
103 | subjects = [
104 |     "sub-not001_task-resteyesopen",
105 |     "sub-not002_task-resteyesopen",
106 | ]
107 | 
108 | # Fif files containing the sensor-level preprocessed data for each subject
109 | preproc_files = [
110 |     "data/sub-not001_task-resteyesopen/sub-not001_task-resteyesopen_preproc-raw.fif",
111 |     "data/sub-not002_task-resteyesopen/sub-not002_task-resteyesopen_preproc-raw.fif",
112 | ]
113 | 
114 | # The corresponding structurals for each subject
115 | smri_files = [
116 |     "smri/sub-not001_T1w.nii.gz",
117 |     "smri/sub-not002_T1w.nii.gz",
118 | ]
119 | 
120 | # Directory to save output to
121 | outdir = "data"
122 | 
123 | # Source reconstruction
124 | source_recon.run_src_batch(
125 |     config,
126 |     outdir=outdir,
127 |     subjects=subjects,
128 |     preproc_files=preproc_files,
129 |     smri_files=smri_files,
130 |     extra_funcs=[save_polhemus_from_pos, fix_headshape_points],
131 | )
132 | 
-------------------------------------------------------------------------------- /examples/ctf/ctf_with_headshape_pos/3_source_reconstruct.py: --------------------------------------------------------------------------------
1 | """Source reconstruction using an LCMV beamformer and parcellation.
2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from osl_ephys import source_recon 8 | 9 | # Settings 10 | config = """ 11 | source_recon: 12 | - beamform_and_parcellate: 13 | freq_range: [1, 45] 14 | chantypes: mag 15 | rank: {mag: 120} 16 | parcellation_file: aal_cortical_merged_8mm_stacked.nii.gz 17 | method: spatial_basis 18 | orthogonalisation: symmetric 19 | """ 20 | 21 | # Subject IDs 22 | subjects = [ 23 | "sub-not001_task-resteyesopen", 24 | "sub-not002_task-resteyesopen", 25 | ] 26 | 27 | # Fif files containing the sensor-level preprocessed data for each subject 28 | preproc_files = [ 29 | "data/sub-not001_task-resteyesopen/sub-not001_task-resteyesopen_preproc-raw.fif", 30 | "data/sub-not002_task-resteyesopen/sub-not002_task-resteyesopen_preproc-raw.fif", 31 | ] 32 | 33 | # Directory to save output to 34 | outdir = "data" 35 | 36 | # Source reconstruction 37 | source_recon.run_src_batch( 38 | config, 39 | outdir=outdir, 40 | subjects=subjects, 41 | preproc_files=preproc_files, 42 | ) 43 | -------------------------------------------------------------------------------- /examples/ctf/ctf_with_headshape_pos/4_sign_flip.py: -------------------------------------------------------------------------------- 1 | """Sign flipping. 2 | 3 | Note, this script is only needed if you're training a dynamic network 4 | model (e.g. the HMM) using the time-delay embedded (TDE) approach. 5 | 6 | You can skip this if you're training the HMM on amplitude envelope data 7 | or calculating sign-invariant quantities such as amplitude envelope 8 | correlations or power. 9 | """ 10 | 11 | # Authors: Chetan Gohil 12 | 13 | from osl_ephys import source_recon 14 | 15 | # Source directory and subjects to sign flip 16 | outdir = "data" 17 | subjects = [ 18 | "sub-not001_task-resteyesopen", 19 | "sub-not002_task-resteyesopen", 20 | ] 21 | 22 | # Find a good template subject to align other subjects to 23 | template = source_recon.find_template_subject( 24 | outdir, subjects, n_embeddings=15, standardize=True 25 | ) 26 | 27 | # Settings 28 | config = f""" 29 | source_recon: 30 | - fix_sign_ambiguity: 31 | template: {template} 32 | n_embeddings: 15 33 | standardize: True 34 | n_init: 3 35 | n_iter: 2500 36 | max_flips: 20 37 | """ 38 | 39 | # Do the sign flipping 40 | source_recon.run_src_batch(config, outdir, subjects) 41 | -------------------------------------------------------------------------------- /examples/ctf/ctf_with_headshape_pos/5_save_npy.py: -------------------------------------------------------------------------------- 1 | """Save sign flipped parcel data as numpy files. 2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from glob import glob 8 | 9 | from osl_dynamics.data import Data 10 | 11 | files = sorted(glob("data/*/*_sflip_parc-raw.fif")) 12 | data = Data( 13 | files, 14 | picks="misc", 15 | reject_by_annotation="omit", 16 | n_jobs=4, 17 | ) 18 | data.save("data/npy") 19 | data.delete_dir() 20 | -------------------------------------------------------------------------------- /examples/ctf/ctf_with_headshape_pos/README.md: -------------------------------------------------------------------------------- 1 | # CTF Example 2 | 3 | In this example we use headshape points to coregister the MEG and sMRI data. 4 | 5 | Full pipeline: 6 | 7 | - `1_preprocess.py`: Preprocess the sensor-level data. This includes standard signal processing such as downsampling and filtering as well as artefact removal. 8 | - `2_coregister.py`: Coregister the MEG and sMRI data and calculate the forward model. 
9 | - `3_source_reconstruct.py`: Beamform the sensor-level data and parcellate to give us the source-level data. We use the AAL parcellation.
10 | - `4_sign_flip.py`: Fix the dipole sign ambiguity (we align the sign of the parcel time courses across subjects). This is only needed if we're training a group-level model on time-delay embedded data.
11 | - `5_save_npy.py`: Save the source data as vanilla numpy files in (time, parcels) format.
12 | 
13 | See [here](https://github.com/OHBA-analysis/osl/tree/main/examples/parallelisation) for how to parallelise these scripts.
14 | 
-------------------------------------------------------------------------------- /examples/ctf/ctf_with_smri_fid/1_preprocess.py: --------------------------------------------------------------------------------
1 | """Preprocessing.
2 | 
3 | """
4 | 
5 | # Authors: Chetan Gohil
6 | 
7 | from osl_ephys import preprocessing
8 | 
9 | # Settings
10 | config = """
11 | preproc:
12 |     - filter: {l_freq: 1, h_freq: 125, method: iir, iir_params: {order: 5, ftype: butter}}
13 |     - notch_filter: {freqs: 50 100}
14 |     - resample: {sfreq: 250}
15 | """
16 | 
17 | # Create a list of paths to files to preprocess
18 | inputs = ["data/raw/mg04938_BrainampDBS_20170504_01_raw.fif"]
19 | 
20 | # Subject IDs
21 | subjects = ["LN_VTA2"]
22 | 
23 | # Directory to save output to
24 | outdir = "data"
25 | 
26 | # Do preprocessing
27 | preprocessing.run_proc_batch(
28 |     config,
29 |     inputs,
30 |     subjects=subjects,
31 |     outdir=outdir,
32 |     overwrite=True,
33 | )
34 | 
-------------------------------------------------------------------------------- /examples/ctf/ctf_with_smri_fid/2_coregister.py: --------------------------------------------------------------------------------
1 | """Coregistration.
2 | 
3 | """
4 | 
5 | # Authors: Chetan Gohil
6 | 
7 | from osl_ephys import source_recon
8 | 
9 | 
10 | config = """
11 | source_recon:
12 |     - extract_polhemus_from_info: {}
13 |     - save_mni_fiducials:
14 |         filepath: data/fiducials/{subject}_smri_fid.txt
15 |     - compute_surfaces:
16 |         include_nose: False
17 |     - coregister:
18 |         use_nose: False
19 |         use_headshape: False
20 | """
21 | 
22 | # List of subject IDs
23 | subjects = ["LN_VTA2"]
24 | 
25 | # Lists for input files
26 | preproc_files = ["data/LN_VTA2/mg04938_BrainampDBS_20170504_01_preproc-raw.fif"]
27 | smri_files = ["smri/LN_VTA2.nii"]
28 | 
29 | # Output directory
30 | outdir = "data"
31 | 
32 | # Do coregistration
33 | source_recon.run_src_batch(
34 |     config,
35 |     outdir=outdir,
36 |     subjects=subjects,
37 |     preproc_files=preproc_files,
38 |     smri_files=smri_files,
39 | )
40 | 
-------------------------------------------------------------------------------- /examples/ctf/ctf_with_smri_fid/3_source_reconstruct.py: --------------------------------------------------------------------------------
1 | """Source reconstruction using an LCMV beamformer and parcellation.
2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from osl_ephys import source_recon 8 | 9 | # Settings 10 | config = """ 11 | source_recon: 12 | - forward_model: 13 | model: Single Layer 14 | - beamform_and_parcellate: 15 | freq_range: [1, 80] 16 | chantypes: mag 17 | rank: {mag: 120} 18 | parcellation_file: Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz 19 | method: spatial_basis 20 | orthogonalisation: symmetric 21 | """ 22 | 23 | # List of subject IDs 24 | subjects = ["LN_VTA2"] 25 | 26 | # Fif files containing the sensor-level preprocessed data for each subject 27 | preproc_files = ["data/LN_VTA2/mg04938_BrainampDBS_20170504_01_preproc-raw.fif"] 28 | 29 | # Directories 30 | outdir = "data" 31 | 32 | # Source reconstruction 33 | source_recon.run_src_batch( 34 | config, 35 | outdir=outdir, 36 | subjects=subjects, 37 | preproc_files=preproc_files, 38 | ) 39 | -------------------------------------------------------------------------------- /examples/ctf/ctf_with_smri_fid/4_sign_flip.py: -------------------------------------------------------------------------------- 1 | """Align the sign of each parcel time course across subjects 2 | and save the data as a vanilla numpy file. 3 | 4 | """ 5 | 6 | # Authors: Chetan Gohil 7 | 8 | import os 9 | import mne 10 | import numpy as np 11 | from glob import glob 12 | 13 | from osl_ephys.source_recon.sign_flipping import ( 14 | load_covariances, 15 | find_template_subject, 16 | find_flips, 17 | apply_flips, 18 | ) 19 | 20 | def load(filename): 21 | """Load data without bad segments.""" 22 | raw = mne.io.read_raw_fif(filename, verbose=False) 23 | raw = raw.pick("misc") 24 | data = raw.get_data(reject_by_annotation="omit", verbose=False) 25 | return data.T 26 | 27 | 28 | # Files to sign flip 29 | files = sorted(glob("data/*/parc/parc-raw.fif")) 30 | 31 | # Get covariance matrices 32 | covs = load_covariances( 33 | files, 34 | n_embeddings=15, 35 | standardize=True, 36 | loader=load, 37 | ) 38 | 39 | # Load template covariance 40 | template_cov = np.load("../camcan_norm_model/template_cov.npy") 41 | 42 | # Output directory 43 | os.makedirs("data/npy", exist_ok=True) 44 | 45 | # Do sign flipping 46 | for i in range(len(files)): 47 | print("Sign flipping", files[i]) 48 | 49 | # Find the channels to flip 50 | flips, metrics = find_flips( 51 | covs[i], 52 | template_cov, 53 | n_embeddings=15, 54 | n_init=3, 55 | n_iter=2500, 56 | max_flips=20, 57 | ) 58 | 59 | # Apply flips to the parcellated data and save 60 | parc_data = load(files[i]) 61 | parc_data *= flips[np.newaxis, ...] 62 | subject = files[i].split("/")[-3] 63 | np.save(f"data/npy/{subject}.npy", parc_data) 64 | -------------------------------------------------------------------------------- /examples/ctf/ctf_with_smri_fid/README.md: -------------------------------------------------------------------------------- 1 | # CTF Example 2 | 3 | In this example we coregister using sMRI fiducials. 4 | 5 | ## Pipeline 6 | 7 | In this example we: 8 | 9 | - `1_preprocess.py`: Preprocess the sensor-level data. 10 | - `2_coregister.py`: Coregister the MEG and sMRI data. 11 | - `3_source_reconstruct.py`: Beamform the sensor-level data and parcellate to give us the source-level data. 12 | - `4_sign_flip.py`: Align the sign of each parcel time course to a template subject from the normative model. 13 | 14 | ## Parallelisation 15 | 16 | See [here](https://github.com/OHBA-analysis/osl/tree/main/examples/parallelisation) for how to parallelise these scripts. 
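
A minimal sketch of what that looks like, assuming `config`, `subjects`, `preproc_files` and `smri_files` are defined as in the scripts above (the pattern mirrors the Dask usage in `examples/elekta/3_coregister.py`):

```python
# Parallelisation sketch: distribute subjects over Dask workers.
from dask.distributed import Client

from osl_ephys import source_recon

if __name__ == "__main__":
    # n_workers is the number of CPUs to use; we recommend less than
    # half the total number of CPUs you have.
    client = Client(n_workers=4, threads_per_worker=1)

    # The run_*_batch functions accept dask_client=True to process
    # subjects in parallel using the client created above.
    source_recon.run_src_batch(
        config,
        outdir="data",
        subjects=subjects,
        preproc_files=preproc_files,
        smri_files=smri_files,
        dask_client=True,
    )
```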
17 | 
-------------------------------------------------------------------------------- /examples/eeg/1_preprocess.py: --------------------------------------------------------------------------------
1 | """Preprocessing.
2 | 
3 | """
4 | 
5 | # Authors: Chetan Gohil
6 | 
7 | from osl_ephys import preprocessing
8 | 
9 | config = """
10 | preproc:
11 |     - resample: {sfreq: 250}
12 |     - bad_segments: {segment_len: 500, picks: eeg}
13 |     - bad_segments: {segment_len: 500, picks: eeg, mode: diff}
14 |     - set_eeg_reference: {projection: true}
15 | """
16 | 
17 | preprocessing.run_proc_chain(
18 |     config,
19 |     infile="data/sub-001_clean-raw.fif",
20 |     subject="sub-001",
21 |     outdir="output",
22 |     overwrite=True,
23 | )
24 | 
-------------------------------------------------------------------------------- /examples/eeg/2_coregister.py: --------------------------------------------------------------------------------
1 | """Coregistration.
2 | 
3 | Note, this script does not actually move the EEG electrodes in the fif file.
4 | 
5 | This script should be used to find the best rescaling for the EEG sensor
6 | positions to match the structural.
7 | 
8 | The next script, 3_move_sensors.py, moves the sensors.
9 | """
10 | 
11 | # Authors: Chetan Gohil
12 | 
13 | from osl_ephys import source_recon
14 | 
15 | config = """
16 | source_recon:
17 |     - extract_polhemus_from_info:
18 |         include_eeg_as_headshape: True
19 |         rescale: [0.9, 1, 1]
20 |     - compute_surfaces:
21 |         include_nose: False
22 |     - coregister:
23 |         use_nose: False
24 |         use_headshape: True
25 | """
26 | 
27 | source_recon.run_src_chain(
28 |     config,
29 |     smri_file="data/sub-001_T1w.nii.gz",
30 |     subject="sub-001",
31 |     outdir="output",
32 | )
33 | 
-------------------------------------------------------------------------------- /examples/eeg/3_move_sensors.py: --------------------------------------------------------------------------------
1 | """Move EEG sensors and re-do coregistration.
2 | 
3 | """
4 | 
5 | # Authors: Chetan Gohil
6 | 
7 | from osl_ephys import source_recon
8 | 
9 | config = """
10 | source_recon:
11 |     - rescale_sensor_positions:
12 |         rescale: [0.9, 1, 1]
13 | 
14 |     # Redo coregistration
15 |     - extract_polhemus_from_info:
16 |         include_eeg_as_headshape: True
17 |     - coregister:
18 |         use_nose: False
19 |         use_headshape: True
20 | """
21 | 
22 | source_recon.run_src_chain(
23 |     config,
24 |     smri_file="data/sub-001_T1w.nii.gz",
25 |     subject="sub-001",
26 |     outdir="output",
27 | )
28 | 
-------------------------------------------------------------------------------- /examples/eeg/4_source_reconstruct.py: --------------------------------------------------------------------------------
1 | """Source reconstruction and parcellation.
2 | 
3 | """
4 | 
5 | # Authors: Chetan Gohil
6 | 
7 | from osl_ephys import source_recon
8 | 
9 | config = """
10 | source_recon:
11 |     - forward_model:
12 |         model: Triple Layer
13 |         eeg: True
14 |     - beamform_and_parcellate:
15 |         freq_range: [1, 40]
16 |         chantypes: eeg
17 |         rank: {eeg: 40}
18 |         parcellation_file: fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz
19 |         method: spatial_basis
20 |         orthogonalisation: symmetric
21 | """
22 | 
23 | source_recon.run_src_chain(
24 |     config,
25 |     subject="sub-001",
26 |     outdir="output",
27 | )
28 | 
-------------------------------------------------------------------------------- /examples/eeg/5_sign_flip.py: --------------------------------------------------------------------------------
1 | """Performs sign flipping.
2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from glob import glob 8 | from dask.distributed import Client 9 | 10 | from osl_ephys import utils 11 | from osl_ephys.source_recon import find_template_subject, run_src_batch 12 | 13 | outdir = "output" 14 | 15 | if __name__ == "__main__": 16 | utils.logger.set_up(level="INFO") 17 | client = Client(n_workers=4, threads_per_worker=1) 18 | 19 | subjects = [] 20 | for path in sorted(glob(f"{outdir}/*/parc/lcmv-parc-raw.fif")): 21 | subject = path.split("/")[-3] 22 | subjects.append(subject) 23 | 24 | template = find_template_subject( 25 | outdir, subjects, n_embeddings=15, standardize=True 26 | ) 27 | 28 | config = f""" 29 | source_recon: 30 | - fix_sign_ambiguity: 31 | template: {template} 32 | n_embeddings: 15 33 | standardize: True 34 | n_init: 3 35 | n_iter: 2500 36 | max_flips: 20 37 | """ 38 | 39 | run_src_batch( 40 | config, 41 | outdir=outdir, 42 | subjects=subjects, 43 | dask_client=True, 44 | ) 45 | -------------------------------------------------------------------------------- /examples/eeg/README.md: -------------------------------------------------------------------------------- 1 | EEG Example 2 | ----------- 3 | 4 | Here, we source reconstruct EEG data when we have a structural but don't have electrode positions. 5 | -------------------------------------------------------------------------------- /examples/elekta/1_maxfilter.py: -------------------------------------------------------------------------------- 1 | """Example script for maxfiltering raw data recorded at Oxford using the new scanner. 2 | 3 | Note: this script needs to be run on a computer with a MaxFilter license. 4 | """ 5 | 6 | # Authors: Chetan Gohil 7 | 8 | from osl_ephys.maxfilter import run_maxfilter_batch 9 | 10 | # Setup paths to raw (pre-maxfiltered) fif files 11 | input_files = [ 12 | "raw/file1.fif", 13 | "raw/file2.fif", 14 | ] 15 | 16 | # Directory to save the maxfiltered data to 17 | output_directory = "maxfilter" 18 | 19 | # Run MaxFiltering 20 | # 21 | # Note: 22 | # - We don't use the -trans option because it affects the placement of the head during coregistration. 23 | # - See the /maxfilter directory for further info. 24 | run_maxfilter_batch( 25 | input_files, 26 | output_directory, 27 | "--scanner Neo --mode multistage --tsss --headpos --movecomp", 28 | ) 29 | -------------------------------------------------------------------------------- /examples/elekta/2_preprocess.py: -------------------------------------------------------------------------------- 1 | """Preprocessing. 2 | 3 | Note, we preprocess multiple subjects in parallel to speed things up. 
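Note, the Dask client below is created inside an `if __name__ == "__main__"`
block; keep this guard and run the file as a script (rather than pasting it
into an interactive session) so the Dask worker processes can start safely.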
4 | """ 5 | 6 | # Authors: Chetan Gohil 7 | 8 | from dask.distributed import Client 9 | 10 | from osl_ephys import preprocessing, utils 11 | 12 | # Files and directories 13 | raw_file = "maxfilter/{subject}_tsss.fif" # {subject} will be replace by the name for the subject 14 | outdir = "data" 15 | 16 | subjects = ["sub-001", "sub-002"] 17 | 18 | # Settings 19 | config = """ 20 | preproc: 21 | - filter: {l_freq: 0.5, h_freq: 125, method: iir, iir_params: {order: 5, ftype: butter}} 22 | - notch_filter: {freqs: 50 100} 23 | - resample: {sfreq: 250} 24 | - bad_segments: {segment_len: 500, picks: mag} 25 | - bad_segments: {segment_len: 500, picks: grad} 26 | - bad_segments: {segment_len: 500, picks: mag, mode: diff} 27 | - bad_segments: {segment_len: 500, picks: grad, mode: diff} 28 | - bad_channels: {picks: mag} 29 | - bad_channels: {picks: grad} 30 | - ica_raw: {picks: meg, n_components: 40} 31 | - ica_autoreject: {picks: meg, ecgmethod: correlation, eogthreshold: auto} 32 | - interpolate_bads: {} 33 | """ 34 | 35 | if __name__ == "__main__": 36 | utils.logger.set_up(level="INFO") 37 | 38 | # Get input files 39 | inputs = [] 40 | for subject in subjects: 41 | inputs.append(raw_file.format(subject=subject)) 42 | 43 | # Setup parallel processing 44 | # 45 | # n_workers is the number of CPUs to use, 46 | # we recommend less than half the total number of CPUs you have 47 | client = Client(n_workers=4, threads_per_worker=1) 48 | 49 | # Main preprocessing 50 | preprocessing.run_proc_batch( 51 | config, 52 | inputs, 53 | subjects=subjects, 54 | outdir=outdir, 55 | overwrite=True, 56 | dask_client=True, 57 | ) 58 | -------------------------------------------------------------------------------- /examples/elekta/3_coregister.py: -------------------------------------------------------------------------------- 1 | """Coregisteration. 2 | 3 | The scripts was first run for all subjects (with n_init=1). Then for subjects 4 | whose coregistration looked a bit off we re-run this script just for that 5 | particular subject with a higher n_init. 6 | 7 | Note, these scripts do not include/use the nose in the coregistration. 8 | If you want to use the nose you need to change the config to include the nose 9 | and you may not want to call the remove_stray_headshape_points function. 
10 | """ 11 | 12 | # Authors: Chetan Gohil 13 | 14 | import numpy as np 15 | from dask.distributed import Client 16 | 17 | from osl_ephys import source_recon, utils 18 | 19 | # Directories 20 | outdir = "data" 21 | anatdir = "smri" 22 | 23 | # Files ({subject} will be replaced by the name for the subject) 24 | preproc_file = outdir + "{subject}/{subject}_tsss_preproc-raw.fif" 25 | smri_file = anatdir + "/{subject}/anat/{subject}_T1w.nii" 26 | 27 | # Subjects to coregister 28 | subjects = ["sub-001", "sub-002"] 29 | 30 | # Settings 31 | config = """ 32 | source_recon: 33 | - extract_polhemus_from_info: {} 34 | - remove_stray_headshape_points: {} 35 | - compute_surfaces: 36 | include_nose: False 37 | - coregister: 38 | use_nose: False 39 | use_headshape: True 40 | #n_init: 50 41 | """ 42 | 43 | if __name__ == "__main__": 44 | utils.logger.set_up(level="INFO") 45 | 46 | # Setup files 47 | preproc_files = [] 48 | smri_files = [] 49 | for subject in subjects: 50 | preproc_files.append(preproc_file.format(subject=subject)) 51 | smri_files.append(smri_file.format(subject=subject)) 52 | 53 | # Setup parallel processing 54 | # 55 | # n_workers is the number of CPUs to use, 56 | # we recommend less than half the total number of CPUs you have 57 | client = Client(n_workers=4, threads_per_worker=1) 58 | 59 | # Run coregistration 60 | source_recon.run_src_batch( 61 | config, 62 | outdir=outdir, 63 | subjects=subjects, 64 | preproc_files=preproc_files, 65 | smri_files=smri_files, 66 | dask_client=True, 67 | ) 68 | -------------------------------------------------------------------------------- /examples/elekta/4_source_reconstruct.py: -------------------------------------------------------------------------------- 1 | """Source reconstruction. 2 | 3 | This includes beamforming, parcellation and orthogonalisation. 4 | 5 | Note, before this script is run the /coreg directory created by 3_coregister.py 6 | must be copied and renamed to /src. 
7 | """ 8 | 9 | # Authors: Chetan Gohil 10 | 11 | from dask.distributed import Client 12 | 13 | from osl_ephys import source_recon, utils 14 | 15 | # Directories 16 | outdir = "data" 17 | 18 | # Files 19 | preproc_file = outdir + "/{subject}_tsss_preproc-raw.fif" # {subject} will be replaced by the subject name 20 | 21 | # Subjects to do 22 | subjects = ["sub-001", "sub-002"] 23 | 24 | # Settings 25 | config = """ 26 | source_recon: 27 | - forward_model: 28 | model: Single Layer 29 | - beamform_and_parcellate: 30 | freq_range: [1, 45] 31 | chantypes: [mag, grad] 32 | rank: {meg: 60} 33 | parcellation_file: Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz 34 | method: spatial_basis 35 | orthogonalisation: symmetric 36 | """ 37 | 38 | if __name__ == "__main__": 39 | utils.logger.set_up(level="INFO") 40 | 41 | # Get paths to files 42 | preproc_files = [] 43 | for subject in subjects: 44 | preproc_files.append(preproc_file.format(subject=subject)) 45 | 46 | # Setup parallel processing 47 | # 48 | # n_workers is the number of CPUs to use, 49 | # we recommend less than half the total number of CPUs you have 50 | client = Client(n_workers=4, threads_per_worker=1) 51 | 52 | # Source reconstruction 53 | source_recon.run_src_batch( 54 | config, 55 | outdir=outdir, 56 | subjects=subjects, 57 | preproc_files=preproc_files, 58 | dask_client=True, 59 | ) 60 | -------------------------------------------------------------------------------- /examples/elekta/5_sign_flip.py: -------------------------------------------------------------------------------- 1 | """Performs sign flipping. 2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from glob import glob 8 | from dask.distributed import Client 9 | 10 | from osl_ephys import utils 11 | from osl_ephys.source_recon import find_template_subject, run_src_batch 12 | 13 | # Directories 14 | outdir = "data" 15 | 16 | if __name__ == "__main__": 17 | utils.logger.set_up(level="INFO") 18 | 19 | # Subjects to sign flip 20 | # We create a list by looking for subjects that have a parc/parc-raw.fif file 21 | subjects = [] 22 | for path in sorted(glob(f"{outdir}/*/parc/parc-raw.fif")): 23 | subject = path.split("/")[-3] 24 | subjects.append(subject) 25 | 26 | # Find a good template subject to align other subjects to 27 | template = find_template_subject( 28 | outdir, subjects, n_embeddings=15, standardize=True 29 | ) 30 | 31 | # Settings for batch processing 32 | config = f""" 33 | source_recon: 34 | - fix_sign_ambiguity: 35 | template: {template} 36 | n_embeddings: 15 37 | standardize: True 38 | n_init: 3 39 | n_iter: 2500 40 | max_flips: 20 41 | """ 42 | 43 | # Setup parallel processing 44 | # 45 | # n_workers is the number of CPUs to use, 46 | # we recommend less than half the total number of CPUs you have 47 | client = Client(n_workers=4, threads_per_worker=1) 48 | 49 | # Do the sign flipping 50 | run_src_batch( 51 | config, 52 | outdir=outdir, 53 | subjects=subjects, 54 | dask_client=True, 55 | ) 56 | -------------------------------------------------------------------------------- /examples/elekta/README.md: -------------------------------------------------------------------------------- 1 | Elekta Example 2 | -------------- 3 | 4 | To run these scripts you need: 5 | 6 | 1. The raw `.fif` file containing the recording for each session. Note, you must have an EOG and ECG channel. 7 | 8 | 2. A structural MRI for each subject. 9 | 10 | The steps are: 11 | 12 | 1. **MaxFilter** (`1_maxfilter.py`). This script MaxFilters the raw recordings. 13 | 14 | 2. 
**Preprocess** (`2_preprocess.py`). This script filters, downsamples, and uses automated algorithms to detect bad segments/channels and uses ICA to remove eye blinks. Note, the automated ICA artefact removal might not always work very well, so it's often worthwhile to check the preprocessing. 15 | 16 | 3. **Coregistration** (`3_coregister.py`). This script aligns the sMRI and MEG space (using the Polhemus head space). Here, we advise you check the head has been placed in a plausible location in the MEG scanner. You can do this by looking at the coregistration panel in the report (`coreg/report/subjects_report.html` and `summary_report.html`). You may need to re-run this script with different settings (e.g. by increasing `n_init`) to fix poorly aligned subjects. 17 | 18 | 4. **Source reconstruct** (`4_source_reconstruct.py`). This script calculates the forward model, beamforms and parcellates the data. The parcellated data files can be found in `src/{subject}/parc/parc-raw.fif`. 19 | 20 | 5. **Dipole sign flipping** (`5_sign_flip.py`). This script tries to minimise the effect of the sign of each parcel time course being misaligned between subjects when fitting group-level models, such as a Hidden Markov Model. This step may be skipped if you're not interested in fitting group models. 21 | 22 | The final data can be found in `src/{subject}/sflip_parc-raw.fif`. 23 | -------------------------------------------------------------------------------- /examples/misc/delete_headshape_points.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Typically, you would use this function after running source_recon.rhino.coreg 3 | (either directly, or via the batch API), and after diagnosing a bad coreg 4 | (again, either directly using source_recon.rhino.coreg_display, or via the html 5 | report generated by using the batch API). 6 | ''' 7 | 8 | ## Download files 9 | !pip install osfclient 10 | 11 | import os 12 | import os.path as op 13 | from osl_ephys import utils 14 | from osl_ephys.source_recon.rhino import polhemus 15 | from osl_ephys import source_recon 16 | import numpy as np 17 | 18 | ''' 19 | To put ourselves in this situation we will first download the appropriate data 20 | and copy the headshape points to the appropriate paths in the assumed RHINO 21 | directory structure: 22 | ''' 23 | 24 | def get_data(name): 25 | """Download a dataset from OSF.""" 26 | print('Data will be in directory {}'.format(os.getcwd())) 27 | if os.path.exists(f"{name}"): 28 | return f"{name} already downloaded. Skipping.."
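# Not downloaded yet: fetch the zip archive from OSF with osfclient, then unpack it and remove the archive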
29 | os.system(f"osf -p zxb6c fetch SourceRecon/data/{name}.zip") 30 | os.system(f"unzip -o {name}.zip") 31 | os.remove(f"{name}.zip") 32 | return f"Data downloaded to: {name}" 33 | 34 | # Download the dataset 35 | get_data("notts_2subjects") 36 | 37 | ## Setup file names 38 | data_dir = './notts_2subjects' 39 | recon_dir = './notts_2subjects/recon' 40 | 41 | subject = '{subject}' 42 | fif_files_path = op.join(data_dir, subject, subject + '_task-resteyesopen_meg_preproc_raw.fif') 43 | fif_files = utils.Study(fif_files_path) 44 | subjects = fif_files.fields['subject'] 45 | fif_files = fif_files.get() 46 | 47 | ## Copy polhemus files 48 | def copy_polhemus_files(recon_dir, subject, preproc_file, smri_file, logger): 49 | polhemus_headshape = np.loadtxt(op.join(data_dir, subject, 'polhemus/polhemus_headshape.txt')) 50 | polhemus_nasion = np.loadtxt(op.join(data_dir, subject, 'polhemus/polhemus_nasion.txt')) 51 | polhemus_rpa = np.loadtxt(op.join(data_dir, subject, 'polhemus/polhemus_rpa.txt')) 52 | polhemus_lpa = np.loadtxt(op.join(data_dir, subject, 'polhemus/polhemus_lpa.txt')) 53 | 54 | # Get coreg filenames 55 | filenames = source_recon.rhino.get_coreg_filenames(recon_dir, subject) 56 | 57 | # Save 58 | np.savetxt(filenames["polhemus_nasion_file"], polhemus_nasion) 59 | np.savetxt(filenames["polhemus_rpa_file"], polhemus_rpa) 60 | np.savetxt(filenames["polhemus_lpa_file"], polhemus_lpa) 61 | np.savetxt(filenames["polhemus_headshape_file"], polhemus_headshape) 62 | 63 | copy_polhemus_files(recon_dir, subjects[0], [], [], []) 64 | 65 | ''' 66 | We can now call the delete_headshape_points function. 67 | Note that we can call this in two different ways, either: 68 | 69 | 1) Specify the subjects_dir AND the subject directory, in the directory structure used by RHINO: delete_headshape_points(recon_dir=recon_dir, subject=subject) 70 | 2) Specify the full path to the .npy file containing the (3 x num_headshapepoints) numpy array of headshape points: delete_headshape_points(polhemus_headshape_file=polhemus_headshape_file) 71 | 72 | Here, we want to use the first option. Let's now call the function: 73 | ''' 74 | 75 | polhemus.delete_headshape_points(recon_dir, subjects[0]) -------------------------------------------------------------------------------- /examples/misc/fix_smri_files.py: -------------------------------------------------------------------------------- 1 | """Fix structural MRI (sMRI) files. 2 | 3 | Replaces the sform with a standard sform. 4 | 5 | Note, this script may not be needed. Only use this script if 6 | OSL raises an error regarding the sform code of the sMRIs.
7 | """ 8 | 9 | # Authors: Chetan Gohil 10 | 11 | import os 12 | from pathlib import Path 13 | from shutil import copyfile 14 | 15 | import numpy as np 16 | import nibabel as nib 17 | 18 | from osl_ephys import source_recon 19 | 20 | # List of sMRI files we need to fix 21 | smri_files = [ 22 | "sub-001_T1w.nii.gz", 23 | "sub-002_T1w.nii.gz", 24 | ] 25 | 26 | # Directory to save fixed sMRIs to 27 | fixed_smri_dir = "data/smri" 28 | os.makedirs(fixed_smri_dir, exist_ok=True) 29 | 30 | # Loop through the sMRIs 31 | for input_smri_file in smri_files: 32 | 33 | # Copy the original sMRI file to the output directory 34 | input_name = Path(input_smri_file).name 35 | output_smri_file = f"{fixed_smri_dir}/{input_name}" 36 | print("Saving output to:", output_smri_file) 37 | copyfile(input_smri_file, output_smri_file) 38 | 39 | # Load the output SMRI file 40 | smri = nib.load(output_smri_file) 41 | 42 | # Get the original sform header 43 | sform = smri.header.get_sform() 44 | sform_std = np.copy(sform) 45 | 46 | # Fix the sform header 47 | sform_std[0, 0:4] = [-1, 0, 0, 128] 48 | sform_std[1, 0:4] = [0, 1, 0, -128] 49 | sform_std[2, 0:4] = [0, 0, 1, -90] 50 | source_recon.rhino.utils.system_call( 51 | "fslorient -setsform {} {}".format( 52 | " ".join(map(str, sform_std.flatten())), 53 | output_smri_file, 54 | ) 55 | ) 56 | 57 | -------------------------------------------------------------------------------- /examples/misc/freesurfer_source_recon.py: -------------------------------------------------------------------------------- 1 | # Authors: Mats van Es 2 | 3 | from osl_ephys import source_recon 4 | source_recon.setup_freesurfer('/Applications/freesurfer/7.4.1') 5 | 6 | recon_dir = '/Users/matsvanes/osl-dev/output' 7 | subject = 'sub-oxf001' 8 | preproc_file = '/Users/matsvanes/osl-dev/output/sub-oxf001_task-resteyesopen/sub-oxf001_task-resteyesopen_preproc-raw.fif' 9 | smri_file='/Users/matsvanes/osl-dev/smri/sub-oxf001_T1w.nii.gz' 10 | 11 | subjects = [subject] 12 | smri_files = [smri_file] 13 | preproc_files = [preproc_file] 14 | 15 | # Run FreeSurfer recon-all before running the pipeline 16 | for subject, smri_file in zip(subjects, smri_files): 17 | source_recon.recon_all(smri_file, recon_dir, subject) 18 | 19 | config = """ 20 | source_recon: 21 | - make_watershed_bem: {} 22 | - coregister: 23 | nasion_weight: 2.0 24 | - forward_model: 25 | forward_model: Single Layer 26 | source_space: surface 27 | kwargs: {ico: 4} 28 | gridstep: 8 29 | - minimum_norm_and_parcellate: 30 | source_space: surface 31 | source_method: eLORETA 32 | chantypes: [mag, grad] 33 | rank: {meg: 20} 34 | depth: 0.8 35 | loose: 0.2 36 | reg: 0.1 37 | pick_ori: None 38 | parcellation_file: Yeo2011_7Networks_N1000 39 | reference_brain: fsaverage 40 | method: pca_flip 41 | orthogonalisation: symmetric 42 | """ 43 | 44 | source_recon.run_src_batch( 45 | config, 46 | outdir=recon_dir, 47 | subjects=[subject], 48 | preproc_files=preproc_files, 49 | smri_file=[smri_file], 50 | surface_extraction_method='freesurfer' 51 | ) -------------------------------------------------------------------------------- /examples/misc/sign_flipping_matlab_files.py: -------------------------------------------------------------------------------- 1 | """Standalone script for peforming sign flipping with MATLAB data. 
2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | import numpy as np 8 | from scipy import io 9 | 10 | from osl_ephys.source_recon.sign_flipping import ( 11 | load_covariances, 12 | find_template_subject, 13 | find_flips, 14 | apply_flips, 15 | ) 16 | 17 | SRC_DIR = "/ohba/pi/mwoolrich/cgohil/uk_meg_notts/bmrc_data" 18 | 19 | N_EMBEDDINGS = 15 20 | STANDARDIZE = True 21 | N_INIT = 2 22 | N_ITER = 2500 23 | MAX_FLIPS = 20 24 | 25 | # Input data 26 | subject_files = [] 27 | for i in range(1, 11): 28 | subject_files.append(SRC_DIR + f"/subject{i}.mat") 29 | 30 | 31 | def load_matlab(filename): 32 | """Function to load data files.""" 33 | data = io.loadmat(filename) 34 | return data["X"] 35 | 36 | 37 | def save_matlab(filename, X): 38 | """Function to save data files.""" 39 | T = X.shape[0] 40 | io.savemat(filename, {"X": X, "T": T}) 41 | 42 | 43 | # Get covariance matrices 44 | covs = load_covariances( 45 | subject_files, 46 | N_EMBEDDINGS, 47 | STANDARDIZE, 48 | loader=load_matlab, 49 | ) 50 | 51 | # Find a subject to use as a template 52 | template = find_template_subject(covs, N_EMBEDDINGS) 53 | print("Using template:", subject_files[template]) 54 | 55 | # Loop through each subject 56 | for i in range(len(subject_files)): 57 | print("Subject", i + 1) 58 | 59 | if i == template: 60 | # Don't need to do sign flipping on the template subject 61 | parc_data = load_matlab(subject_files[i]) 62 | save_matlab(SRC_DIR + f"/sflip{i + 1}.mat", parc_data) 63 | continue 64 | 65 | # Find the channels to flip 66 | flips, metrics = find_flips( 67 | covs[i], covs[template], N_EMBEDDINGS, N_INIT, N_ITER, MAX_FLIPS 68 | ) 69 | 70 | # Apply flips to the parcellated data and save 71 | parc_data = load_matlab(subject_files[i]) 72 | parc_data *= flips[np.newaxis, ...] 73 | save_matlab(SRC_DIR + f"/sflip{i + 1}.mat", parc_data) 74 | -------------------------------------------------------------------------------- /examples/misc/spectrum_analysis_walkthrough.py: -------------------------------------------------------------------------------- 1 | import osl_ephys 2 | from scipy import signal 3 | import matplotlib.pyplot as plt 4 | 5 | raw = osl_ephys.utils.simulate_raw_from_template(10000, noise=1/3) 6 | raw.pick(picks='mag') 7 | 8 | 9 | #%% 10 | spec = osl_ephys.glm.glm_spectrum(raw) 11 | spec.plot_joint_spectrum(freqs=(1, 10, 17), base=0.5, title='testing123') 12 | 13 | #%% 14 | aper, osc = osl_ephys.glm.glm_irasa(raw, mode='magnitude') 15 | plt.figure() 16 | ax = plt.subplot(121) 17 | aper.plot_joint_spectrum(freqs=(1, 10, 17), base=0.5,ax=ax) 18 | ax = plt.subplot(122) 19 | osc.plot_joint_spectrum(freqs=(1, 10, 17), base=0.5,ax=ax) 20 | 21 | 22 | #%% 23 | alpha = raw.copy().filter(l_freq=7, h_freq=13) 24 | covs = {'alpha': np.abs(signal.hilbert(alpha.get_data()[raw.ch_names.index('MEG1711'), :]))} 25 | 26 | spec = osl_ephys.glm.glm_spectrum(raw, reg_ztrans=covs) 27 | 28 | plt.figure() 29 | ax = plt.subplot(121) 30 | spec.plot_joint_spectrum(0, freqs=(1, 10, 17), base=0.5,ax=ax) 31 | ax = plt.subplot(122) 32 | spec.plot_joint_spectrum(1, freqs=(1, 10, 17), base=0.5,ax=ax) 33 | 34 | 35 | 36 | 37 | aper, osc = osl_ephys.glm.glm_irasa(raw, reg_ztrans=covs) 38 | 39 | plt.figure() 40 | ax = plt.subplot(221) 41 | aper.plot_joint_spectrum(0, freqs=(1, 10, 17), base=0.5,ax=ax) 42 | ax = plt.subplot(222) 43 | aper.plot_joint_spectrum(1, freqs=(1, 10, 17), base=0.5,ax=ax) 44 | ax = plt.subplot(223) 45 | osc.plot_joint_spectrum(0, freqs=(1, 10, 17), base=0.5,ax=ax) 46 | ax = plt.subplot(224) 47 | 
osc.plot_joint_spectrum(1, freqs=(1, 10, 17), base=0.5, ax=ax) 48 | 49 | 50 | 51 | 52 | gglmsp = osl_ephys.glm.read_glm_spectrum('/Users/andrew/Downloads/bigmeg-camcan-movecomptrans_glm-spectrum_grad-noztrans_group-level.pkl') 53 | spec = osl_ephys.glm.GroupSensorGLMSpectrum(gglmsp.model, 54 | gglmsp.design, 55 | gglmsp.config, 56 | gglmsp.info, 57 | fl_contrast_names=None, 58 | data=gglmsp.data) 59 | P = osl_ephys.glm.MaxStatPermuteGLMSpectrum(spec, 1, nperms=25) 60 | -------------------------------------------------------------------------------- /examples/opm/0_convert_files.py: -------------------------------------------------------------------------------- 1 | """Convert Nottingham files into fif. 2 | 3 | """ 4 | 5 | import os 6 | from osl_ephys.utils import opm 7 | 8 | 9 | basedir = "dog_day_afternoon_OPM" 10 | outdir = "data/raw" 11 | 12 | os.makedirs(outdir, exist_ok=True) 13 | 14 | for sub in range(1, 11): 15 | for run in range(1, 3): 16 | # MEG data 17 | mat_file = f"{basedir}/sub-{sub:03d}/meg/sub-{sub:03d}_task-movie_run-{run:03d}_meg.mat" 18 | tsv_file = f"{basedir}/sub-{sub:03d}/meg/sub-{sub:03d}_task-movie_run-{run:03d}_channels.tsv_new" 19 | out_fif_file = f"{outdir}/sub-{sub:03d}_run-{run:03d}_raw.fif" 20 | 21 | # sMRI data 22 | smri_file = f"{basedir}/sub-{sub:03d}/anat/sub-{sub:03d}.nii" 23 | out_smri_file = f"{outdir}/sub-{sub:03d}_T1w.nii" 24 | 25 | # Convert files 26 | opm.convert_notts(mat_file, smri_file, tsv_file, out_fif_file, out_smri_file) 27 | -------------------------------------------------------------------------------- /examples/opm/1_preprocess.py: -------------------------------------------------------------------------------- 1 | """Preprocess OPM data. 2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | 8 | from dask.distributed import Client 9 | from osl_ephys import preprocessing, utils 10 | 11 | if __name__ == "__main__": 12 | utils.logger.set_up(level="INFO") 13 | client = Client(n_workers=4, threads_per_worker=1) 14 | 15 | config = """ 16 | preproc: 17 | - resample: {sfreq: 250} 18 | - filter: {l_freq: 1, h_freq: 45, method: iir, iir_params: {order: 5, ftype: butter}} 19 | - bad_segments: {segment_len: 500, picks: mag, significance_level: 0.1} 20 | - bad_segments: {segment_len: 500, picks: mag, mode: diff, significance_level: 0.1} 21 | - bad_channels: {picks: mag, significance_level: 0.4} 22 | """ 23 | 24 | subjects, inputs = [], [] 25 | for sub in range(1, 11): 26 | for run in range(1, 3): 27 | subjects.append(f"sub-{sub:03d}_run-{run:03d}") 28 | inputs.append(f"../data/raw/sub-{sub:03d}_run-{run:03d}_raw.fif") 29 | 30 | outdir = "data/preproc" 31 | 32 | dataset = preprocessing.run_proc_batch( 33 | config, 34 | inputs, 35 | subjects=subjects, 36 | outdir=outdir, 37 | overwrite=True, 38 | dask_client=True, 39 | ) 40 | -------------------------------------------------------------------------------- /examples/opm/2_coregister.py: -------------------------------------------------------------------------------- 1 | """Coregistration with RHINO.
2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from dask.distributed import Client 8 | from osl_ephys import source_recon, utils 9 | 10 | if __name__ == "__main__": 11 | utils.logger.set_up(level="INFO") 12 | client = Client(n_workers=4, threads_per_worker=1) 13 | 14 | config = """ 15 | source_recon: 16 | - compute_surfaces: 17 | include_nose: False 18 | - coregister: 19 | use_nose: False 20 | use_headshape: False 21 | already_coregistered: True 22 | """ 23 | 24 | subjects, smri_files = [], [] 25 | for sub in range(1, 11): 26 | for run in range(1, 3): 27 | subjects.append(f"sub-{sub:03d}_run-{run:03d}") 28 | smri_files.append(f"../data/raw/sub-{sub:03d}_T1w.nii") 29 | 30 | outdir = "data/preproc" 31 | 32 | source_recon.run_src_batch( 33 | config, 34 | outdir=outdir, 35 | subjects=subjects, 36 | smri_files=smri_files, 37 | dask_client=True, 38 | ) 39 | -------------------------------------------------------------------------------- /examples/opm/3_source_reconstruct.py: -------------------------------------------------------------------------------- 1 | """Source reconstruction with an LCMV beamformer. 2 | 3 | """ 4 | 5 | # Authors: Chetan Gohil 6 | 7 | from dask.distributed import Client 8 | from osl_ephys import source_recon, utils 9 | 10 | if __name__ == "__main__": 11 | utils.logger.set_up(level="INFO") 12 | client = Client(n_workers=4, threads_per_worker=1) 13 | 14 | config = """ 15 | source_recon: 16 | - forward_model: 17 | model: Single Layer 18 | - beamform_and_parcellate: 19 | freq_range: [1, 45] 20 | chantypes: mag 21 | rank: {mag: 100} 22 | spatial_resolution: 8 23 | parcellation_file: aal_cortical_merged_8mm_stacked.nii.gz 24 | method: spatial_basis 25 | orthogonalisation: symmetric 26 | """ 27 | 28 | subjects = [] 29 | for sub in range(1, 11): 30 | for run in range(1, 3): 31 | subjects.append(f"sub-{sub:03d}_run-{run:03d}") 32 | 33 | outdir = "data/preproc" 34 | 35 | source_recon.run_src_batch( 36 | config, 37 | outdir=outdir, 38 | subjects=subjects, 39 | dask_client=True, 40 | ) 41 | -------------------------------------------------------------------------------- /examples/opm/4_sign_flip.py: -------------------------------------------------------------------------------- 1 | """Sign flipping. 2 | 3 | Note, this script is only needed if you're training a dynamic network 4 | model (e.g. the HMM) using the time-delay embedded (TDE) approach. 5 | 6 | You can skip this if you're training the HMM on amplitude envelope data 7 | or calculating sign-invariant quantities such as amplitude envelope 8 | correlations or power. 
9 | """ 10 | 11 | # Authors: Chetan Gohil 12 | 13 | from dask.distributed import Client 14 | from osl_ephys import source_recon, utils 15 | 16 | if __name__ == "__main__": 17 | utils.logger.set_up(level="INFO") 18 | client = Client(n_workers=4, threads_per_worker=1) 19 | 20 | subjects = [] 21 | for sub in range(1, 11): 22 | for run in range(1, 3): 23 | subjects.append(f"sub-{sub:03d}_run-{run:03d}") 24 | 25 | outdir = "data/preproc" 26 | 27 | # Find a good template subject to align other subjects to 28 | template = source_recon.find_template_subject( 29 | outdir, subjects, n_embeddings=15, standardize=True 30 | ) 31 | 32 | config = f""" 33 | source_recon: 34 | - fix_sign_ambiguity: 35 | template: {template} 36 | n_embeddings: 15 37 | standardize: True 38 | n_init: 3 39 | n_iter: 2500 40 | max_flips: 20 41 | """ 42 | 43 | source_recon.run_src_batch( 44 | config, 45 | outdir=outdir, 46 | subjects=subjects, 47 | dask_client=True, 48 | ) 49 | -------------------------------------------------------------------------------- /examples/opm/README.md: -------------------------------------------------------------------------------- 1 | # Nottingham OPM Example 2 | 3 | Preprocessing and source reconstruction of OPM data. 4 | 5 | Data can be downloaded from: https://zenodo.org/doi/10.5281/zenodo.7525341. Note you need to unzip the files using 7zip if you have a Mac computer. 6 | 7 | ## Pipeline 8 | 9 | - `1_preprocess.py`: Preprocesses the data. This includes filtering, downsampling and automated artefact removal. 10 | - `2_coregister.py`: Extract surfaces from the structural MRI, create the coregistration files OSL is expecting. 11 | - `3_source_reconstruct.py`: Calculate forward model, beamform and parcellate. 12 | - `4_sign_flip.py` (optional): Sign flipping to fix the dipole sign ambiguity. 13 | -------------------------------------------------------------------------------- /examples/oxford/README.md: -------------------------------------------------------------------------------- 1 | # Oxford Examples 2 | 3 | OHBA-specific examples have been moved to: https://github.com/OHBA-analysis/OHBA-Examples/tree/main/oxford. 4 | -------------------------------------------------------------------------------- /examples/parallelisation/README.md: -------------------------------------------------------------------------------- 1 | Example scripts for parallelising batch processing with Dask 2 | ------------------------------------------------------------ 3 | 4 | If you're using a computer with multiple cores, we can batch processes (preprocess, source reconstruct) in parallel. The scripts `serial_*.py` use code that runs serially (i.e. without parallel workers) and the `parallel_*.py` scripts show the changes that need to be made to run the same processing in parallel. 5 | 6 | There are 3 things you need to do parallelise a script: 7 | 8 | 1. Add 9 | 10 | ``` 11 | if __name__ == "__main__": 12 | utils.logger.set_up(level="INFO") 13 | ``` 14 | 15 | at the top of the script. 16 | 17 | 2. Setup a Dask client with 18 | 19 | ``` 20 | client = Client(n_workers=2, threads_per_worker=1) 21 | ``` 22 | 23 | 3. Pass 24 | 25 | ``` 26 | dask_client=True 27 | ``` 28 | 29 | to the osl batch processing function. 30 | 31 | 32 | Note, running a script in parallel with Dask will create a `dask-worker-space` directory. This can be safely deleted after the script has finished. 
33 | -------------------------------------------------------------------------------- /examples/parallelisation/parallel_preprocess.py: -------------------------------------------------------------------------------- 1 | """Example script for preprocessing CamCAN in parallel. 2 | 3 | In this script the preprocessing for multiple fif files will be done in parallel. 4 | """ 5 | 6 | # Authors: Chetan Gohil 7 | 8 | import pathlib 9 | from glob import glob 10 | from dask.distributed import Client 11 | 12 | from osl_ephys import preprocessing, utils 13 | 14 | if __name__ == "__main__": 15 | utils.logger.set_up(level="INFO") 16 | 17 | rawdir = "/ohba/pi/mwoolrich/datasets/CamCan_2021/cc700/meg/pipeline/release005/BIDSsep/rest" 18 | outdir = "/ohba/pi/mwoolrich/cgohil/camcan/preproc" 19 | 20 | config = """ 21 | preproc: 22 | - filter: {l_freq: 0.5, h_freq: 125, method: 'iir', iir_params: {order: 5, ftype: butter}} 23 | - notch_filter: {freqs: 50 100 150 200} 24 | - resample: {sfreq: 250} 25 | - bad_channels: {picks: 'mag'} 26 | - bad_channels: {picks: 'grad'} 27 | - bad_segments: {segment_len: 2000, picks: 'mag'} 28 | - bad_segments: {segment_len: 2000, picks: 'grad'} 29 | """ 30 | 31 | # Get input files 32 | inputs = [] 33 | for subject in sorted(glob(f"{rawdir}/sub-*")): 34 | subject = pathlib.Path(subject).stem 35 | inputs.append(f"{rawdir}/{subject}/ses-rest/meg/{subject}_ses-rest_task-rest_meg.fif") 36 | inputs = inputs[:2] 37 | 38 | # Setup a Dask client for parallel processing 39 | # 40 | # Generally, we advise leaving threads_per_worker=1 41 | # and setting n_workers to the number of CPUs you want 42 | # to use. 43 | # 44 | # Note, we recommend you do not set n_workers to be 45 | # greater than half the total number of CPUs you have. 46 | # Also, each worker will process a separate fif file 47 | # so setting n_workers greater than the number of fif 48 | # files you want to process won't speed up the script. 49 | client = Client(n_workers=2, threads_per_worker=1) 50 | 51 | # Main preprocessing 52 | preprocessing.run_proc_batch( 53 | config, 54 | inputs, 55 | outdir=outdir, 56 | overwrite=True, 57 | dask_client=True, 58 | ) 59 | -------------------------------------------------------------------------------- /examples/parallelisation/parallel_source_reconstruct.py: -------------------------------------------------------------------------------- 1 | """Example script for source reconstructing CamCAN in parallel. 2 | 3 | In this script, we source reconstruct multiple subjects in parallel. 4 | 5 | Source reconstruction includes coregistration, beamforming, parcellation and 6 | orthogonalisation.
7 | """ 8 | 9 | # Authors: Chetan Gohil 10 | 11 | import numpy as np 12 | import pathlib 13 | import os.path as op 14 | from glob import glob 15 | from dask.distributed import Client 16 | 17 | from osl_ephys import source_recon, utils 18 | 19 | import logging 20 | logger = logging.getLogger("osl") 21 | 22 | if __name__ == "__main__": 23 | utils.logger.set_up(level="INFO") 24 | 25 | # Directories 26 | anatdir = "/ohba/pi/mwoolrich/datasets/CamCan_2021/cc700/mri/pipeline/release004/BIDS_20190411/anat" 27 | outdir = "/ohba/pi/mwoolrich/cgohil/camcan/src" 28 | 29 | # Files 30 | smri_file = anatdir + "/{0}/anat/{0}_T1w.nii" 31 | preproc_file = outdir + "{0}_ses-rest_task-rest_meg/{0}_ses-rest_task-rest_meg_preproc-raw.fif" 32 | 33 | # Settings 34 | config = """ 35 | source_recon: 36 | - extract_polhemus_from_info: {} 37 | - remove_headshape_points: {} 38 | - compute_surfaces: 39 | include_nose: False 40 | - coregister: 41 | use_nose: False 42 | use_headshape: True 43 | - forward_model: 44 | model: Single Layer 45 | - beamform_and_parcellate: 46 | freq_range: [1, 45] 47 | chantypes: meg 48 | rank: {meg: 60} 49 | parcellation_file: fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz 50 | method: spatial_basis 51 | orthogonalisation: symmetric 52 | """ 53 | 54 | def remove_headshape_points(outdir, subject): 55 | """Removes headshape points near the nose.""" 56 | 57 | # Get coreg filenames 58 | filenames = source_recon.rhino.get_coreg_filenames(outdir, subject) 59 | 60 | # Load saved headshape and nasion files 61 | hs = np.loadtxt(filenames["polhemus_headshape_file"]) 62 | nas = np.loadtxt(filenames["polhemus_nasion_file"]) 63 | 64 | # Drop nasion by 4cm 65 | nas[2] -= 40 66 | distances = np.sqrt( 67 | (nas[0] - hs[0]) ** 2 + (nas[1] - hs[1]) ** 2 + (nas[2] - hs[2]) ** 2 68 | ) 69 | 70 | # Keep headshape points more than 7cm away 71 | keep = distances > 70 72 | hs = hs[:, keep] 73 | 74 | # Overwrite headshape file 75 | logger.info(f"overwritting {filenames['polhemus_headshape_file']}") 76 | np.savetxt(filenames["polhemus_headshape_file"], hs) 77 | 78 | # Get subjects 79 | subjects = [] 80 | for subject in glob(PREPROC_DIR + "/sub-*"): 81 | subjects.append(pathlib.Path(subject).stem.split("_")[0]) 82 | 83 | # Setup files 84 | smri_files = [] 85 | preproc_files = [] 86 | for subject in subjects: 87 | smri_files.append(SMRI_FILE.format(subject)) 88 | preproc_files.append(PREPROC_FILE.format(subject)) 89 | 90 | # Setup a Dask client for parallel processing 91 | # 92 | # Generally, we advise leaving threads_per_worker=1 93 | # and setting n_workers to the number of CPUs you want 94 | # to use. 95 | # 96 | # Note, we recommend you do not set n_workers to be 97 | # greater than half the total number of CPUs you have. 98 | # Also, each worker will process a separate fif file 99 | # so setting n_workers greater than the number of fif 100 | # files you want to process won't speed up the script. 101 | client = Client(n_workers=2, threads_per_worker=1) 102 | 103 | # Beamforming and parcellation 104 | source_recon.run_src_batch( 105 | config, 106 | outdir=outdir, 107 | subjects=subjects, 108 | preproc_files=preproc_files, 109 | smri_files=smri_files, 110 | extra_funcs=[remove_headshape_points], 111 | dask_client=True, 112 | ) 113 | -------------------------------------------------------------------------------- /examples/parallelisation/serial_preprocess.py: -------------------------------------------------------------------------------- 1 | """Example script for preprocessing CamCAN in serial. 
2 | 3 | In this script the preprocessing will be done for one fif file at a time. 4 | """ 5 | 6 | # Authors: Chetan Gohil 7 | 8 | import pathlib 9 | from glob import glob 10 | 11 | from osl_ephys import preprocessing 12 | 13 | rawdir = "/ohba/pi/mwoolrich/datasets/CamCan_2021/cc700/meg/pipeline/release005/BIDSsep/rest" 14 | outdir = "/ohba/pi/mwoolrich/cgohil/camcan/preproc" 15 | 16 | config = """ 17 | preproc: 18 | - filter: {l_freq: 0.5, h_freq: 125, method: 'iir', iir_params: {order: 5, ftype: butter}} 19 | - notch_filter: {freqs: 50 100 150 200} 20 | - resample: {sfreq: 250} 21 | - bad_channels: {picks: 'mag'} 22 | - bad_channels: {picks: 'grad'} 23 | - bad_segments: {segment_len: 2000, picks: 'mag'} 24 | - bad_segments: {segment_len: 2000, picks: 'grad'} 25 | """ 26 | 27 | # Get input files 28 | inputs = [] 29 | for subject in sorted(glob(f"{rawdir}/sub-*")): 30 | subject = pathlib.Path(subject).stem 31 | inputs.append(f"{rawdir}/{subject}/ses-rest/meg/{subject}_ses-rest_task-rest_meg.fif") 32 | inputs = inputs[:2] 33 | 34 | # Main preprocessing 35 | preprocessing.run_proc_batch( 36 | config, 37 | inputs, 38 | outdir=outdir, 39 | overwrite=True, 40 | ) 41 | -------------------------------------------------------------------------------- /examples/parallelisation/serial_source_reconstruct.py: -------------------------------------------------------------------------------- 1 | """Example script for source reconstructing CamCAN in serial. 2 | 3 | In this script, we source reconstruct each subject one at a time. 4 | 5 | Source reconstruction includes coregistration, beamforming, parcellation and 6 | orthogonalisation. 7 | """ 8 | 9 | # Authors: Chetan Gohil 10 | 11 | import numpy as np 12 | import pathlib 13 | import os.path as op 14 | from glob import glob 15 | 16 | from osl_ephys import source_recon 17 | 18 | import logging 19 | logger = logging.getLogger("osl") 20 | 21 | # Directories 22 | anatdir = "/ohba/pi/mwoolrich/datasets/CamCan_2021/cc700/mri/pipeline/release004/BIDS_20190411/anat" 23 | outdir = "/ohba/pi/mwoolrich/cgohil/camcan/src" 24 | 25 | # Files 26 | smri_file = anatdir + "/{0}/anat/{0}_T1w.nii" 27 | preproc_file = outdir + "/{0}_ses-rest_task-rest_meg/{0}_ses-rest_task-rest_meg_preproc-raw.fif" 28 | 29 | # Settings 30 | config = """ 31 | source_recon: 32 | - extract_polhemus_from_info: {} 33 | - remove_headshape_points: {} 34 | - compute_surfaces: 35 | include_nose: False 36 | - coregister: 37 | use_nose: False 38 | use_headshape: True 39 | - forward_model: 40 | model: Single Layer 41 | - beamform_and_parcellate: 42 | freq_range: [1, 45] 43 | chantypes: meg 44 | rank: {meg: 60} 45 | parcellation_file: fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz 46 | method: spatial_basis 47 | orthogonalisation: symmetric 48 | """ 49 | 50 | def remove_headshape_points(outdir, subject): 51 | """Removes headshape points near the nose.""" 52 | 53 | # Get coreg filenames 54 | filenames = source_recon.rhino.get_coreg_filenames(outdir, subject) 55 | 56 | # Load saved headshape and nasion files 57 | hs = np.loadtxt(filenames["polhemus_headshape_file"]) 58 | nas = np.loadtxt(filenames["polhemus_nasion_file"]) 59 | 60 | # Drop nasion by 4cm 61 | nas[2] -= 40 62 | distances = np.sqrt( 63 | (nas[0] - hs[0]) ** 2 + (nas[1] - hs[1]) ** 2 + (nas[2] - hs[2]) ** 2 64 | ) 65 | 66 | # Keep headshape points more than 7cm away 67 | keep = distances > 70 68 | hs = hs[:, keep] 69 | 70 | # Overwrite headshape file 71 | logger.info(f"overwriting {filenames['polhemus_headshape_file']}") 72 |
np.savetxt(filenames["polhemus_headshape_file"], hs) 73 | 74 | # Get subjects 75 | subjects = [] 76 | for subject in sorted(glob(f"{outdir}/sub-*")): 77 | subjects.append(pathlib.Path(subject).stem.split("_")[0]) 78 | 79 | # Setup files 80 | smri_files = [] 81 | preproc_files = [] 82 | for subject in subjects: 83 | smri_files.append(smri_file.format(subject)) 84 | preproc_files.append(preproc_file.format(subject)) 85 | 86 | # Beamforming and parcellation 87 | source_recon.run_src_batch( 88 | config, 89 | outdir=outdir, 90 | subjects=subjects, 91 | preproc_files=preproc_files, 92 | smri_files=smri_files, 93 | extra_funcs=[remove_headshape_points], 94 | ) 95 | -------------------------------------------------------------------------------- /examples/toolbox-paper/1_preprocessing.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dask.distributed import Client 3 | 4 | import osl_ephys 5 | 6 | 7 | if __name__ == "__main__": 8 | client = Client(n_workers=16, threads_per_worker=1) # specify to enable parallel processing 9 | basedir = "ds117" 10 | 11 | config = """ 12 | meta: 13 | event_codes: 14 | famous/first: 5 15 | famous/immediate: 6 16 | famous/last: 7 17 | unfamiliar/first: 13 18 | unfamiliar/immediate: 14 19 | unfamiliar/last: 15 20 | scrambled/first: 17 21 | scrambled/immediate: 18 22 | scrambled/last: 19 23 | preproc: 24 | - find_events: {min_duration: 0.005} 25 | - set_channel_types: {EEG061: eog, EEG062: eog, EEG063: ecg} 26 | - filter: {l_freq: 0.5, h_freq: 125, method: iir, iir_params: {order: 5, ftype: butter}} 27 | - notch_filter: {freqs: 50 100} 28 | - resample: {sfreq: 250} 29 | - bad_segments: {segment_len: 500, picks: mag} 30 | - bad_segments: {segment_len: 500, picks: grad} 31 | - bad_segments: {segment_len: 500, picks: mag, mode: diff} 32 | - bad_segments: {segment_len: 500, picks: grad, mode: diff} 33 | - bad_channels: {picks: mag, significance_level: 0.1} 34 | - bad_channels: {picks: grad, significance_level: 0.1} 35 | - ica_raw: {picks: meg, n_components: 40} 36 | - ica_autoreject: {picks: meg, ecgmethod: correlation, eogmethod: correlation, 37 | eogthreshold: 0.35, apply: False} 38 | - interpolate_bads: {reset_bads: False} 39 | """ 40 | 41 | # Study utils enables selection of existing paths using various wild cards 42 | study = osl_ephys.utils.Study(os.path.join(basedir, "sub{sub_id}/MEG/run_{run_id}_raw.fif")) 43 | inputs = sorted(study.get()) 44 | 45 | # specify session names and output directory 46 | subjects = [f"sub{i+1:03d}-run{j+1:02d}" for i in range(19) for j in range(6)] 47 | outdir = os.path.join(basedir, "processed") 48 | 49 | osl_ephys.preprocessing.run_proc_batch( 50 | config, 51 | inputs, 52 | subjects, 53 | outdir, 54 | dask_client=True, 55 | random_seed=2280431064, 56 | ) 57 | -------------------------------------------------------------------------------- /examples/toolbox-paper/2_source-reconstruct.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from dask.distributed import Client 4 | from osl_ephys import source_recon, utils 5 | 6 | source_recon.setup_fsl("~/fsl") # FSL needs to be installed 7 | 8 | def fix_headshape_points(outdir, subject): 9 | filenames = source_recon.rhino.get_coreg_filenames(outdir, subject) 10 | 11 | # Load saved headshape and nasion files 12 | hs = np.loadtxt(filenames["polhemus_headshape_file"]) 13 | nas = np.loadtxt(filenames["polhemus_nasion_file"]) 14 | lpa = 
np.loadtxt(filenames["polhemus_lpa_file"]) 15 | rpa = np.loadtxt(filenames["polhemus_rpa_file"]) 16 | 17 | # Remove headshape points on the nose 18 | remove = np.logical_and(hs[1] > max(lpa[1], rpa[1]), hs[2] < nas[2]) 19 | hs = hs[:, ~remove] 20 | 21 | # Overwrite headshape file 22 | utils.logger.log_or_print(f"overwriting {filenames['polhemus_headshape_file']}") 23 | np.savetxt(filenames["polhemus_headshape_file"], hs) 24 | 25 | if __name__ == "__main__": 26 | utils.logger.set_up(level="INFO") 27 | client = Client(n_workers=16, threads_per_worker=1) 28 | 29 | config = """ 30 | source_recon: 31 | - extract_polhemus_from_info: {} 32 | - fix_headshape_points: {} 33 | - compute_surfaces: 34 | include_nose: False 35 | - coregister: 36 | use_nose: False 37 | use_headshape: True 38 | - forward_model: 39 | model: Single Layer 40 | - beamform_and_parcellate: 41 | freq_range: [1, 45] 42 | chantypes: [mag, grad] 43 | rank: {meg: 60} 44 | parcellation_file: Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz 45 | method: spatial_basis 46 | orthogonalisation: symmetric 47 | """ 48 | 49 | basedir = "ds117" 50 | proc_dir = os.path.join(basedir, "processed") 51 | 52 | # Define inputs 53 | subjects = [f"sub{i+1:03d}-run{j+1:02d}" for i in range(19) for j in range(6)] 54 | preproc_files = sorted(utils.Study(os.path.join(proc_dir, "sub{sub_id}-run{run_id}/sub{sub_id}-run{run_id}_preproc-raw.fif")).get()) 55 | smri_files = np.concatenate([[smri_file]*6 for smri_file in sorted(utils.Study(os.path.join(basedir, "sub{sub_id}/anatomy/highres001.nii.gz")).get())]) 56 | 57 | # Run source batch 58 | source_recon.run_src_batch( 59 | config, 60 | outdir=proc_dir, 61 | subjects=subjects, 62 | preproc_files=preproc_files, 63 | smri_files=smri_files, 64 | extra_funcs=[fix_headshape_points], 65 | dask_client=True, 66 | random_seed=1392754308, 67 | ) 68 | 69 | -------------------------------------------------------------------------------- /examples/toolbox-paper/3_sign-flip.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | from glob import glob 4 | from dask.distributed import Client 5 | 6 | from osl_ephys import source_recon, utils 7 | 8 | source_recon.setup_fsl("~/fsl") 9 | 10 | # Directory containing source reconstructed data 11 | proc_dir = "ds117/processed" 12 | src_files = sorted(utils.Study(os.path.join(proc_dir, 13 | "sub{sub_id}-run{run_id}/parc/parc-raw.fif")).get()) 14 | 15 | if __name__ == "__main__": 16 | utils.logger.set_up(level="INFO") 17 | 18 | subjects = [f"sub{i+1:03d}-run{j+1:02d}" for i in range(19) for j in range(6)] 19 | 20 | # Find a good template subject to match others to 21 | template = source_recon.find_template_subject( 22 | proc_dir, subjects, n_embeddings=15, standardize=True, 23 | ) 24 | 25 | # Settings 26 | config = f""" 27 | source_recon: 28 | - fix_sign_ambiguity: 29 | template: {template} 30 | n_embeddings: 15 31 | standardize: True 32 | n_init: 3 33 | n_iter: 3000 34 | max_flips: 20 35 | """ 36 | 37 | # Setup parallel processing 38 | client = Client(n_workers=16, threads_per_worker=1) 39 | 40 | # Run sign flipping 41 | source_recon.run_src_batch(config, proc_dir, subjects, dask_client=True, random_seed=3116145039) 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /examples/toolbox-paper/4_stats.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import glmtools 4 | import matplotlib.pyplot as
plt 5 | from dask.distributed import Client 6 | from osl_ephys import preprocessing, glm, utils 7 | 8 | 9 | if __name__ == "__main__": 10 | client = Client(n_workers=16, threads_per_worker=1) 11 | 12 | config = """ 13 | preproc: 14 | - read_dataset: {ftype: sflip_parc-raw} 15 | - epochs: {picks: misc, tmin: -0.2, tmax: 0.3} 16 | - glm_add_regressor: {name: famous, rtype: Categorical, codes: [5 6 7]} 17 | - glm_add_regressor: {name: unfamiliar, rtype: Categorical, codes: [13 14 15]} 18 | - glm_add_regressor: {name: scrambled, rtype: Categorical, codes: [17 18 19]} 19 | - glm_add_contrast: {name: Mean, values: {famous: 1/3, unfamiliar: 1/3, scrambled: 1/3}} 20 | - glm_add_contrast: {name: Faces-Scrambled, values: {famous: 1, unfamiliar: 1, scrambled: -2}} 21 | - glm_fit: {target: epochs, method: glm_epochs} 22 | group: 23 | - glm_add_regressor: {name: Subject, rtype: Categorical, key: Subject, codes: unique} 24 | - glm_add_contrast: {name: Mean, values: unique, key: Subject} 25 | - glm_fit: {method: epochs, tmin: 0.05, tmax: 0.3} 26 | - glm_permutations: {method: epochs, target: group_glm, contrast: Mean, type: max, nperms: 1000, threshold: 0.99} 27 | """ 28 | proc_dir = "ds117/processed" 29 | src_files = sorted(utils.Study(os.path.join(proc_dir, 30 | "sub{sub_id}-run{run_id}", "sub{sub_id}-run{run_id}_sflip_parc-raw.fif")).get()) 31 | subjects = [f"sub{i+1:03d}-run{j+1:02d}" for i in range(19) for j in range(6)] 32 | covs = {"Subject": [sub.split("-")[0] for sub in subjects]} 33 | 34 | preprocessing.run_proc_batch( 35 | config, 36 | src_files, 37 | subjects, 38 | outdir=proc_dir, 39 | ftype='raw', 40 | covs=covs, 41 | dask_client=True, 42 | overwrite=True, 43 | gen_report=False, 44 | skip_save=['events', 'raw', 'ica', 'event_id', 'sflip_parc-raw'], 45 | ) 46 | 47 | -------------------------------------------------------------------------------- /examples/toolbox-paper/README.md: -------------------------------------------------------------------------------- 1 | # OSL Toolbox Paper 2 | 3 | The scripts for the toolbox paper can be found here in the order they appear in the manuscript. You can download the data from [OpenfMRI](https://openfmri.org/s3-browser/?prefix=ds000117/ds000117_R0.1.1/compressed/). Extract the `tar.gz` files in a folder called `ds117`. Note: you may have to change the base directory in the scripts to match the directory where you store the data. You also need to [install FSL](https://fsl.fmrib.ox.ac.uk/fsl/docs/#/install/index), and make sure that `source_recon.setup_fsl()` in `2_source-reconstruct.py` and `3_sign-flip.py` is pointing to the correct directory. 4 | 5 | If you wish to fully reproduce the analysis pipeline install the environment specified in `osl-toolbox-paper.yml`, and set `random_seed` in `run_proc_batch` and `run_src_batch` according to the seed found in the logfiles on [OSF](https://osf.io/2rnyg/). 6 | 7 | ## Manual preprocessing 8 | 9 | Note that in the toolbox paper, automatically labeled ICA components were manually refined for the following sessions: 10 | - sub008-ses03 11 | - sub019-ses01 12 | - sub019-ses02 13 | - sub019-ses03 14 | - sub019-ses04 15 | - sub019-ses05 16 | - sub019-ses06 17 | - sub010-ses05 18 | 19 | This was done by running the following command line function iteratively, replacing "session".
20 | `osl_ica_label None processed "session"` 21 | After all specified sessions were refined, all automatically/manually labeled components were removed from the preprocessed MEG data using the command line call 22 | `osl_ica_apply processed` -------------------------------------------------------------------------------- /osl_ephys/README.md: -------------------------------------------------------------------------------- 1 | # OSL Packages 2 | 3 | OSL contains the following modules: 4 | 5 | - maxfilter : interface for Megin's commandline maxfilter 6 | - preprocessing : MEEG data processing and cleaning (similar to OPT) 7 | - glm : glmtools interface 8 | - report : create summaries and data quality/acquisition checks from fif files 9 | - source_recon : coregistration, beamforming and parcellation 10 | - utils : finding and organising files, things that could be used by all of the above 11 | 12 | -------------------------------------------------------------------------------- /osl_ephys/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | # -------------------------------------------------------- 4 | # If the user hasn't configured the NumExpr environment then an irritating low-level log 5 | # is generated. We suppress it by setting a default value of 8 if not already 6 | # set. 7 | # 8 | # https://numexpr.readthedocs.io/projects/NumExpr3/en/latest/user_guide.html#threadpool-configuration 9 | # https://github.com/pydata/numexpr/blob/7c2ef387d81cd450e8220fe4174cf46ec559994c/numexpr/utils.py#L118 10 | 11 | import os 12 | if 'NUMEXPR_MAX_THREADS' not in os.environ: 13 | os.environ['NUMEXPR_MAX_THREADS'] = '8' 14 | 15 | # Some modules are chatty by default when a logger is on - set log-levels to 16 | # WARNING on setup 17 | import logging 18 | logging.getLogger("asyncio").setLevel(logging.WARNING) 19 | logging.getLogger("matplotlib").setLevel(logging.WARNING) 20 | logging.getLogger("PIL").setLevel(logging.WARNING) 21 | 22 | # Feels like there should be a better solution for this. How do we setup a 23 | # logger which only produces OSL outputs?! 24 | 25 | # -------------------------------------------------------- 26 | # Main importing - set module structure here 27 | 28 | from . import utils # noqa: F401, F403 29 | from . import preprocessing # noqa: F401, F403 30 | from . import maxfilter # noqa: F401, F403 31 | from . import report # noqa: F401, F403 32 | from . import source_recon # noqa: F401, F403 33 | from . 
import glm # noqa: F401, F403 34 | 35 | # -------------------------------------------------------- 36 | osl_logger = logging.getLogger(__name__) 37 | osl_logger.debug('osl-ephys main init complete') 38 | 39 | # -------------------------------------------------------- 40 | __version__ = '2.4.dev0' 41 | -------------------------------------------------------------------------------- /osl_ephys/glm/README.md: -------------------------------------------------------------------------------- 1 | # GLM 2 | 3 | osl-ephys's GLM (general linear model) module, with modality-specific functions based on https://pypi.org/project/glmtools/ -------------------------------------------------------------------------------- /osl_ephys/glm/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | from .glm_spectrum import * 3 | from .glm_epochs import * 4 | 5 | with open(os.path.join(os.path.dirname(__file__), "README.md"), 'r') as f: 6 | __doc__ = f.read() -------------------------------------------------------------------------------- /osl_ephys/maxfilter/README.md: -------------------------------------------------------------------------------- 1 | # OHBA Maxfilter 2 | 3 | A python batch processing script for Maxfilter preprocessing of MEG files. 4 | 5 | - [Usage](#usage) 6 | - [Customising Options](#customising-options) 7 | - [Multistage Options](#multistage-options) 8 | - [Options Arguments](#optional-arguments) 9 | 10 | 11 | ## Usage 12 | 13 | We can call `osl_maxfilter` via the command line (make sure you have activated the osl conda environment first). `osl_maxfilter` requires at least 2 positional inputs to run. These are 14 | 15 | ``` 16 | files plain text file containing full paths to files to be 17 | processed 18 | outdir Path to output directory to save data in 19 | ``` 20 | 21 | For example: 22 | 23 | ``` 24 | osl_maxfilter input_files.txt /path/to/my/output/dir/ 25 | ``` 26 | 27 | will run each fif file in `input_files.txt` through maxfilter with default options and store the outputs in `/path/to/my/output/dir/` 28 | 29 | ### Customising Options 30 | 31 | Maxfilter processing can be customised using command line flags which are (mostly) mapped to the options in Maxfilter itself. For example, we can specify that `autobad` be included by adding the `--autobad` command line flag. 32 | 33 | ``` 34 | osl_maxfilter input_files.txt /path/to/my/output/dir/ --autobad 35 | ``` 36 | 37 | Similarly, we can include movement compensation and head-position computation by adding their respective options. 38 | 39 | ``` 40 | osl_maxfilter input_files.txt /path/to/my/output/dir/ --autobad --movecomp --headpos 41 | ``` 42 | 43 | Some options take additional arguments. Here we specify that temporal extension SSS should be applied with a 20 second data buffer (using `--tsss` and `-st 20`) and that two specific channels should be removed from the analysis (`--bads 1722 1723`). 44 | 45 | ``` 46 | osl_maxfilter input_files.txt /path/to/my/output/dir/ --movecomp --headpos --tsss --st 20 --bads 1722 1723 47 | ``` 48 | 49 | A [complete list of customisation options](#optional-arguments) is included at the bottom of this page. 50 | 51 | #### Temporal Extension 52 | 53 | The temporal extension can be turned on with the `--tsss` flag, the buffer length and correlation threshold can then be customised using the `--st` and `--corr` options. This is slightly different to MaxFilter itself, which only requires you to specify -st to turn on the temporal extension.
54 | 55 | This example specifies a temporal extension with a twenty second buffer window and a correlation threshold of 0.9 56 | 57 | ``` 58 | osl_maxfilter input_files.txt /path/to/my/output/dir/ --movecomp --headpos --tsss --st 20 --corr 0.9 59 | ``` 60 | 61 | #### Position Translation 62 | 63 | There are several ways to customise head position translation to align head position between two recordings. One option is to align both scans to the same pre-specified position. This is done by setting `--trans` to default and providing a head origin co-ordinate. For example: 64 | 65 | ``` 66 | osl_maxfilter input_files.txt /path/to/my/output/dir/ --trans default --origin 0 0 40 --frame head --force 67 | ``` 68 | 69 | Will move the point 0,0,40 in head space to the device origin and then align the device and head coordinate systems. 70 | 71 | We can also align one scan to match the head position of a reference scan. This is done by specifying the path to a reference fif file in the `--trans` option. For example: 72 | 73 | ``` 74 | osl_maxfilter input_files.txt /path/to/my/output/dir/ --trans /path/to/reference.fif 75 | ``` 76 | 77 | Will align all files with the head position from the `/path/to/reference.fif` file. 78 | 79 | 80 | ### Multistage Options 81 | 82 | More complex maxfilter workflows are implemented as specific 'modes'. Two modes are implemented at the moment. 83 | 84 | #### Multistage 85 | 86 | The multistage maxfilter is selected using `--mode multistage`. This will run: 87 | 88 | 1) Maxfilter with limited customisation, no movement compensation and autobad on to identify bad channels. 89 | 2) Maxfilter with full customisation and movement compensation with the specific bad channels from stage 1 90 | 3) Optional [position translation](#position-translation) (this requires that the `--trans` options are specified) 91 | 92 | #### CBU 93 | 94 | The CBU maxfilter processing chain is selected using `--mode cbu`. This will run: 95 | 96 | 1) A custom head-origin co-ordinate is estimated from the headshape points with any nose points removed. 97 | 2) Maxfilter with limited customisation, no movement compensation and autobad on to identify bad channels. 98 | 3) Maxfilter with full customisation and movement compensation with the specific bad channels from stage 2 99 | 4) Position translation to a default head position. 100 | 101 | ### Optional Arguments 102 | 103 | ``` 104 | optional arguments: 105 | -h, --help show this help message and exit 106 | --maxpath MAXPATH Path to maxfilter command to use 107 | --mode MODE Running mode for maxfilter. Either 'standard' or 108 | 'multistage' 109 | --headpos Output additional head movement parameter file 110 | --movecomp Apply movement compensation 111 | --movecompinter Apply movement compensation on data with intermittent 112 | HPI 113 | --autobad Apply automatic bad channel detection 114 | --autobad_dur AUTOBAD_DUR 115 | Set autobad on with a specific duration 116 | --bad BAD [BAD ...] Set specific channels to bad 117 | --badlimit BADLIMIT Set upper limit for number of bad channels to be 118 | removed 119 | --trans TRANS Transforms the data to the head position in defined 120 | file 121 | --origin ORIGIN [ORIGIN ...]
101 | ### Optional Arguments 102 | 103 | ``` 104 | optional arguments: 105 | -h, --help show this help message and exit 106 | --maxpath MAXPATH Path to maxfilter command to use 107 | --mode MODE Running mode for maxfilter. Either 'standard', 108 | 'multistage' or 'cbu' 109 | --headpos Output additional head movement parameter file 110 | --movecomp Apply movement compensation 111 | --movecompinter Apply movement compensation on data with intermittent 112 | HPI 113 | --autobad Apply automatic bad channel detection 114 | --autobad_dur AUTOBAD_DUR 115 | Set autobad on with a specific duration 116 | --bad BAD [BAD ...] Set specific channels to bad 117 | --badlimit BADLIMIT Set upper limit for number of bad channels to be 118 | removed 119 | --trans TRANS Transforms the data to the head position in defined 120 | file 121 | --origin ORIGIN [ORIGIN ...] 122 | Set specific sphere origin 123 | --frame FRAME Set device/head co-ordinate frame 124 | --force Ignore program warnings 125 | --tsss Apply temporal extension of maxfilter 126 | --st ST Data buffer length for TSSS processing 127 | --corr CORR Subspace correlation limit for TSSS processing 128 | --inorder INORDER Set the order of the inside expansion 129 | --outorder OUTORDER Set the order of the outside expansion 130 | --hpie HPIE Set the error limit for HPI coil fitting (def 5 mm) 131 | --hpig HPIG Set the g-value limit (goodness-of-fit) for HPI coil 132 | fitting (def 0.98) 133 | --scanner SCANNER Set CTC and Cal for the OHBA scanner the dataset was 134 | collected with (VectorView, VectorView2 or Neo). This 135 | overrides the --ctc and --cal options. 136 | --ctc CTC Specify cross-talk calibration file 137 | --cal CAL Specify fine-calibration file 138 | --overwrite Overwrite previous output files if they're in the way 139 | --dryrun Don't actually run anything, just print commands that 140 | would have been run 141 | ``` 142 | -------------------------------------------------------------------------------- /osl_ephys/maxfilter/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | from .maxfilter import * # noqa: F401, F403 4 | 5 | import logging 6 | osl_logger = logging.getLogger(__name__) 7 | osl_logger.debug('osl-ephys maxfilter init complete') 8 | 9 | with open(os.path.join(os.path.dirname(__file__), "README.md"), 'r') as f: 10 | __doc__ = f.read() 11 | -------------------------------------------------------------------------------- /osl_ephys/preprocessing/README.md: -------------------------------------------------------------------------------- 1 | # OSL Preprocessing tools 2 | 3 | A python batch processing script for preprocessing of MEG files.
4 | 5 | ## Interactive Example 6 | 7 | ``` 8 | import osl_ephys 9 | 10 | config = """ 11 | meta: 12 | event_codes: 13 | visual: 1 14 | auditory: 2 15 | button_press: 3 16 | preproc: 17 | - crop: {tmin: 40} 18 | - find_events: {min_duration: 0.005} 19 | - filter: {l_freq: 0.1, h_freq: 175} 20 | - notch_filter: {freqs: 50 100 150} 21 | - bad_channels: {picks: 'mag'} 22 | - bad_channels: {picks: 'grad'} 23 | - bad_channels: {picks: 'eeg'} 24 | - bad_segments: {segment_len: 800, picks: 'mag'} 25 | - bad_segments: {segment_len: 800, picks: 'grad'} 26 | - bad_segments: {segment_len: 800, picks: 'eeg'} 27 | - resample: {sfreq: 400, n_jobs: 6} 28 | - ica_raw: {picks: 'meg', n_components: 64} 29 | - ica_autoreject: {picks: 'meg', ecgmethod: 'correlation'} 30 | """ 31 | 32 | # Output directory 33 | outdir = '/where/do/i/want/my/output_dir' 34 | 35 | # Process a single file 36 | raw_file = '/path/to/file.fif' 37 | 38 | osl_ephys.preprocessing.run_proc_chain(config, raw_file, outdir) # creates file_preproc-raw.fif in outdir 39 | 40 | # Process a list of files 41 | list_of_raw_files = ['/path/to/file1.fif','/path/to/file2.fif','/path/to/file3.fif'] 42 | 43 | osl_ephys.preprocessing.run_proc_batch(config, list_of_raw_files, outdir, overwrite=True) 44 | ``` 45 | 46 | ### An example with epoching 47 | 48 | ``` 49 | config = """ 50 | meta: 51 | event_codes: 52 | visual: 1 53 | motor_short: 2 54 | motor_long: 3 55 | preproc: 56 | - crop: {tmin: 10} 57 | - set_channel_types: {EEG057: eog, EEG058: eog, EEG059: ecg} 58 | - pick_types: {meg: true, eeg: false, eog: true, 59 | ecg: true, stim: true, ref_meg: false} 60 | - find_events: {min_duration: 0.005} 61 | - filter: {l_freq: 1, h_freq: 175} 62 | - notch_filter: {freqs: 50 100 150} 63 | - bad_channels: {picks: 'meg'} 64 | - bad_segments: {segment_len: 2000, picks: 'meg'} 65 | - epochs: {tmin: -0.3, tmax: 1} 66 | - tfr_multitaper: {freqs: 4 45 41, n_jobs: 6, return_itc: false, 67 | average: false, use_fft: false, 68 | decim: 3, n_cycles: 2, time_bandwidth: 8} 69 | """ 70 | ``` 71 | 72 | The following code runs the chain on a file: 73 | 74 | ``` 75 | fname = '/path/to/my/dataset.fif' 76 | 77 | osl_ephys.preprocessing.run_proc_chain(config, fname) # creates dataset_preproc-raw.fif and dataset_epo.fif 78 | 79 | # Average the epochs object and visualise a response 80 | epochs = mne.read_epochs('/path/to/my/dataset_epo.fif') 81 | vis = epochs['visual'].average() 82 | vis.plot_joint() 83 | ``` 84 | 85 | ## Command Line Example 86 | 87 | The command line function `osl_preproc` is installed with the package. This is a command line interface to `run_proc_batch`. 88 | 89 | ``` 90 | Batch preprocess some fif files. 91 | 92 | positional arguments: 93 | config yaml defining preproc 94 | files plain text file containing full paths to files to be processed 95 | outdir Path to output directory to save data in 96 | 97 | optional arguments: 98 | -h, --help show this help message and exit 99 | --logsdir LOGSDIR Path to logs directory 100 | --reportdir REPORTDIR 101 | Path to report directory 102 | --gen_report GEN_REPORT 103 | Should we generate a report? 104 | --overwrite Overwrite previous output files if they're in the way 105 | --verbose VERBOSE Set the logging level for osl-ephys functions 106 | --mneverbose MNEVERBOSE 107 | Set the logging level for MNE functions 108 | --strictrun Will ask the user for confirmation before starting 109 | ``` 110 | 111 | osl_preproc takes at least 2 arguments: `config` and `files`; the rest are optional. For example: 112 | ``` 113 | osl_preproc my_config.yml list_of_raw_files.txt /path/to/my/output_dir --overwrite 114 | ``` 115 |
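`run_proc_batch` can also distribute files over a Dask cluster when called from Python. A minimal sketch, mirroring the pattern used in the packaged tests (the worker counts here are illustrative):

```
from dask.distributed import Client

import osl_ephys

if __name__ == '__main__':
    # Start a local Dask cluster; run_proc_batch picks it up when
    # dask_client=True is passed.
    client = Client(n_workers=2, threads_per_worker=1)
    osl_ephys.preprocessing.run_proc_batch(config, list_of_raw_files, outdir,
                                           overwrite=True, dask_client=True)
    client.shutdown()
```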
-------------------------------------------------------------------------------- /osl_ephys/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | from . import mne_wrappers # noqa: F401, F403 4 | from . import osl_wrappers # noqa: F401, F403 5 | 6 | from .batch import * # noqa: F401, F403 7 | from .plot_ica import * # noqa: F401, F403 8 | 9 | with open(os.path.join(os.path.dirname(__file__), "README.md"), 'r') as f: 10 | __doc__ = f.read() -------------------------------------------------------------------------------- /osl_ephys/report/README.md: -------------------------------------------------------------------------------- 1 | # OHBA MEG Quality Check 2 | 3 | MNE-Python based tool for generating quality check reports from MEG recordings. 4 | 5 | ## Usage 6 | 7 | These tools can be called from osl-ephys in a script as follows. 8 | 9 | ``` 10 | osl_ephys.report.gen_report_from_fif(list_of_files, outdir='/path/to/save/dir') 11 | ``` 12 | 13 | This function will generate a set of figures for each data file in the input list, save them in the output directory and collate them into an HTML page for easy viewing. 14 | 15 | 16 | ## Command line usage 17 | 18 | The script can also be run from the command line. 19 | 20 | ``` 21 | usage: osl_report [-h] files [files ...] outdir [OUTDIR] 22 | 23 | Run a quick quality control summary on data. 24 | 25 | positional arguments: 26 | files plain text file containing full paths to files to be processed 27 | outdir OUTDIR Path to output directory to save data in 28 | 29 | optional arguments: 30 | -h, --help show this help message and exit 31 | ``` 32 | -------------------------------------------------------------------------------- /osl_ephys/report/__init__.py: -------------------------------------------------------------------------------- 1 | from .preproc_report import * # noqa: F401, F403 2 | import os 3 | import logging 4 | osl_logger = logging.getLogger(__name__) 5 | osl_logger.debug('osl-ephys report init complete') 6 | 7 | with open(os.path.join(os.path.dirname(__file__), "README.md"), 'r') as f: 8 | __doc__ = f.read() -------------------------------------------------------------------------------- /osl_ephys/report/templates/raw_summary_panel.html: --------------------------------------------------------------------------------
[Jinja2 HTML template for the preprocessing summary panel. The markup was lost in extraction; the surviving fragments show a tabbed panel with a Config section, an optional Extra functions section ({% if data.extra_funcs is defined %}), a Preproc Summary table rendered via {{ data.tbl | safe }}, and batch/error log tabs guarded by {% if data.batchlog is defined %} and {% if data.errlog is defined %}.]
-------------------------------------------------------------------------------- /osl_ephys/report/templates/subject_report.html: -------------------------------------------------------------------------------- [Jinja2 HTML page titled "OSL Report: Subjects". The markup and scripts were lost in extraction; the surviving fragments show a header, the note "The panels below summarise each fif file (full list on the right). Use arrow keys to navigate tabs.", one panel per file rendered via {% for panel in panels %} {{ panel }} {% endfor %}, and a file list rendered via {{ filenames }}.]
-------------------------------------------------------------------------------- /osl_ephys/report/templates/summary_report.html: -------------------------------------------------------------------------------- [Jinja2 HTML page titled "OSL Report: Summary". The markup and scripts were lost in extraction; the surviving fragments show a header and a single summary panel rendered via {{ panel }}.]
-------------------------------------------------------------------------------- /osl_ephys/source_recon/README.md: -------------------------------------------------------------------------------- 1 | # OSL Source Reconstruction 2 | 3 | Tools for source reconstructing M/EEG files. 4 | -------------------------------------------------------------------------------- /osl_ephys/source_recon/__init__.py: -------------------------------------------------------------------------------- 1 | from .batch import * # noqa: F401, F403 2 | from .rhino.fsl_utils import setup_fsl, check_fsl # noqa: F401, F403 3 | from .freesurfer_utils import setup_freesurfer, check_freesurfer, recon_all, make_watershed_bem, make_fsaverage_src # noqa: F401, F403 4 | from .wrappers import find_template_subject # noqa: F401, F403 5 | import os 6 | with open(os.path.join(os.path.dirname(__file__), "README.md"), 'r') as f: 7 | __doc__ = f.read() -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/Glasser50_space-MNI152NLin6_res-8x8x8.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/Glasser50_space-MNI152NLin6_res-8x8x8.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/HarvOxf-sub-Schaefer100-combined-2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/HarvOxf-sub-Schaefer100-combined-2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/HarvOxf-sub-Schaefer100-combined-2mm_4d.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/HarvOxf-sub-Schaefer100-combined-2mm_4d.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/HarvOxf-sub-Schaefer100-combined-2mm_4d_ds8.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/HarvOxf-sub-Schaefer100-combined-2mm_4d_ds8.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/HarvardOxford-sub-prob-bin-2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/HarvardOxford-sub-prob-bin-2mm.nii.gz --------------------------------------------------------------------------------
/osl_ephys/source_recon/files/MNI152_T1_2mm_brain.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/MNI152_T1_2mm_brain.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/MNI152_T1_8mm_brain.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/MNI152_T1_8mm_brain.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d_ds8.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d_ds8.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/WTA_fMRI_parcellation_ds2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/WTA_fMRI_parcellation_ds2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/WTA_fMRI_parcellation_ds8mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/WTA_fMRI_parcellation_ds8mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/aal_cortical_merged_8mm_stacked.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/aal_cortical_merged_8mm_stacked.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/dk_cortical.nii.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/dk_cortical.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/dk_full.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/dk_full.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fMRI_parcellation_ds2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fMRI_parcellation_ds2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fMRI_parcellation_ds8mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fMRI_parcellation_ds8mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_reduced_2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_reduced_2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_tighterMay15_v2_2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_tighterMay15_v2_2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_tighterMay15_v2_6mm_exclusive.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_tighterMay15_v2_6mm_exclusive.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_tighterMay15_v2_8mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/fmri_d100_parcellation_with_PCC_tighterMay15_v2_8mm.nii.gz -------------------------------------------------------------------------------- 
/osl_ephys/source_recon/files/giles_39_binary.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/giles_39_binary.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/reduced_hcp-mmp_2mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/reduced_hcp-mmp_2mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/files/reduced_hcp-mmp_8mm.nii.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/source_recon/files/reduced_hcp-mmp_8mm.nii.gz -------------------------------------------------------------------------------- /osl_ephys/source_recon/freesurfer_utils.py: -------------------------------------------------------------------------------- 1 | """Wrappers for FreeSurfer. 2 | 3 | """ 4 | 5 | # Authors: Mats van Es 6 | # Chetan Gohil 7 | 8 | import os 9 | import os.path as op 10 | import shutil 11 | import subprocess 12 | 13 | 14 | from mne import setup_source_space, write_source_spaces, read_source_spaces, bem 15 | from osl_ephys.utils.logger import log_or_print 16 | 17 | def setup_freesurfer(directory, subjects_dir=None): 18 | """Setup FreeSurfer. 19 | 20 | Parameters 21 | ---------- 22 | directory : str 23 | Path to FreeSurfer installation. 24 | """ 25 | 26 | os.environ["FREESURFERDIR"] = directory 27 | 28 | # Define FREESURFER_HOME 29 | os.environ['FREESURFER_HOME'] = directory 30 | 31 | # Source the SetUpFreeSurfer.sh script and capture the output 32 | setup_cmd = f"source {os.environ['FREESURFER_HOME']}/SetUpFreeSurfer.sh && env" 33 | proc = subprocess.Popen(setup_cmd, stdout=subprocess.PIPE, shell=True, executable='/bin/bash') 34 | output, _ = proc.communicate() 35 | 36 | # Update the current environment with the new variables 37 | for line in output.decode().split('\n'): 38 | if '=' in line: 39 | key, value = line.split('=', 1) 40 | os.environ[key] = value 41 | 42 | # Check that the installation contains a license file 43 | if not op.exists(op.join(directory, "license.txt")): 44 | raise RuntimeError(f"Could not find license file in {directory}. Please visit https://surfer.nmr.mgh.harvard.edu/fswiki/License.") 45 | 46 | # Set subjects_dir 47 | if subjects_dir is not None: 48 | os.environ["SUBJECTS_DIR"] = subjects_dir 49 | 50 | 51 | 52 | def check_freesurfer(): 53 | """Check FreeSurfer is installed.""" 54 | if "FREESURFERDIR" not in os.environ: 55 | raise RuntimeError("Please setup FreeSurfer, e.g. with osl_ephys.source_recon.setup_freesurfer().") 56 | 57 | 58 | def get_freesurfer_filenames(subjects_dir, subject): 59 | """Get paths to all FreeSurfer files. 60 | 61 | Files will be in subjects_dir/subject/. 62 | 63 | Parameters 64 | ---------- 65 | subjects_dir : string 66 | Directory containing the subject directories. 67 | subject : string 68 | Subject directory name to put the coregistration files in. 69 | 70 | Returns 71 | ------- 72 | files : dict 73 | A dict of files generated and used by FreeSurfer. Contains three keys: 74 | - 'surf': containing surface extraction file paths.
75 | - 'coreg': containing coregistration file paths. 76 | - 'fwd_model': containing the forward model file path. 77 | """ 78 | 79 | # Base FreeSurfer directory 80 | fs_dir = op.join(subjects_dir, subject) 81 | if " " in fs_dir: 82 | raise ValueError("subjects_dir cannot contain spaces.") 83 | 84 | # Surfaces files 85 | surfaces_dir = op.join(fs_dir, "surfaces") 86 | os.makedirs(surfaces_dir, exist_ok=True) 87 | surf_files = { 88 | "basedir": surfaces_dir, 89 | "smri_file": op.join(surfaces_dir, f"{subject.split('-')[-1]}.mgz"), # TODO: make more robust 90 | "talairach_xform": op.join(surfaces_dir, "tranforms", "talairach.xfm"), 91 | "bem_brain_surf_file": op.join(surfaces_dir, "bem", "brain.surf"), 92 | "bem_scalp_surf_fif": op.join(surfaces_dir, "bem", f"{subject}-head.fif"), 93 | "bem_inner_skull_surf_file": op.join(surfaces_dir, "bem", "inner_skull.surf"), 94 | "bem_outer_skull_surf_file": op.join(surfaces_dir, "bem", "outer_skull.surf"), 95 | "bem_outer_skin_surf_file": op.join(surfaces_dir, "bem", "outer_skin.surf"), 96 | "bem_ws_brain_surf_file": op.join(surfaces_dir, "bem", "watershed", f"{subject}_brain_surface"), 97 | "bem_ws_inner_skull_surf_file": op.join(surfaces_dir, "bem", "watershed", f"{subject}_inner_skull_surface"), 98 | "std_brain_dir": op.join(os.environ["FREESURFER_HOME"], "subjects", "fsaverage"), 99 | "std_brain_mri": op.join(os.environ["FREESURFER_HOME"], "subjects", "fsaverage", "mri", "T1.mgz"), 100 | "completed": op.join(surfaces_dir, "completed.txt"), 101 | } 102 | 103 | # Coregistration files 104 | coreg_dir = op.join(fs_dir, "coreg") 105 | os.makedirs(coreg_dir, exist_ok=True) 106 | coreg_files = { 107 | "basedir": coreg_dir, 108 | "info_fif_file": op.join(coreg_dir, "info-raw.fif"), 109 | "source_space": op.join(coreg_dir, "space-src.fif"), 110 | "source_space-morph": op.join(coreg_dir, "space-src-morph.fif"), 111 | "coreg_trans": op.join(coreg_dir, "coreg-trans.fif"), 112 | "coreg_html": op.join(coreg_dir, "coreg.html"), 113 | } 114 | 115 | # Forward model filename 116 | fwd = op.join(fs_dir, "model-fwd.fif") 117 | 118 | # All FreeSurfer files 119 | files = {"surf": surf_files, "coreg": coreg_files, "fwd_model": fwd} 120 | 121 | return files 122 | 123 | 124 | def get_coreg_filenames(subjects_dir, subject): 125 | """Files used in coregistration by FreeSurfer. 126 | 127 | Files will be in subjects_dir/subject/. 128 | 129 | Parameters 130 | ---------- 131 | subjects_dir : string 132 | Directory containing the subject directories. 133 | subject : string 134 | Subject directory name to put the coregistration files in. 135 | 136 | Returns 137 | ------- 138 | filenames : dict 139 | A dict of files generated and used by FreeSurfer.
140 | """ 141 | fs_files = get_freesurfer_filenames(subjects_dir, subject) 142 | return fs_files["coreg"] 143 | 144 | 145 | def recon_all(smri_file, subjects_dir, subject): 146 | """Run FreeSurfer's recon-all anatomical pipeline on a structural MRI.""" 147 | os.environ["SUBJECTS_DIR"] = subjects_dir 148 | 149 | move_flag = False 150 | if op.exists(op.join(subjects_dir, subject)): 151 | log_or_print(f'Temporarily saving data to {op.join(subjects_dir, subject + "_freesurfer_temp")} because subject {subject} already exists') 152 | cmd = ['recon-all', '-i', smri_file, '-s', subject + '_freesurfer_temp', '-all'] 153 | move_flag = True 154 | else: 155 | cmd = ['recon-all', '-i', smri_file, '-s', subject, '-all'] 156 | 157 | try: 158 | subprocess.run(cmd, check=True, env=os.environ) 159 | log_or_print(f"recon-all completed successfully for subject {subject}") 160 | except subprocess.CalledProcessError as e: 161 | log_or_print(f"Error running recon-all for subject {subject}: {e}") 162 | 163 | if move_flag: 164 | log_or_print(f'Moving data from {op.join(subjects_dir, subject + "_freesurfer_temp")} to {op.join(subjects_dir, subject)}') 165 | os.rename(op.join(subjects_dir, subject + "_freesurfer_temp"), op.join(subjects_dir, subject)) 166 | 167 | 168 | def make_watershed_bem(outdir, subject, **kwargs): 169 | """Wrapper for :py:func:`mne.bem.make_watershed_bem <mne.bem.make_watershed_bem>` making a watershed BEM with FreeSurfer.""" 170 | 171 | check_freesurfer() 172 | 173 | bem.make_watershed_bem( 174 | subject=subject, 175 | subjects_dir=outdir, 176 | **kwargs 177 | ) 178 | 179 | 180 | def make_fsaverage_src(subjects_dir, spacing='oct6'): 181 | """Set up a source space for the fsaverage template subject.""" 182 | subject = 'fsaverage' 183 | src_fname = get_coreg_filenames(subjects_dir, subject)['source_space'] 184 | 185 | if not op.exists(src_fname): 186 | # need to copy fsaverage from the freesurfer directory to the subjects_dir, because we can't write in the FS dir. 187 | os.makedirs(op.join(subjects_dir, subject), exist_ok=True) 188 | shutil.copytree(op.join(os.environ["FREESURFERDIR"], 'subjects', 'fsaverage'), op.join(subjects_dir, subject), dirs_exist_ok=True) 189 | 190 | src = setup_source_space( 191 | subjects_dir=subjects_dir, 192 | subject=subject, 193 | spacing=spacing, 194 | add_dist="patch", 195 | ) 196 | write_source_spaces(src_fname, src)
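# ---------------------------------------------------------------------------
# Illustrative usage sketch: chaining the helpers above for a single subject.
# The FreeSurfer install path and data paths below are hypothetical.
#
#     from osl_ephys import source_recon
#
#     source_recon.setup_freesurfer("/opt/freesurfer", subjects_dir="/data/fs_subjects")
#     source_recon.recon_all("/data/smri/sub-001_T1w.nii.gz", "/data/fs_subjects", "sub-001")
#     source_recon.make_watershed_bem("/data/fs_subjects", "sub-001")
#     source_recon.make_fsaverage_src("/data/fs_subjects")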
-------------------------------------------------------------------------------- /osl_ephys/source_recon/nii.py: -------------------------------------------------------------------------------- 1 | """Utility functions to work with parcellation nifti files. 2 | 3 | Example code 4 | ------------ 5 | 6 | import os 7 | import os.path as op 8 | from osl_ephys.source_recon import nii 9 | 10 | workingdir = '/Users/woolrich/osl/osl/source_recon/parcellation/files/' 11 | parc_name = 'Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm' 12 | 13 | os.system('fslmaths /Users/woolrich/Downloads/{} {}'.format(parc_name, workingdir)) 14 | 15 | tmpdir = op.join(workingdir, 'tmp') 16 | os.mkdir(tmpdir) 17 | parcel3d_fname = op.join(workingdir, parc_name + '.nii.gz') 18 | parcel4d_fname = op.join(workingdir, parc_name + '_4d.nii.gz') 19 | nii.convert_3dparc_to_4d(parcel3d_fname, parcel4d_fname, tmpdir, 100) 20 | 21 | mni_file = '/Users/woolrich/osl/osl/source_recon/parcellation/files/MNI152_T1_8mm_brain.nii.gz' 22 | spatial_res = 8 # mm 23 | parcel4d_ds_fname = op.join(workingdir, parc_name + '_4d_ds' + str(spatial_res) + '.nii.gz') 24 | nii.spatially_downsample(parcel4d_fname, parcel4d_ds_fname, mni_file, spatial_res) 25 | 26 | os.system('fslmaths /usr/local/fsl/data/atlases/HarvardOxford/HarvardOxford-sub-prob-2mm.nii.gz -thr 50 -bin /Users/woolrich/osl/osl/source_recon/parcellation/files/HarvardOxford-sub-prob-bin-2mm.nii.gz') 27 | 28 | file_in = '/Users/woolrich/osl/osl/source_recon/parcellation/files/Schaefer2018_100Parcels_7Networks_order_FSLMNI152_2mm_4d.nii.gz' 29 | file_out = '/Users/woolrich/osl/osl/source_recon/parcellation/files/HarvOxf-sub-Schaefer100-combined-2mm_4d.nii.gz' 30 | file_append = '/Users/woolrich/osl/osl/source_recon/parcellation/files/HarvardOxford-sub-prob-bin-2mm.nii.gz' 31 | parcel_indices = [3,4,5,6,8,9,10,14,15,16,17,18,19,20] # index from 0 32 | nii.append_4d_parcellation(file_in, file_out, file_append, parcel_indices) 33 | 34 | parc_name = '/Users/woolrich/osl/osl/source_recon/parcellation/files/HarvOxf-sub-Schaefer100-combined-2mm_4d' 35 | parcel4d_fname = op.join(parc_name + '.nii.gz') 36 | mni_file = '/Users/woolrich/osl/osl/source_recon/parcellation/files/MNI152_T1_8mm_brain.nii.gz' 37 | spatial_res = 8 # mm 38 | parcel4d_ds_fname = op.join(parc_name + '_ds' + str(spatial_res) + '.nii.gz') 39 | nii.spatially_downsample(parcel4d_fname, parcel4d_ds_fname, mni_file, spatial_res) 40 | 41 | 42 | fslmaths /Users/woolrich/osl/osl/source_recon/parcellation/files/HarvOxf-sub-Schaefer100-combined-2mm_4d.nii.gz -Tmaxn /Users/woolrich/osl/osl/source_recon/parcellation/files/HarvOxf-sub-Schaefer100-combined-2mm.nii.gz 43 | """ 44 | 45 | # Authors: Mark Woolrich 46 | 47 | import os 48 | import os.path as op 49 | import nibabel as nib 50 | import numpy as np 51 | 52 | 53 | def convert_4dparc_to_3d(parcel4d_fname, parcel3d_fname): 54 | """Convert 4D parcellation to 3D. 55 | 56 | Parameters 57 | ---------- 58 | parcel4d_fname : str 59 | 4D nifti file, where each volume is a parcel 60 | parcel3d_fname : str 61 | 3D nifti output file where each voxel has a value of 0 if not in a parcel, 62 | or 1...p...n_parcels if in parcel p 63 | """ 64 | os.system("fslmaths {} -Tmaxn -add 1 {}".format(parcel4d_fname, parcel3d_fname)) 65 | 66 | 67 | def convert_3dparc_to_4d(parcel3d_fname, parcel4d_fname, tmpdir, n_parcels): 68 | """Convert 3D parcellation to 4D. 69 | 70 | Parameters 71 | ---------- 72 | parcel3d_fname : str 73 | 3D nifti volume where each voxel has a value of 0 if not in a parcel, 74 | or 1...p...n_parcels if in parcel p 75 | parcel4d_fname : str 76 | 4D nifti output file, where each volume is a parcel 77 | tmpdir : str 78 | temp dir to write to. Must exist.
79 | n_parcels : int 80 | Number of parcels 81 | """ 82 | os.system("rm -f {}".format(parcel4d_fname)) 83 | 84 | vol_list_str = " " 85 | for pp in range(n_parcels): 86 | print(pp) 87 | vol_fname = op.join(tmpdir, "parc3d_vol" + str(pp) + ".nii.gz") 88 | os.system("fslmaths {} -thr {} -uthr {} -min 1 {}".format(parcel3d_fname, pp + 0.5, pp + 1.5, vol_fname)) 89 | vol_list_str = vol_list_str + "{} ".format(vol_fname) 90 | 91 | os.system("fslmerge -t {} {}".format(parcel4d_fname, vol_list_str)) 92 | 93 | 94 | def spatially_downsample(file_in, file_out, file_ref, spatial_res): 95 | """Spatially downsample nifti file file_in and write it to file_out. 96 | 97 | Parameters 98 | ---------- 99 | file_in : str 100 | file_out : str 101 | file_ref : str 102 | reference nifti volume at resolution spatial_res 103 | spatial_res : int 104 | new spatial res in mm 105 | 106 | """ 107 | os.system("flirt -in {} -ref {} -out {} -applyisoxfm {}".format(file_in, file_ref, file_out, spatial_res)) 108 | 109 | 110 | def append_4d_parcellation(file_in, file_out, file_append, parcel_indices=None): 111 | """Append volumes in file_append to file_in. 112 | 113 | Parameters 114 | ---------- 115 | file_in : str 116 | file_out : str 117 | file_append : str 118 | parcel_indices : np.ndarray 119 | (n_indices) numpy array containing volume indices (starting from 0) of volumes from file_append to append to file_in 120 | """ 121 | if parcel_indices is None: 122 | nparcels = nib.load(file_append).get_fdata().shape[3] 123 | parcel_indices = np.arange(nparcels) 124 | 125 | vol_list_str = "" 126 | for pp in parcel_indices: 127 | print(pp) 128 | vol_list_str = vol_list_str + "{},".format(pp) 129 | 130 | os.system("fslselectvols -i {} -o {} --vols={}".format(file_append, file_out, vol_list_str)) 131 | os.system("fslmerge -t {} {} {}".format(file_out, file_in, file_out)) -------------------------------------------------------------------------------- /osl_ephys/source_recon/rhino/__init__.py: -------------------------------------------------------------------------------- 1 | """Registration of Headshapes Including Nose in OSL (RHINO). 2 | 3 | """ 4 | 5 | from .coreg import * # noqa: F401, F403 6 | from .polhemus import * # noqa: F401, F403 7 | from .forward_model import * # noqa: F401, F403 8 | from .surfaces import * # noqa: F401, F403 9 | from .fsl_utils import * # noqa: F401, F403 10 | from . import utils # noqa: F401, F403 11 | -------------------------------------------------------------------------------- /osl_ephys/source_recon/rhino/fsl_utils.py: -------------------------------------------------------------------------------- 1 | """Wrappers for FSL and fsleyes. 2 | 3 | """ 4 | 5 | # Authors: Mark Woolrich 6 | # Chetan Gohil 7 | 8 | import os 9 | import os.path as op 10 | 11 | import nibabel as nib 12 | 13 | import osl_ephys.source_recon.rhino.utils as rhino_utils 14 | 15 | 16 | def setup_fsl(directory): 17 | """Setup FSL. 18 | 19 | Parameters 20 | ---------- 21 | directory : str 22 | Path to FSL installation. 23 | """ 24 | if "FSLDIR" not in os.environ: 25 | os.environ["FSLDIR"] = directory 26 | if "{:s}/bin".format(directory) not in os.getenv("PATH"): 27 | os.environ["PATH"] = "{:s}/bin:{:s}".format(directory, os.getenv("PATH")) 28 | if "FSLOUTPUTTYPE" not in os.environ: 29 | os.environ["FSLOUTPUTTYPE"] = "NIFTI_GZ" 30 | 31 | 32 | def check_fsl(): 33 | """Check FSL is installed.""" 34 | if "FSLDIR" not in os.environ: 35 | raise RuntimeError("Please setup FSL, e.g.
with osl_ephys.source_recon.setup_fsl().") 36 | 37 | 38 | def fsleyes(image_list): 39 | """Display a list of nifti images using an external command line call to fsleyes. 40 | 41 | Parameters 42 | ---------- 43 | image_list : string | tuple of strings 44 | Nifti filename or tuple of nifti filenames 45 | 46 | Examples 47 | -------- 48 | fsleyes(image) 49 | fsleyes((image1, image2)) 50 | """ 51 | 52 | # Check if image_list is a single file name 53 | if isinstance(image_list, str): 54 | image_list = (image_list,) 55 | 56 | cmd = "fsleyes " 57 | for img in image_list: 58 | cmd += img 59 | cmd += " " 60 | cmd += "&" 61 | 62 | rhino_utils.system_call(cmd, verbose=True) 63 | 64 | 65 | def fsleyes_overlay(background_img, overlay_img): 66 | """Display overlay_img on top of background_img using an external command line call to fsleyes. 67 | 68 | Parameters 69 | ---------- 70 | background_img : string 71 | Background nifti filename 72 | overlay_img : string 73 | Overlay nifti filename 74 | """ 75 | if isinstance(background_img, str): 76 | if background_img == "mni": 77 | mni_resolution = int(nib.load(overlay_img).header.get_zooms()[0]) 78 | background_img = op.join(os.environ["FSLDIR"], "data/standard/MNI152_T1_{}mm_brain.nii.gz".format(mni_resolution)) 79 | elif background_img[0:3] == "mni": 80 | mni_resolution = int(background_img[3]) 81 | background_img = op.join(os.environ["FSLDIR"], "data/standard/MNI152_T1_{}mm_brain.nii.gz".format(mni_resolution)) 82 | 83 | cmd = "fsleyes {} --volume 0 {} --alpha 100.0 --cmap red-yellow --negativeCmap blue-lightblue --useNegativeCmap &".format(background_img, overlay_img) 84 | rhino_utils.system_call(cmd) 85 |
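# Illustrative calls (paths hypothetical): passing "mni" picks the standard
# MNI brain whose resolution matches the overlay, while e.g. "mni8" forces
# the 8 mm standard brain.
#
#     fsleyes_overlay("mni", "/path/to/parcellation_2mm.nii.gz")
#     fsleyes_overlay("mni8", "/path/to/stat_map_8mm.nii.gz")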
-------------------------------------------------------------------------------- /osl_ephys/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/tests/__init__.py -------------------------------------------------------------------------------- /osl_ephys/tests/test_00_package_canary.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | class TestModuleStructure(unittest.TestCase): 5 | 6 | def test_module_structure(self): 7 | 8 | try: 9 | from .. import utils 10 | except ImportError: 11 | raise Exception("Unable to import 'utils'") 12 | 13 | try: 14 | from .. import maxfilter 15 | except ImportError: 16 | raise Exception("Unable to import 'maxfilter'") 17 | 18 | try: 19 | from .. import preprocessing 20 | except ImportError: 21 | raise Exception("Unable to import 'preprocessing'") 22 | 23 | try: 24 | from .. import report 25 | except ImportError: 26 | raise Exception("Unable to import 'report'") 27 | 28 | try: 29 | from .. import source_recon 30 | except ImportError: 31 | raise Exception("Unable to import 'source_recon'") 32 | 33 | 34 | class TestPackageData(unittest.TestCase): 35 | 36 | @classmethod 37 | def setUpClass(cls): 38 | 39 | cls.testdir = os.path.dirname(os.path.realpath(__file__)) 40 | cls.osldir = os.path.abspath(os.path.join(cls.testdir, '..')) 41 | 42 | def test_simulations_data(self): 43 | 44 | template = os.path.join(self.osldir, 'utils', 'simulation_config', 'megin_template_info.fif') 45 | assert(os.path.exists(template)) 46 | 47 | for ff in ['reduced_mvar_params_mag.npy', 'reduced_mvar_residcov_mag.npy', 48 | 'reduced_mvar_pcacomp_mag.npy', 'reduced_mvar_params_grad.npy', 49 | 'reduced_mvar_residcov_grad.npy', 'reduced_mvar_pcacomp_grad.npy']: 50 | 51 | template = os.path.join(self.osldir, 'utils', 'simulation_config', ff) 52 | assert(os.path.exists(template)) 53 | 54 | def test_channel_data(self): 55 | 56 | template = os.path.join(self.osldir, 'utils', 'neuromag306_info.yml') 57 | assert(os.path.exists(template)) 58 | 59 | def test_parcellation_data(self): 60 | 61 | to_check = ['WTA_fMRI_parcellation_ds2mm.nii.gz', 62 | 'WTA_fMRI_parcellation_ds8mm.nii.gz', 63 | 'dk_cortical.nii.gz', 64 | 'dk_full.nii.gz', 65 | 'fMRI_parcellation_ds2mm.nii.gz', 66 | 'fMRI_parcellation_ds8mm.nii.gz', 67 | 'fmri_d100_parcellation_with_PCC_reduced_2mm.nii.gz', 68 | 'fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz', 69 | 'fmri_d100_parcellation_with_PCC_tighterMay15_v2_2mm.nii.gz', 70 | 'fmri_d100_parcellation_with_PCC_tighterMay15_v2_6mm_exclusive.nii.gz', 71 | 'fmri_d100_parcellation_with_PCC_tighterMay15_v2_8mm.nii.gz', 72 | 'giles_39_binary.nii.gz'] 73 | 74 | for ff in to_check: 75 | template = os.path.join(self.osldir, 'source_recon', 'files', ff) 76 | assert(os.path.exists(template)) 77 | -------------------------------------------------------------------------------- /osl_ephys/tests/test_batch_api.py: -------------------------------------------------------------------------------- 1 | """Tests for passing arguments into batch preprocessing.""" 2 | 3 | import unittest 4 | 5 | import numpy as np 6 | 7 | class TestFunctionFinding(unittest.TestCase): 8 | 9 | def test_find_func_in_mne_wrapper(self): 10 | from ..preprocessing import find_func 11 | from ..preprocessing import mne_wrappers as wrappers 12 | 13 | # Check we're finding some common functions 14 | ff = find_func('notch_filter') 15 | assert(ff == wrappers.run_mne_notch_filter) 16 | 17 | ff = find_func('resample') 18 | assert(ff == wrappers.run_mne_resample) 19 | 20 | ff = find_func('pick_channels') 21 | assert(ff == wrappers.run_mne_pick_channels) 22 | 23 | ff = find_func('pick_types') 24 | assert(ff == wrappers.run_mne_pick_types) 25 | 26 | 27 | def test_find_func_in_mne_object(self): 28 | import functools 29 | from ..preprocessing import find_func 30 | from ..preprocessing import mne_wrappers as wrappers 31 | 32 | # Make sure we have properly set up partial functions based on 33 | # run_mne_anonymous 34 | 35 | ff = find_func('close') 36 | assert(isinstance(ff, functools.partial)) 37 | assert(ff.func == wrappers.run_mne_anonymous) 38 | assert('method' in ff.keywords.keys()) 39 | assert(ff.keywords['method'] == 'close') 40 | 41 | ff = find_func('copy') 42 | assert(isinstance(ff, functools.partial)) 43 | assert(ff.func == wrappers.run_mne_anonymous) 44 | assert('method' in ff.keywords.keys()) 45 | assert(ff.keywords['method'] == 'copy') 46 | 47 | ff = find_func('savgol_filter') 48 | assert(isinstance(ff, functools.partial)) 49 |
assert(ff.func == wrappers.run_mne_anonymous) 50 | assert('method' in ff.keywords.keys()) 51 | assert(ff.keywords['method'] == 'savgol_filter') 52 | 53 | 54 | def test_find_func_in_osl_wrapper(self): 55 | from ..preprocessing import find_func 56 | from ..preprocessing.osl_wrappers import run_osl_bad_segments, run_osl_bad_channels 57 | 58 | # Check we can find OSL wrapper functions - only 2... 59 | ff = find_func('bad_segments') 60 | assert(ff == run_osl_bad_segments) 61 | 62 | ff = find_func('bad_channels') 63 | assert(ff == run_osl_bad_channels) 64 | 65 | 66 | def test_find_func_from_userlist(self): 67 | from ..preprocessing import find_func 68 | from ..preprocessing import print_custom_func_info 69 | 70 | # Check that user func is found first 71 | def filter(x, u): 72 | return x 73 | 74 | ff = find_func('filter', extra_funcs=[filter]) 75 | assert(ff(1, None) == 1) 76 | -------------------------------------------------------------------------------- /osl_ephys/tests/test_batch_preproc.py: -------------------------------------------------------------------------------- 1 | """Tests for running batch preprocessing - doesn't check that it runs properly, 2 | just that it runs....""" 3 | 4 | import unittest 5 | import tempfile 6 | import os 7 | 8 | import mne 9 | import numpy as np 10 | 11 | class TestPreprocessingChain(unittest.TestCase): 12 | 13 | @classmethod 14 | def setUpClass(cls): 15 | from ..utils import simulate_raw_from_template 16 | 17 | cls.flat_channels = [10] 18 | cls.bad_channels = [5, 200] 19 | cls.bad_segments = [(600, 750)] 20 | 21 | cls.raw = simulate_raw_from_template(5000, 22 | flat_channels=cls.flat_channels, 23 | bad_channels=cls.bad_channels, 24 | bad_segments=cls.bad_segments) 25 | 26 | cls.fpath = tempfile.NamedTemporaryFile().name + 'raw.fif' 27 | cls.raw.save(cls.fpath) 28 | 29 | @classmethod 30 | def tearDownClass(cls): 31 | os.remove(cls.fpath) 32 | 33 | def test_simple_chain(self): 34 | from ..preprocessing import run_proc_chain 35 | 36 | cfg = """ 37 | meta: 38 | event_codes: 39 | preproc: 40 | - filter: {l_freq: 1, h_freq: 30} 41 | - notch_filter: {freqs: 50} 42 | - bad_channels: {picks: 'grad'} 43 | - bad_segments: {segment_len: 800, picks: 'grad'} 44 | """ 45 | 46 | dataset = run_proc_chain(cfg, self.fpath) 47 | 48 | # Just testing that things run not that the outputs are sensible... 
49 | assert(isinstance(dataset["raw"], mne.io.fiff.raw.Raw)) 50 | 51 | 52 | class TestVersions(unittest.TestCase): 53 | def test_simple_chain(self): 54 | from ..preprocessing import load_config, check_config_versions 55 | 56 | cfg = """ 57 | meta: 58 | event_codes: 59 | version_assert: 60 | version_warn: 61 | preproc: 62 | - filter: {l_freq: 1, h_freq: 30} 63 | - notch_filter: {freqs: 50} 64 | - bad_channels: {picks: 'grad'} 65 | - bad_segments: {segment_len: 800, picks: 'grad'} 66 | """ 67 | config = load_config(cfg) 68 | 69 | config['meta']['version_assert'] = ['numpy>1.0', 'scipy>1.0'] 70 | config['meta']['version_warn'] = ['mne>1.0'] 71 | 72 | check_config_versions(config) 73 | 74 | 75 | class TestPreprocessingBatch(unittest.TestCase): 76 | 77 | @classmethod 78 | def setUpClass(cls): 79 | from ..utils import simulate_raw_from_template 80 | 81 | cls.infiles = [] 82 | 83 | # First file normal 84 | cls.raw = simulate_raw_from_template(5000) 85 | cls.fpath = tempfile.NamedTemporaryFile().name + 'raw.fif' 86 | cls.raw.save(cls.fpath) 87 | cls.infiles.append(cls.fpath) 88 | 89 | # Second file doesn't exist 90 | cls.fpath = tempfile.NamedTemporaryFile().name + 'raw.fif' 91 | cls.infiles.append(cls.fpath) 92 | 93 | # Third file normal 94 | cls.raw = simulate_raw_from_template(5000) 95 | cls.fpath = tempfile.NamedTemporaryFile().name + 'raw.fif' 96 | cls.raw.save(cls.fpath) 97 | cls.infiles.append(cls.fpath) 98 | 99 | @classmethod 100 | def tearDownClass(cls): 101 | for fpath in cls.infiles: 102 | if os.path.exists(fpath): 103 | os.remove(fpath) 104 | 105 | def test_simple_batch(self): 106 | from ..preprocessing import run_proc_batch 107 | 108 | cfg = """ 109 | meta: 110 | event_codes: 111 | preproc: 112 | - bad_channels: {picks: 'grad'} 113 | - bad_segments: {segment_len: 800, picks: 'grad'} 114 | """ 115 | 116 | # Normal run 117 | td = tempfile.TemporaryDirectory() 118 | goods = run_proc_batch(cfg, self.infiles, outdir=td.name) 119 | 120 | assert(np.all(goods == np.array([1, 0, 1]))) 121 | 122 | 123 | def test_dask_batch(self): 124 | from ..preprocessing import run_proc_batch 125 | from dask.distributed import Client 126 | 127 | cfg = """ 128 | meta: 129 | event_codes: 130 | preproc: 131 | - bad_channels: {picks: 'grad'} 132 | - bad_segments: {segment_len: 800, picks: 'grad'} 133 | """ 134 | 135 | client = Client(n_workers=2, threads_per_worker=1) 136 | td = tempfile.TemporaryDirectory() 137 | 138 | goods = run_proc_batch(cfg, self.infiles, 139 | outdir=td.name, 140 | dask_client=True) 141 | 142 | assert(np.all(goods == np.array([1, 0, 1]))) 143 | 144 | client.shutdown() 145 | -------------------------------------------------------------------------------- /osl_ephys/tests/test_glm.py: -------------------------------------------------------------------------------- 1 | """Tests for glm_spectrum and glm_epochs""" 2 | 3 | import unittest 4 | import tempfile 5 | import os 6 | 7 | import mne 8 | import numpy as np 9 | 10 | 11 | class TestGLMSpectrum(unittest.TestCase): 12 | 13 | @classmethod 14 | def setUpClass(cls): 15 | from ..utils import simulate_raw_from_template 16 | 17 | cls.flat_channels = None 18 | cls.bad_channels = None 19 | cls.bad_segments = None 20 | 21 | cls.raw = simulate_raw_from_template(500, 22 | flat_channels=cls.flat_channels, 23 | bad_channels=cls.bad_channels, 24 | bad_segments=cls.bad_segments) 25 | 26 | cls.fpath = tempfile.NamedTemporaryFile().name + 'raw.fif' 27 | cls.raw.save(cls.fpath) 28 | 29 | @classmethod 30 | def tearDownClass(cls): 31 | os.remove(cls.fpath) 32 | 
33 | def test_glm_spectrum(self): 34 | from ..glm import glm_spectrum 35 | 36 | spec = glm_spectrum(self.raw) 37 | 38 | def test_glm_irasa(self): 39 | from ..glm import glm_irasa 40 | 41 | aper, osc = glm_irasa(self.raw) 42 | -------------------------------------------------------------------------------- /osl_ephys/tests/test_parallel.py: -------------------------------------------------------------------------------- 1 | """Tests for the Dask parallel processing utilities.""" 2 | 3 | import unittest 4 | 5 | import numpy as np 6 | from dask.distributed import Client, default_client 7 | 8 | 9 | class TestSimpleDask(unittest.TestCase): 10 | 11 | @classmethod 12 | def setUpClass(cls): 13 | client = Client(n_workers=2, threads_per_worker=1) 14 | 15 | @classmethod 16 | def tearDownClass(cls): 17 | client = default_client() 18 | client.shutdown() 19 | 20 | def test_simple_func(self): 21 | from ..utils.parallel import dask_parallel_bag 22 | 23 | def add_five(x): 24 | return x + 5 25 | 26 | result = dask_parallel_bag(add_five, np.arange(5)) 27 | assert(np.all(result == np.arange(5)+5)) 28 | 29 | def test_simple_func_multiple_inputs(self): 30 | from ..utils.parallel import dask_parallel_bag 31 | 32 | def multiply(x, y): 33 | return x * y 34 | 35 | inputs = [(a, a) for a in np.arange(5)] 36 | 37 | result = dask_parallel_bag(multiply, inputs) 38 | assert(np.all(result == np.array([0, 1, 4, 9, 16]))) 39 | 40 | def test_simple_func_with_fixed_args(self): 41 | from ..utils.parallel import dask_parallel_bag 42 | 43 | def raise_to_power(x, power): 44 | return x**power 45 | 46 | result = dask_parallel_bag(raise_to_power, np.arange(5), 47 | func_args=[2]) 48 | assert(np.all(result == np.array([0, 1, 4, 9, 16]))) 49 | 50 | result = dask_parallel_bag(raise_to_power, np.arange(5), 51 | func_args=[4]) 52 | assert(np.all(result == np.array([0, 1, 16, 81, 256]))) 53 | 54 | def test_simple_func_with_fixed_kwargs(self): 55 | from ..utils.parallel import dask_parallel_bag 56 | 57 | def raise_to_power(x, power=2): 58 | return x**power 59 | 60 | func_kwargs={'power': 2} 61 | result = dask_parallel_bag(raise_to_power, np.arange(5), 62 | func_kwargs=func_kwargs) 63 | assert(np.all(result == np.array([0, 1, 4, 9, 16]))) 64 | 65 | func_kwargs={'power': 4} 66 | result = dask_parallel_bag(raise_to_power, np.arange(5), 67 | func_kwargs=func_kwargs) 68 | assert(np.all(result == np.array([0, 1, 16, 81, 256]))) 69 | 70 | def test_simple_func_with_everything(self): 71 | from ..utils.parallel import dask_parallel_bag 72 | 73 | def multiply_and_raise_to_power(x, y, const, power=2): 74 | return (x * y)**power + const 75 | 76 | inputs = [(a, a+2) for a in np.arange(5)] 77 | 78 | func_kwargs={'power': 2} 79 | result = dask_parallel_bag(multiply_and_raise_to_power, inputs, 80 | func_args=[5], 81 | func_kwargs=func_kwargs) 82 | assert(np.all(result == np.array([5, 14, 69, 230, 581]))) 83 | -------------------------------------------------------------------------------- /osl_ephys/utils/README.md: -------------------------------------------------------------------------------- 1 | # OSL Utilities 2 | 3 | OSL Utility functions 4 | -------------------------------------------------------------------------------- /osl_ephys/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import os 3 | from . import simulate # noqa: F401, F403 4 | from .
import logger # noqa: F401, F403 5 | from .study import Study # noqa: F401, F403 6 | from .file_handling import * # noqa: F401, F403 7 | from .spmio import SPMMEEG # noqa: F401, F403 8 | from .parallel import dask_parallel_bag # noqa: F401, F403 9 | from .simulate import * # noqa: F401, F403 10 | from .opm import * # noqa: F401, F403 11 | from .package import soft_import, run_package_tests # noqa: F401, F403 12 | from .version_utils import check_version # noqa: F401, F403 13 | from . import run_func # noqa: F401, F403 14 | 15 | with open(os.path.join(os.path.dirname(__file__), "README.md"), 'r') as f: 16 | __doc__ = f.read() 17 | -------------------------------------------------------------------------------- /osl_ephys/utils/logger.py: -------------------------------------------------------------------------------- 1 | """Logging module for osl-ephys. 2 | 3 | Heavily inspired by logging in OSL. 4 | """ 5 | 6 | # Authors: Andrew Quinn 7 | # Chetan Gohil 8 | 9 | import yaml 10 | import logging 11 | import logging.config 12 | # Initialise logging for this sub-module 13 | osl_logger = logging.getLogger("osl_ephys") 14 | osl_logger.setLevel(logging.WARNING) 15 | 16 | #%% ------------------------------------------------------------ 17 | 18 | 19 | default_config = """ 20 | version: 1 21 | loggers: 22 | osl_ephys: 23 | level: DEBUG 24 | handlers: [console, file] 25 | propagate: false 26 | 27 | handlers: 28 | console: 29 | class : logging.StreamHandler 30 | formatter: brief 31 | level : DEBUG 32 | stream : ext://sys.stdout 33 | file: 34 | class : logging.handlers.RotatingFileHandler 35 | formatter: verbose 36 | filename: {log_file} 37 | backupCount: 3 38 | maxBytes: 102400 39 | 40 | formatters: 41 | brief: 42 | format: '{prefix} %(message)s' 43 | default: 44 | format: '[%(asctime)s] {prefix} %(levelname)-8s : %(message)s' 45 | datefmt: '%H:%M:%S' 46 | verbose: 47 | format: '[%(asctime)s] {prefix} - %(levelname)s - osl-ephys.%(module)s:%(lineno)s : %(message)s' 48 | datefmt: '%Y-%m-%d %H:%M:%S' 49 | 50 | disable_existing_loggers: true 51 | 52 | """ 53 | 54 |
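# Example (illustrative): the YAML above is formatted with `prefix` and
# `log_file` and passed to logging.config.dictConfig by set_up() below, e.g.
#
#     from osl_ephys.utils import logger
#     logger.set_up(prefix='sub-001', log_file='osl-preproc.log', level='INFO')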
55 | def set_up(prefix='', log_file=None, level=None, console_format=None, startup=True): 56 | """Initialise the osl-ephys module osl_logger. 57 | 58 | Parameters 59 | ---------- 60 | prefix : str 61 | Optional prefix to attach to osl_logger output 62 | log_file : str 63 | Optional path to a log file to record osl_logger output 64 | level : {'CRITICAL', 'WARNING', 'INFO', 'DEBUG'} 65 | String indicating initial logging level 66 | console_format : str 67 | Formatting string for console logging. 68 | 69 | """ 70 | # Format config with user options 71 | if (len(prefix) > 0) and (console_format != 'verbose'): 72 | prefix = prefix + ' :' 73 | new_config = default_config.format(prefix=prefix, log_file=log_file) 74 | # Load config to dict 75 | new_config = yaml.load(new_config, Loader=yaml.FullLoader) 76 | 77 | # Remove log file from dict if not user requested 78 | if log_file is None: 79 | new_config['loggers']['osl_ephys']['handlers'] = ['console'] 80 | del new_config['handlers']['file'] 81 | 82 | # Configure osl_logger with dict 83 | logging.config.dictConfig(new_config) 84 | 85 | 86 | # Customise options 87 | if level is not None: 88 | set_level(level) 89 | if console_format is not None: 90 | set_format(formatter=console_format, prefix=prefix) 91 | 92 | if startup: 93 | # Say hello 94 | osl_logger.info('osl-ephys Logger Started') 95 | 96 | # Print some info 97 | if log_file is not None: 98 | osl_logger.info('logging to file: {0}'.format(log_file)) 99 | 100 | # Attribute to let us know if we have setup the OSL logger 101 | osl_logger.already_setup = True 102 | 103 | 104 | def set_level(level, handler='console'): 105 | """Set new logging level for osl-ephys module. 106 | 107 | Parameters 108 | ---------- 109 | level : {'CRITICAL', 'WARNING', 'INFO', 'DEBUG'} 110 | String indicating new logging level 111 | handler : str 112 | The handler to set the level for. Defaults to 'console'. 113 | """ 114 | osl_logger = logging.getLogger('osl_ephys') 115 | for hdlr in osl_logger.handlers: 116 | if hdlr.get_name() == handler: 117 | if level in ['INFO', 'DEBUG']: 118 | osl_logger.info("osl-ephys osl_logger: handler '{0}' level set to '{1}'".format(hdlr.get_name(), level)) 119 | hdlr.setLevel(getattr(logging, level)) 120 | 121 | 122 | def get_level(handler='console'): 123 | """Return current logging level for osl-ephys module. 124 | 125 | Parameters 126 | ---------- 127 | handler : str 128 | The handler to get the level for. Defaults to 'console'. 129 | 130 | Returns 131 | ------- 132 | level : {'CRITICAL', 'WARNING', 'INFO', 'DEBUG'} 133 | String indicating current logging level 134 | 135 | """ 136 | osl_logger = logging.getLogger('osl_ephys') 137 | for hdlr in osl_logger.handlers: 138 | if hdlr.get_name() == handler: 139 | return hdlr.level 140 | 141 | 142 | def log_or_print(msg, warning=False): 143 | """Execute logger.info if an OSL logger has been setup, otherwise print. 144 | 145 | Parameters 146 | ---------- 147 | msg : str 148 | Message to log/print. 149 | warning : bool 150 | Is the msg a warning? Defaults to False, which will print info. 151 | """ 152 | if warning: 153 | msg = f"WARNING: {msg}" 154 | if hasattr(osl_logger, "already_setup"): 155 | osl_logger.info(msg) 156 | else: 157 | print(msg) 158 | -------------------------------------------------------------------------------- /osl_ephys/utils/misc.py: -------------------------------------------------------------------------------- 1 | """Miscellaneous utility classes and functions. 2 | 3 | """ 4 | 5 | import logging 6 | import random 7 | import numpy as np 8 | 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | def set_random_seed(seed=None): 14 | """Set all random seeds. 15 | 16 | This includes Python's random module and NumPy. 17 | 18 | Parameters 19 | ---------- 20 | seed : int, optional 21 | Random seed. If None, a seed is chosen at random.
22 | """ 23 | if seed is None: 24 | seed = random.randint(0, 2**32 - 1) 25 | 26 | logger.info(f"Setting random seed to {seed}") 27 | 28 | random.seed(seed) 29 | np.random.seed(seed) 30 | return seed -------------------------------------------------------------------------------- /osl_ephys/utils/package.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | 4 | # Housekeeping for logging 5 | import logging 6 | logger = logging.getLogger(__name__) 7 | 8 | def soft_import(package): 9 | """Try to import a package raising friendly error if not present.""" 10 | try: 11 | module = importlib.import_module(package) 12 | except (ImportError, ModuleNotFoundError): 13 | msg = f"Package '{package}' is required for this " 14 | msg += "function to run but cannot be imported. " 15 | msg += "Please install it into your python environment to continue." 16 | raise ModuleNotFoundError(msg) 17 | 18 | return module 19 | 20 | 21 | def run_package_tests(): 22 | """Run OSL tests from within python 23 | 24 | https://docs.pytest.org/en/7.1.x/how-to/usage.html 25 | 26 | Notes 27 | ----- 28 | Calling pytest.main() will result in importing your tests and any modules 29 | that they import. Due to the caching mechanism of python’s import system, 30 | making subsequent calls to pytest.main() from the same process will not 31 | reflect changes to those files between the calls. For this reason, making 32 | multiple calls to pytest.main() from the same process 33 | (in order to re-run tests, for example) is not recommended. 34 | 35 | """ 36 | import pytest 37 | 38 | thisdir = os.path.dirname(os.path.realpath(__file__)) 39 | installdir = os.path.abspath(os.path.join(thisdir, '..')) 40 | canarypth = os.path.join(installdir, 'tests', 'test_00_package_canary.py') 41 | print(installdir) 42 | 43 | out = pytest.main(['-x', canarypth]) 44 | 45 | -------------------------------------------------------------------------------- /osl_ephys/utils/parallel.py: -------------------------------------------------------------------------------- 1 | """Utility functions for parallel processing. 2 | 3 | """ 4 | 5 | # Authors: Andrew Quinn 6 | 7 | from functools import partial 8 | import dask.bag as db 9 | from dask.distributed import Client, LocalCluster, wait, default_client 10 | 11 | # Housekeeping for logging 12 | import logging 13 | osl_logger = logging.getLogger(__name__) 14 | 15 | 16 | def dask_parallel_bag(func, iter_args, 17 | func_args=None, func_kwargs=None): 18 | """A maybe more consistent alternative to ``dask_parallel``. 19 | 20 | Parameters 21 | --------- 22 | func : function 23 | The function to run in parallel. 24 | iter_args : list 25 | A list of iterables to pass to func. 26 | func_args : list, optional 27 | A list of positional arguments to pass to func. 28 | func_kwargs : dict, optional 29 | A dictionary of keyword arguments to pass to func. 30 | 31 | Returns 32 | ------- 33 | flags : list 34 | A list of return values from func. 
35 | 36 | References 37 | ---------- 38 | https://docs.dask.org/en/stable/bag.html 39 | 40 | """ 41 | 42 | func_args = [] if func_args is None else func_args 43 | func_kwargs = {} if func_kwargs is None else func_kwargs 44 | 45 | # Get connection to currently active cluster 46 | client = default_client() 47 | 48 | # Print some helpful info 49 | osl_logger.info('Dask Client : {0}'.format(client.__repr__())) 50 | osl_logger.info('Dask Client dashboard link: {0}'.format(client.dashboard_link)) 51 | 52 | osl_logger.debug('Running function : {0}'.format(func.__repr__())) 53 | osl_logger.debug('User args : {0}'.format(func_args)) 54 | osl_logger.debug('User kwargs : {0}'.format(func_kwargs)) 55 | 56 | # Set kwargs - need to handle args on function call to preserve order. 57 | run_func = partial(func, **func_kwargs) 58 | osl_logger.info('Function defined : {0}'.format(run_func)) 59 | 60 | # Ensure input iter_args is a list of lists 61 | if not all(isinstance(aa, (list, tuple)) for aa in iter_args): 62 | iter_args = [[aa] for aa in iter_args] 63 | 64 | # Add fixed positional args if specified 65 | if func_args: 66 | iter_args = [list(aa) + func_args for aa in iter_args] 67 | 68 | # Make dask bag from inputs: https://docs.dask.org/en/stable/bag.html 69 | b = db.from_sequence(iter_args) 70 | 71 | # Map iterable arguments to function using dask bag + current client 72 | bm = b.starmap(run_func) 73 | 74 | # Actually run the computation 75 | flags = bm.compute() 76 | 77 | osl_logger.info('Computation complete') 78 | 79 | return flags -------------------------------------------------------------------------------- /osl_ephys/utils/run_func.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import sys 3 | 4 | def main(argv=None): 5 | # sys.argv[1] is the function name 6 | # sys.argv[2:] are the arguments to the function 7 | # e.g.
python -m osl_ephys.utils.run_func my_func arg1 arg2 8 | # will call my_func(arg1, arg2) 9 | if argv is None: 10 | argv = sys.argv[1:] 11 | 12 | func_name = argv[0] 13 | func_args = argv[1:] 14 | 15 | # iteratively open each (sub)module until we reach the function 16 | for ii, mod in enumerate(func_name.split('.')): 17 | if ii == 0: 18 | module = importlib.import_module(mod) 19 | else: 20 | module = getattr(module, mod, None) or importlib.import_module(module.__name__ + '.' + mod) 21 | func = module 22 | 23 | # convert the string arguments 'None', 'True' and 'False' to their Python values 24 | for ii in range(len(func_args)): 25 | if isinstance(func_args[ii], str): 26 | if func_args[ii] == 'None': 27 | func_args[ii] = None 28 | elif func_args[ii] == 'True': 29 | func_args[ii] = True 30 | elif func_args[ii] == 'False': 31 | func_args[ii] = False 32 | 33 | # run the function 34 | func(*func_args) 35 | 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/__init__.py: -------------------------------------------------------------------------------- 1 | from .simulate import simulate_raw_from_template 2 | -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/megin_template_info.fif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/megin_template_info.fif -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/reduced_mvar_params_grad.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/reduced_mvar_params_grad.npy -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/reduced_mvar_params_mag.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/reduced_mvar_params_mag.npy -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/reduced_mvar_pcacomp_grad.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/reduced_mvar_pcacomp_grad.npy -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/reduced_mvar_pcacomp_mag.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/reduced_mvar_pcacomp_mag.npy -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/reduced_mvar_residcov_grad.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/reduced_mvar_residcov_grad.npy -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/reduced_mvar_residcov_mag.npy:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/OHBA-analysis/osl-ephys/5148680f2d85b24bc1cd6720174ed918fcc17cbe/osl_ephys/utils/simulation_config/reduced_mvar_residcov_mag.npy -------------------------------------------------------------------------------- /osl_ephys/utils/simulation_config/simulate.py: -------------------------------------------------------------------------------- 1 | import os 2 | import mne 3 | import sails 4 | import numpy as np 5 | 6 | 7 | def simulate_data(model, num_samples=1000, num_realisations=1, use_cov=True): 8 | num_sources = model.nsignals 9 | 10 | # Preallocate output 11 | Y = np.zeros((num_sources, num_samples, num_realisations)) 12 | 13 | for ep in range(num_realisations): 14 | 15 | # Create driving noise signal 16 | Y[:, :, ep] = np.random.randn(num_sources, num_samples) 17 | 18 | if use_cov: 19 | C = np.linalg.cholesky(model.resid_cov) 20 | Y[:, :, ep] = C.dot(Y[:, :, ep]) # colour the noise so its covariance matches resid_cov 21 | 22 | # Main loop - accumulate the AR contribution from each lag 23 | for t in range(model.order, num_samples): 24 | for p in range(1, model.order): 25 | Y[:, t, ep] += model.parameters[:, :, p].dot(Y[:, t-p, ep]) 26 | return Y 27 | 28 | 29 | def simulate_raw_from_template(sim_samples, bad_segs=None): 30 | 31 | basedir = os.path.dirname(os.path.realpath(__file__)) 32 | info = mne.io.read_info(os.path.join(basedir, 'megin_template_info.fif')) 33 | 34 | Y = np.zeros((306, sim_samples)) 35 | for mod in ['mag', 'grad']: 36 | red_model = sails.AbstractLinearModel() 37 | fname = 'reduced_mvar_params_{0}.npy'.format(mod) 38 | red_model.parameters = np.load(os.path.join(basedir, fname)) 39 | fname = 'reduced_mvar_residcov_{0}.npy'.format(mod) 40 | red_model.resid_cov = np.load(os.path.join(basedir, fname)) 41 | red_model.delay_vect = np.arange(20) 42 | fname = 'reduced_mvar_pcacomp_{0}.npy'.format(mod) 43 | pcacomp = np.load(os.path.join(basedir, fname)) 44 | 45 | Xsim = simulate_data(red_model, num_samples=sim_samples) * 2e-12 46 | Xsim = pcacomp.T.dot(Xsim[:, :, 0])[:, :, None] # back to full space 47 | 48 | Y[mne.pick_types(info, meg=mod), :] = Xsim[:, :, 0] 49 | 50 | 51 | 52 | sim = mne.io.RawArray(Y, info) 53 | with sim.info._unlock(): # MNE does not allow setting sfreq directly on a locked Info object 54 | sim.info['sfreq'] = 150 55 | 56 | if bad_segs is not None: 57 | raise NotImplementedError("Simulating bad segments is not yet supported.") 58 | 59 | return sim 60 | 61 | 62 | def simulate_rest_mvar(raw, sim_samples, 63 | mvar_pca=32, mvar_order=12, 64 | picks=None, modalities=None, drop_dig=False): 65 | """Best used on low-sample-rate data (<200 Hz);
FIF files only for now.""" 66 | 67 | if modalities is None: 68 | modalities = ['mag', 'grad'] 69 | 70 | # Fit model and simulate data 71 | Y = np.zeros((raw.info['nchan'], sim_samples)) 72 | for mod in modalities: 73 | X = raw.get_data(picks=mod) 74 | X = X[:, 5000:45000] * 1e12 # use a mid-recording segment, rescaled for numerical stability 75 | 76 | red_model, full_model, pca = sails.modelfit.pca_reduced_fit(X, np.arange(mvar_order), mvar_pca) 77 | 78 | scale = X.std() / 1e12 79 | Xsim = simulate_data(red_model, num_samples=sim_samples) * scale 80 | Xsim = pca.components.T.dot(Xsim[:, :, 0])[:, :, None] # back to full space 81 | 82 | Y[mne.pick_types(raw.info, meg=mod), :] = Xsim[:, :, 0] 83 | 84 | # Create data info for simulated object 85 | info = mne.io.anonymize_info(raw.info.copy()) 86 | info['description'] = 'osl-ephys Simulated Dataset' 87 | info['experimenter'] = 'osl-ephys' 88 | info['proj_name'] = 'osl_simulate' 89 | info['subject_info'] = {'id': 0, 'first_name': 'osl-ephys', 'last_name': 'Simulated Data'} 90 | if drop_dig: 91 | info.pop('dig') 92 | 93 | if picks is None: 94 | picks = {'meg': True, 'eeg': False, 95 | 'eog': False, 'ecg': False, 96 | 'stim': False, 'misc': False} 97 | 98 | info = mne.pick_info(info, mne.pick_types(info, **picks)) 99 | 100 | sim = mne.io.RawArray(Y, info) 101 | 102 | return sim 103 | -------------------------------------------------------------------------------- /osl_ephys/utils/spmio/README.md: -------------------------------------------------------------------------------- 1 | # A Python data reader for SPM format files 2 | 3 | A simple(ish) data reader for SPM files. This is intended to provide basic SPM-file data reading capability in osl-ephys. The data are loaded into an SPM-like Python object, HOWEVER this is only intended to allow the reading of already-preprocessed data, NOT to facilitate mixing preprocessing between SPM and Python. 4 | 5 | For example, an intended usage would be to complete a whole preprocessing pipeline in SPM before loading the data into Python to run a GLM using general-purpose tools. A non-intended usage would be to run part of a preprocessing pipeline in SPM before loading data into Python for source reconstruction. These MEG-specific analyses use lots of metadata which are often assumed to be in a specific format. We cannot guarantee compatibility between how sensor locations are represented in SPM compared to MNE-Python. (However, the reverse is more likely to be true - MNE-Python can save fif files which are easily readable by SPM.) 6 | 7 | 8 | ## Example usage 9 | 10 | Files can be loaded through the `SPMMEEG` object. 11 | 12 | ``` 13 | D = osl_ephys.utils.spmio.SPMMEEG('/path/to/my/spmfile.mat') 14 | ``` 15 | 16 | A summary of the file contents can then be printed to the screen. 17 | 18 | ``` 19 | D.print_info() 20 | ``` 21 | 22 | producing the output: 23 | 24 | ``` 25 | SPM M/EEG data object - Loaded by osl-ephys 26 | Type: continuous 27 | Transform: {'ID': 'time'} 28 | 1 conditions 29 | 388 channels 30 | 270400 samples/trial 31 | 1 trials 32 | Sampling frequency 400Hz 33 | Loaded from : /Users/andrew/Projects/ntad/analysis/ox_processed/dmmn_bl_raw_tsss.mat 34 | 35 | Montages available : 1 36 | 0 : AFRICA denoised data 37 | Use syntax 'X = D.get_data(montage_index)[channels, samples, trials]' to get data 38 | ``` 39 | 40 | The data are loaded into a memory-mapped array and can be accessed using the `get_data` method. 41 | 42 | ``` 43 | X = D.get_data() 44 | ``` 45 | 46 | A montage can be applied to the data by specifying the corresponding montage index in `get_data`.
In this case only one montage is available: an AFRICA-denoised montage with index zero. 47 | 48 | ``` 49 | X_ica = D.get_data(0) 50 | ``` 51 | 52 | Metadata can be accessed using helper methods similar to those on the MATLAB SPM object. 53 | 54 | ``` 55 | D.nsamples 56 | D.nchannels 57 | D.ntrials 58 | 59 | D.condlist 60 | 61 | planar_inds = D.indchantype('MEGPLANAR') 62 | ``` 63 | 64 | ## Sources 65 | This was compiled from previous scripts written by Evan Roberts (https://github.com/evanr70/py_spm) and Mark Hymers. 66 | -------------------------------------------------------------------------------- /osl_ephys/utils/spmio/__init__.py: -------------------------------------------------------------------------------- 1 | from .spmmeeg import SPMMEEG 2 | -------------------------------------------------------------------------------- /osl_ephys/utils/spmio/_data.py: -------------------------------------------------------------------------------- 1 | """Classes relating to the format and storage of MEEG data and sensors.""" 2 | 3 | 4 | import numpy as np 5 | from ._spmmeeg_utils import empty_to_none 6 | 7 | #%% ------------------------------------------------------------- 8 | 9 | KNOWN_DTYPEIDS = { 10 | 16: np.dtype('<f4'), -------------------------------------------------------------------------------- /osl_ephys/utils/spmio/_spmmeeg_utils.py: -------------------------------------------------------------------------------- 2 | 3 | import numpy as np 4 | 5 | 6 | def _empty_to_val(var, new_val): 7 | return new_val if isinstance(var, np.ndarray) and var.size == 0 else var 8 | 9 | 10 | def empty_to_none(var): 11 | return _empty_to_val(var, None) 12 | 13 | 14 | def empty_to_zero(var): 15 | return _empty_to_val(var, 0) 16 | 17 | 18 | def check_lowered_string(array, search_term): 19 | return np.char.find(np.char.lower(array), search_term.lower()) != -1 20 | -------------------------------------------------------------------------------- /osl_ephys/utils/study.py: -------------------------------------------------------------------------------- 1 | # Authors: 2 | # Andrew Quinn 3 | # Mats van Es 4 | 5 | import re 6 | import glob 7 | import parse 8 | from string import Formatter 9 | 10 | class Study: 11 | """Class for simple file finding and looping. 12 | 13 | Parameters 14 | ---------- 15 | studydir : str 16 | The study directory with wildcards. 17 | 18 | Attributes 19 | ---------- 20 | studydir : str 21 | The study directory with wildcards. 22 | fieldnames : list 23 | The wildcards in the study directory, i.e., the field names in between {braces}. 24 | globdir : str 25 | The study directory with wildcards replaced with *. 26 | match_files : list 27 | The files that match the globdir. 28 | match_values : list 29 | The values of the field names (i.e., wildcards) for each file. 30 | fields : dict 31 | The field names and values for each file. 32 | 33 | Notes 34 | ----- 35 | This class is a simple wrapper around glob and parse. It works something like this: 36 | 37 | >>> studydir = '/path/to/study/{subject}/{session}/{subject}_{task}.fif' 38 | >>> study = Study(studydir) 39 | 40 | Get all files in the study directory: 41 | 42 | >>> study.get() 43 | 44 | Get all files for a particular subject: 45 | 46 | >>> study.get(subject='sub-01') 47 | 48 | Get all files for a particular subject and session: 49 | 50 | >>> study.get(subject='sub-01', session='ses-01') 51 | 52 | The fieldnames that are not specified in ``get`` are replaced with wildcards (``*``). 53 | """ 54 | 55 | def __init__(self, studydir): 56 | """ 57 | Notes 58 | ----- 59 | This class is a simple wrapper around glob and parse.
It works something like this: 60 | 61 | >>> studydir = '/path/to/study/{subject}/{session}/{subject}_{task}.fif' 62 | >>> study = Study(studydir) 63 | 64 | Get all files in the study directory: 65 | 66 | >>> study.get() 67 | 68 | Get all files for a particular subject: 69 | 70 | >>> study.get(subject='sub-01') 71 | 72 | Get all files for a particular subject and session: 73 | 74 | >>> study.get(subject='sub-01', session='ses-01') 75 | 76 | The fieldnames that are not specified in ``get`` are replaced with wildcards (``*``). 77 | """ 78 | self.studydir = studydir 79 | 80 | # Extract field names in between {braces} 81 | self.fieldnames = [fname for _, fname, _, _ in Formatter().parse(self.studydir) if fname] 82 | 83 | # Replace braces with wildcards 84 | self.globdir = re.sub(r"\{.*?\}", "*", studydir) 85 | 86 | self.match_files = sorted(glob.glob(self.globdir)) 87 | print('found {} files'.format(len(self.match_files))) 88 | 89 | self.match_files = [ff for ff in self.match_files if parse.parse(self.studydir, ff) is not None] 90 | print('keeping {} consistent files'.format(len(self.match_files))) 91 | 92 | self.match_values = [] 93 | for fname in self.match_files: 94 | self.match_values.append(parse.parse(self.studydir, fname).named) 95 | 96 | self.fields = {} 97 | # Use the first file as a reference for keywords (if any files were found) 98 | if self.match_values: 99 | for key, value in self.match_values[0].items(): 100 | self.fields[key] = [value] 101 | self.fields[key].extend(d[key] for d in self.match_values[1:]) 102 | 103 | 104 | def refresh(self): 105 | """Refresh the study directory.""" 106 | return self.__init__(self.studydir) 107 | 108 | 109 | def get(self, check_exist=True, **kwargs): 110 | """Get files from the study directory that match the fieldnames. 111 | 112 | Parameters 113 | ---------- 114 | check_exist : bool 115 | Whether to check if the files exist. 116 | **kwargs : dict 117 | The field names and values to match. 118 | 119 | Returns 120 | ------- 121 | out : list 122 | The files that match the field names and values. 123 | 124 | Notes 125 | ----- 126 | Example using ``Study`` and ``Study.get()``: 127 | 128 | >>> studydir = '/path/to/study/{subject}/{session}/{subject}_{task}.fif' 129 | >>> study = Study(studydir) 130 | 131 | Get all files in the study directory: 132 | 133 | >>> study.get() 134 | 135 | Get all files for a particular subject: 136 | 137 | >>> study.get(subject='sub-01') 138 | 139 | Get all files for a particular subject and session: 140 | 141 | >>> study.get(subject='sub-01', session='ses-01') 142 | 143 | The fieldnames that are not specified in ``get`` are replaced with wildcards (``*``).
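Passing ``check_exist=False`` skips the comparison against the files found when the ``Study`` was created and simply returns the raw glob result: >>> study.get(subject='sub-01', check_exist=False)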
144 | """ 145 | keywords = {} 146 | for key in self.fieldnames: 147 | keywords[key] = kwargs.get(key, '*') 148 | 149 | fname = self.studydir.format(**keywords) 150 | 151 | # we only want the valid files 152 | if check_exist: 153 | return [ff for ff in glob.glob(fname) if any(ff in ff_valid for ff_valid in self.match_files)] 154 | else: 155 | return glob.glob(fname) 156 | -------------------------------------------------------------------------------- /osl_ephys/utils/trees/mrc_meguk.tree: -------------------------------------------------------------------------------- 1 | {site} 2 | sub-{subject} 3 | anat 4 | sub-{subject}_T1w.nii.gz (t1) 5 | meg 6 | sub-{subject}_task-{task}_meg.fif (fif) 7 | sub-{subject}_task-{task}_meg_ds 8 | sub-{subject}_task-{task}_meg.meg4 (ds) 9 | sub-{subject}_task-{task}_meg 10 | c,rfDC (bti) 11 | -------------------------------------------------------------------------------- /osl_ephys/utils/trees/mrc_meguk_bti.tree: -------------------------------------------------------------------------------- 1 | {site} 2 | sub-{subject} 3 | anat 4 | sub-{subject}_T1w.nii.gz (t1) 5 | meg 6 | sub-{subject}_task-{task}_meg 7 | c,rfDC (bti) 8 | -------------------------------------------------------------------------------- /osl_ephys/utils/trees/mrc_meguk_ctf.tree: -------------------------------------------------------------------------------- 1 | {site} 2 | sub-{subject} 3 | anat 4 | sub-{subject}_T1w.nii.gz (t1) 5 | meg 6 | sub-{subject}_task-{task}_meg.ds 7 | sub-{subject}_task-{task}_meg.meg4 (ds) 8 | -------------------------------------------------------------------------------- /osl_ephys/utils/trees/mrc_meguk_megin.tree: -------------------------------------------------------------------------------- 1 | {site} 2 | derivatives 3 | sub-{subject1} 4 | meg 5 | sub-{subject}_task-{task}_proc-sss_meg.fif (sss_fif) 6 | sub-{subject} 7 | anat 8 | sub-{subject}_T1w.nii.gz (t1) 9 | -------------------------------------------------------------------------------- /osl_ephys/utils/trees/mrc_meguk_processed.tree: -------------------------------------------------------------------------------- 1 | {site} 2 | sub-{subject}_task-{task}_meg_events.npy (events) 3 | sub-{subject}_task-{task}_meg_ica.fif (ica) 4 | sub-{subject}_task-{task}_meg_preproc.log (logfile) 5 | sub-{subject}_task-{task}_meg_preproc.error.log (errorfile) 6 | sub-{subject}_task-{task}_meg_raw.fif (meg) 7 | -------------------------------------------------------------------------------- /osl_ephys/utils/trees/ohba_meg.tree: -------------------------------------------------------------------------------- 1 | {subject}_{task}_bl_tsss_raw.fif (processed_fif) 2 | {subject}_{task}_bl_tsss_epochs.fif (epoched_fif) 3 | {subject}_{task}_bl_tsss_ica.fif (ica) 4 | {subject}_{task}_bl_tsss_events.npy (events) 5 | -------------------------------------------------------------------------------- /osl_ephys/utils/version_utils.py: -------------------------------------------------------------------------------- 1 | from packaging.version import Version, parse 2 | import re 3 | import operator 4 | from importlib.metadata import version 5 | 6 | # Housekeeping for logging 7 | import logging 8 | osl_logger = logging.getLogger(__name__) 9 | 10 | 11 | def _parse_condition(cond): 12 | """Parse strings defining conditional statements. 
13 | 14 | Borrowed from the EMD package. 15 | """ 16 | name = re.split(r'[=<>!]', cond)[0] 17 | comp = cond[len(name):] 18 | 19 | if comp[:2] == '==': 20 | func = operator.eq 21 | elif comp[:2] == '!=': 22 | func = operator.ne 23 | elif comp[:2] == '<=': 24 | func = operator.le 25 | elif comp[:2] == '>=': 26 | func = operator.ge 27 | elif comp[0] == '<': 28 | func = operator.lt 29 | elif comp[0] == '>': 30 | func = operator.gt 31 | else: 32 | raise ValueError("Comparator not recognised in condition '{0}'".format(cond)) 33 | 34 | val = comp.lstrip('!=<>') 35 | 36 | return (name, func, str(val)) 37 | 38 | 39 | def check_version(test_statement, mode='warn'): 40 | """Check whether the version of a package meets a specified condition. 41 | 42 | Parameters 43 | ---------- 44 | test_statement : str 45 | Package version comparison string in the standard format expected by python installs, 46 | e.g. 'osl-ephys<1.0.0' or 'osl-ephys==0.6.dev0' 47 | mode : {'warn', 'assert'} 48 | Flag indicating whether to warn the user or raise an error if the comparison fails 49 | 50 | """ 51 | test_module, comparator, target_version = _parse_condition(test_statement) 52 | 53 | test_version = Version(version(test_module)) 54 | target_version = Version(target_version) 55 | 56 | if not comparator(test_version, target_version): 57 | msg = "Package '{}' version ({}) fails specified requirement ({})" 58 | msg = msg.format(test_module, test_version, test_statement) 59 | 60 | if mode == 'warn': 61 | osl_logger.warning(msg) 62 | elif mode == 'assert': 63 | osl_logger.warning(msg) 64 | raise AssertionError(msg) 65 | -------------------------------------------------------------------------------- /release_notes.md: -------------------------------------------------------------------------------- 1 | OHBA Software Library for the Analysis of Electrophysiology Data (osl-ephys) Release Notes 2 | ========================================= 3 | Information about managing releases of osl-ephys 4 | 5 | General Workflow 6 | ---------------- 7 | 8 | We are going to prepare release vX.Y.Z - for a real release this should contain three numerical values indicating the new version number, e.g. v0.1.2 or v3.4.7. 9 | 10 | - The third digit should be incremented for trivial changes and bugfixes that don't affect the API. 11 | - The second digit should be incremented for minor changes, which could include some API changes. 12 | - The first digit should be incremented for major changes and additions that shift things a lot and may not be backwards compatible. 13 | 14 | The lower digits reset to zero if a higher digit increments; for example, a moderate update to v0.2.4 could become v0.3.0. 15 | 16 | The codebase release versions may contain 'dev' in them - please ignore this for the release itself; it is used to distinguish code installed from source from formal releases during debugging. You'll add it back in step 9. 17 | 18 | Replace vX.Y.Z with the correct release version from here on out!! 19 | 20 | ##### 0 - Ensure relevant branches are merged to main 21 | 22 | Have a look at the current work in progress: is there anything that should be included in a new release? 23 | 24 | 25 | ##### 1 - Create a branch and pull request for the release 26 | 27 | This can be done as normal using your favourite method. Give both a helpful name, like 'release-vX.Y.Z'. 28 | 29 | Make sure that you create the release branch from the current main branch - not a sub-branch; this mistake is easily made in a rush.
30 | 31 | ``` 32 | git checkout main 33 | git remote update 34 | git pull 35 | git checkout -b release-vX.Y.Z 36 | ``` 37 | 38 | ##### 2 - Make sure the tests pass 39 | 40 | Run tests on your local machine, from your root osl-ephys directory, with: 41 | 42 | ``` 43 | conda activate osle 44 | pytest osl_ephys/tests 45 | ``` 46 | 47 | Passes and warnings are fine; you just need to check for failures. 48 | 49 | Note, you may need to install `pytest` with `pip install pytest` to run the tests. 50 | 51 | 52 | ##### 3 - Increment the version numbers across the codebase 53 | 54 | You need to update the version number in the following files to the version number you'd like to release: 55 | 56 | - `setup.py` 57 | - `osl_ephys/__init__.py` 58 | - `doc/source/conf.py` 59 | - `CITATION.cff` 60 | 61 | Don't forget to commit these changes before continuing. 62 | 63 | ##### 4 - Run a local build and make sure you get the right versions 64 | 65 | Run a local install using pip from the local directory: 66 | 67 | ``` 68 | pip install . 69 | ``` 70 | 71 | The final line of output should say 'Successfully installed' and include osl-ephys with the correct version number; if it's incorrect, check the version in `setup.py`. 72 | 73 | Next, start a Python session, import osl_ephys and check `osl_ephys.__version__` - this should show the correct version number; if it is incorrect, check the version in `osl_ephys/__init__.py`. 74 | 75 | 76 | ##### 5 - Tag a new version 77 | 78 | Use git to tag the current branch state with an informative message (that must match the correct version number) and push it to GitHub. 79 | 80 | ``` 81 | git tag -a vX.Y.Z -m "bump to vX.Y.Z" 82 | git push origin vX.Y.Z 83 | ``` 84 | 85 | ##### 6 - Create release on GitHub 86 | 87 | Follow the instructions here to publish the release on GitHub: 88 | 89 | https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository 90 | 91 | ##### 7 - Push release to PyPI 92 | 93 | Create a build/wheel for the tagged version by running 94 | 95 | ``` 96 | python setup.py sdist 97 | python setup.py bdist_wheel --universal 98 | ``` 99 | 100 | and upload to PyPI using twine; ensure you have your credentials to hand. 101 | 102 | ``` 103 | twine upload --skip-existing dist/* 104 | ``` 105 | 106 | Note, you may need to install `twine` with `pip install twine`. 107 | 108 | ##### 8 - TEST EVERYTHING! 109 | 110 | Ask your friends and family to install the released package and let you know if there are any problems. Fix any that come up and repeat steps 1 to 8 with a new version number. 111 | Note: if you test the installation from a `pip install osl-ephys`, make sure you're not opening Python from an osl-ephys directory, because then the directory will be imported rather than the package installed from pip. 112 | 113 | Do not delete broken package releases; make any fixes in a new 'trivial' update. 114 | 115 | 116 | ##### 9 - Update version numbers to include 'dev' 117 | 118 | The same versions you incremented in step 3 should be incremented once more and include 'dev' at the end; this lets us distinguish the tagged release from future work in progress. If we don't do this, then updates to main will have the same version even though they are likely to significantly differ from the release. 119 | 120 | If we just did a minor update and released v0.3.0, we would make the development version v0.4.dev0.
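For example, the version string in `osl_ephys/__init__.py` would then look something like this (illustrative): ``` __version__ = "0.4.dev0" ```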
121 | 122 | If we did a trivial update and released v0.1.2, we would still fix the development version to the next minor release, v0.2.dev0; don't bother incrementing the trivial digit. 123 | 124 | You need to change the version number in the following files: 125 | 126 | - `setup.py` 127 | - `osl_ephys/__init__.py` 128 | 129 | Don't forget to commit these changes before continuing. 130 | 131 | ##### 10 - Push branch and merge into main 132 | 133 | To push use: 134 | 135 | ``` 136 | git push --set-upstream origin release-vX.Y.Z 137 | ``` 138 | Remember to replace vX.Y.Z with the latest version number. 139 | 140 | Then create the pull request. 141 | 142 | Wait for someone to point out which mistake you made. Fix it and then repeat steps 1 to 10 with an appropriate new version number. 143 | 144 | Once you're happy, and if the tests pass, you can merge. 145 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | matplotlib 4 | mne 5 | scikit-learn 6 | fslpy 7 | sails 8 | glmtools 9 | tabulate 10 | pyyaml 11 | neurokit2 12 | jinja2 13 | pandas 14 | nilearn 15 | nibabel 16 | opencv-python 17 | numba 18 | parse 19 | dask 20 | distributed 21 | 22 | # Docs 23 | sphinx 24 | astroid 25 | pydata-sphinx-theme 26 | sphinx_gallery 27 | sphinx-autoapi 28 | numpydoc 29 | 30 | # Dev 31 | setuptools 32 | pytest 33 | pytest-cov 34 | coverage 35 | flake8 36 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=120 3 | extend-ignore=E741,E265,E402 4 | 5 | [options] 6 | packages = 7 | osl_ephys 8 | osl_ephys.glm 9 | osl_ephys.maxfilter 10 | osl_ephys.preprocessing 11 | osl_ephys.report 12 | osl_ephys.source_recon 13 | osl_ephys.source_recon.rhino 14 | osl_ephys.utils 15 | 16 | [build_sphinx] 17 | source-dir = doc/source 18 | build-dir = doc/build 19 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | from setuptools import setup 4 | 5 | # The directory containing this file 6 | HERE = pathlib.Path(__file__).parent 7 | 8 | # The text of the README file 9 | README = (HERE / "README.md").read_text() 10 | 11 | # Requirement categories 12 | reqs = ['numpy', 'scipy', 'matplotlib', 'mne', 'scikit-learn', 'fslpy', 13 | 'sails', 'tabulate', 'pyyaml', 'neurokit2', 'jinja2', 14 | 'glmtools', 'numba', 'nilearn', 'dask', 'distributed', 'parse', 15 | 'opencv-python', 'panel', 'h5io'] 16 | doc_reqs = ['sphinx', 'numpydoc', 'sphinx_gallery', 'pydata-sphinx-theme'] 17 | dev_reqs = ['setuptools', 'pytest', 'pytest-cov', 'coverage', 'flake8'] 18 | 19 | name = 'osl-ephys' 20 | version = '2.4.dev0' 21 | 22 | setup(name=name, 23 | version=version, 24 | description='OHBA Software Library for the analysis of electrophysiological data', 25 | long_description=README, 26 | long_description_content_type="text/markdown", 27 | author='OHBA Analysis Group', 28 | license='MIT', 29 | 30 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 31 | classifiers=[ 32 | 'Development Status :: 4 - Beta', 33 | 34 | # Indicate who your project is intended for 35 | 'Intended Audience :: Science/Research', 36 | 'Topic :: Scientific/Engineering :: Bio-Informatics',
37 | 'Topic :: Scientific/Engineering :: Information Analysis', 38 | 'Topic :: Scientific/Engineering :: Mathematics', 39 | 40 | # Specify the Python versions you support here. In particular, ensure 41 | # that you indicate whether you support Python 2, Python 3 or both. 42 | 'Programming Language :: Python :: 3', 43 | 'Programming Language :: Python :: 3.7', 44 | 'Programming Language :: Python :: 3.8', 45 | 'Programming Language :: Python :: 3.9', 46 | 'Programming Language :: Python :: 3.10', 47 | 'Programming Language :: Python :: 3.11', 48 | 'Programming Language :: Python :: 3.12', 49 | ], 50 | 51 | python_requires='>=3.7', 52 | install_requires=reqs, 53 | extras_require={ 54 | 'dev': dev_reqs, 55 | 'doc': doc_reqs, 56 | 'full': dev_reqs + doc_reqs, 57 | }, 58 | 59 | zip_safe=False, 60 | entry_points={ 61 | 'console_scripts': [ 62 | 'osl_maxfilter = osl_ephys.maxfilter.maxfilter:main', 63 | 'osl_ica_label = osl_ephys.preprocessing.ica_label:main', 64 | 'osl_ica_apply = osl_ephys.preprocessing.ica_label:apply', 65 | 'osl_preproc = osl_ephys.preprocessing.batch:main', 66 | 'osl_func = osl_ephys.utils.run_func:main', 67 | ]}, 68 | 69 | packages=['osl_ephys', 'osl_ephys.tests', 'osl_ephys.report', 'osl_ephys.maxfilter', 70 | 'osl_ephys.preprocessing', 'osl_ephys.utils', 'osl_ephys.utils.spmio', 71 | 'osl_ephys.source_recon', 'osl_ephys.source_recon.rhino', 'osl_ephys.glm'], 72 | 73 | 74 | package_data={'osl_ephys': [# Simulations 75 | 'utils/simulation_config/*npy', 76 | 'utils/simulation_config/*fif', 77 | # Channel information 78 | 'utils/neuromag306_info.yml', 79 | # Parcellation files 80 | 'source_recon/files/*gz', 81 | # Report templates 82 | 'report/templates/*', 83 | # READMEs 84 | '*/README.md']}, 85 | 86 | command_options={ 87 | 'build_sphinx': { 88 | 'project': ('setup.py', name), 89 | 'version': ('setup.py', version), 90 | 'release': ('setup.py', version)}}, 91 | ) 92 | --------------------------------------------------------------------------------