├── .devcontainer
└── cpu
│ └── devcontainer.json
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── config.yml
│ ├── documentation_issue.md
│ └── feature_request.md
└── workflows
│ ├── docs.yml
│ └── test.yml
├── .gitignore
├── .gitmodules
├── .vscode
└── settings.json
├── LICENSE
├── Makefile
├── README.rst
├── binder
├── environment.yml
└── postBuild
├── doc
├── Makefile
├── _static
│ ├── debug.js
│ └── theme_override.css
├── changelog.rst
├── conf.py
├── docs_notes.md
├── experiments
│ ├── all_examples.rst
│ ├── cueing.rst
│ ├── gonogo.rst
│ ├── vn170.rst
│ ├── vp300.rst
│ └── vssvep.rst
├── getting_started
│ ├── analysis.md
│ ├── available_experiments.md
│ ├── data_zipper.md
│ ├── faq.md
│ ├── installation.rst
│ ├── loading_and_saving.md
│ ├── running_experiments.md
│ └── streaming.md
├── img
│ ├── EEG-ExPy_Logo.png
│ ├── FOOOFGroup_report.png
│ ├── FOOOF_report.png
│ ├── attaching_electrode.png
│ ├── bluemuse.PNG
│ ├── brainbit.png
│ ├── cyton.png
│ ├── cyton_daisy.png
│ ├── eeg-notebooks_democratizing_the_cogneuro_experiment.png
│ ├── eeg-notebooks_logo.png
│ ├── extra_electrode.png
│ ├── ganglion.png
│ ├── gtec-unicorn.jpg
│ ├── install_gitbash.png
│ ├── mark_conda_terminal.png
│ ├── miniconda_install_fig.png
│ ├── miniconda_run_install_fig_1.png
│ ├── miniconda_run_install_fig_2.png
│ ├── miniconda_run_install_fig_3.png
│ ├── miniconda_run_install_fig_4.png
│ ├── miniconda_run_install_fig_5.png
│ ├── miniconda_run_install_fig_6.png
│ ├── miniconda_run_install_fig_7.png
│ ├── miniconda_run_install_fig_8.png
│ ├── notion.png
│ ├── spectrum.png
│ ├── windows_default_directory.PNG
│ └── windows_usb_select.PNG
├── index.rst
└── misc
│ ├── about_the_docs.md
│ ├── muse_info.md
│ ├── ntcs_phase1_instructions.md
│ └── using_an_extra_electrode_muse.md
├── eegexpy
└── __init__.py
├── eegnb
├── __init__.py
├── analysis
│ ├── __init__.py
│ ├── analysis_report.html
│ ├── analysis_report.py
│ ├── analysis_utils.py
│ ├── experiment_descriptions
│ │ ├── visual-N170.txt
│ │ └── visual-P300.txt
│ ├── pipelines.py
│ ├── report.html
│ ├── streaming_utils.py
│ ├── styling.css
│ └── utils.py
├── cli
│ ├── __init__.py
│ ├── __main__.py
│ ├── introprompt.py
│ └── utils.py
├── datasets
│ ├── __init__.py
│ └── datasets.py
├── devices
│ ├── __init__.py
│ ├── eeg.py
│ └── utils.py
├── experiments
│ ├── Experiment.py
│ ├── Experiment_readme.txt
│ ├── __init__.py
│ ├── auditory_oddball
│ │ ├── MUSE_conditions.mat
│ │ ├── __init__.py
│ │ ├── aMMN.py
│ │ ├── aob.py
│ │ ├── auditory_erp_arrayin.py
│ │ ├── auditory_erp_aux.py
│ │ └── diaconescu.py
│ ├── auditory_ssaep
│ │ ├── __init__.py
│ │ ├── ssaep.py
│ │ └── ssaep_onefreq.py
│ ├── n170
│ │ └── __init__.py
│ ├── visual_baselinetask
│ │ ├── __init__.py
│ │ └── baseline_task.py
│ ├── visual_codeprose
│ │ ├── __init__.py
│ │ └── codeprose.py
│ ├── visual_cueing
│ │ ├── __init__.py
│ │ └── cueing.py
│ ├── visual_gonogo
│ │ ├── __init__.py
│ │ └── go_nogo.py
│ ├── visual_n170
│ │ ├── __init__.py
│ │ ├── n170.py
│ │ ├── n170_fixedstimorder.py
│ │ ├── n170_fixedstimorder_list.csv
│ │ └── n170_old.py
│ ├── visual_p300
│ │ ├── __init__.py
│ │ ├── p300.py
│ │ └── p300_stripes.py
│ ├── visual_ssvep
│ │ ├── __init__.py
│ │ └── ssvep.py
│ └── visual_vep
│ │ ├── __init__.py
│ │ └── vep.py
└── stimuli
│ ├── __init__.py
│ ├── utils.py
│ └── visual
│ ├── cats_dogs
│ ├── LICENSE.txt
│ ├── nontarget-234836_640.jpg
│ ├── nontarget-274183_640.jpg
│ ├── nontarget-280332_640.jpg
│ ├── nontarget-734689_640.jpg
│ ├── target-2083492_640.jpg
│ ├── target-360807_640.jpg
│ ├── target-468232_640.jpg
│ └── target-76116_640.jpg
│ └── face_house
│ ├── LICENSE.txt
│ ├── faces
│ ├── Annie_1.jpg
│ ├── Annie_2.jpg
│ ├── Annie_3.jpg
│ ├── Annie_4.jpg
│ ├── Blake_1.jpg
│ ├── Blake_2.jpg
│ ├── Blake_3.jpg
│ ├── Blake_4.jpg
│ ├── Don_1.jpg
│ ├── Don_2.jpg
│ ├── Don_3.jpg
│ ├── Don_4.jpg
│ ├── Estelle_1.jpg
│ ├── Estelle_2.jpg
│ ├── Estelle_3.jpg
│ ├── Estelle_4.jpg
│ ├── Frank_1.jpg
│ ├── Frank_2.jpg
│ ├── Frank_3.jpg
│ ├── Frank_4.jpg
│ ├── Janie_1.jpg
│ ├── Janie_2.jpg
│ ├── Janie_3.jpg
│ ├── Janie_4.jpg
│ ├── Joan_1.jpg
│ ├── Joan_2.jpg
│ ├── Joan_3.jpg
│ ├── Joan_4.jpg
│ ├── Jodi_1.jpg
│ ├── Jodi_2.jpg
│ ├── Jodi_3.jpg
│ ├── Jodi_4.jpg
│ ├── Joe_1.jpg
│ ├── Joe_2.jpg
│ ├── Joe_3.jpg
│ ├── Joe_4.jpg
│ ├── Tim_1.jpg
│ ├── Tim_2.jpg
│ ├── Tim_3.jpg
│ ├── Tim_4.jpg
│ ├── Tom_1.jpg
│ ├── Tom_2.jpg
│ ├── Tom_3.jpg
│ ├── Tom_4.jpg
│ ├── Wallace_1.jpg
│ ├── Wallace_2.jpg
│ ├── Wallace_3.jpg
│ └── Wallace_4.jpg
│ └── houses
│ ├── house1.1.jpg
│ ├── house1.2.jpg
│ ├── house1.3.jpg
│ ├── house1.4.jpg
│ ├── house10.1.jpg
│ ├── house10.2.jpg
│ ├── house10.3.jpg
│ ├── house10.4.jpg
│ ├── house11.1.jpg
│ ├── house11.2.jpg
│ ├── house11.3.jpg
│ ├── house11.4.jpg
│ ├── house12.1.jpg
│ ├── house12.2.jpg
│ ├── house12.3.jpg
│ ├── house12.4.jpg
│ ├── house2.1.jpg
│ ├── house2.2.jpg
│ ├── house2.3.jpg
│ ├── house2.4.jpg
│ ├── house3.1.jpg
│ ├── house3.2.jpg
│ ├── house3.3.jpg
│ ├── house3.4.jpg
│ ├── house4.1.jpg
│ ├── house4.2.jpg
│ ├── house4.3.jpg
│ ├── house4.4.jpg
│ ├── house5.1.jpg
│ ├── house5.2.jpg
│ ├── house5.3.jpg
│ ├── house5.4.jpg
│ ├── house6.1.jpg
│ ├── house6.2.jpg
│ ├── house6.3.jpg
│ ├── house6.4.jpg
│ ├── house7.1.jpg
│ ├── house7.2.jpg
│ ├── house7.3.jpg
│ ├── house7.4.jpg
│ ├── house8.1.jpg
│ ├── house8.2.jpg
│ ├── house8.3.jpg
│ ├── house8.4.jpg
│ ├── house9.1.jpg
│ ├── house9.2.jpg
│ ├── house9.3.jpg
│ └── house9.4.jpg
├── environments
├── eeg-expy-docsbuild.yml
├── eeg-expy-full.yml
├── eeg-expy-stimpres.yml
├── eeg-expy-streaming.yml
└── eeg-expy-streamstim.yml
├── examples
├── README.txt
├── auditory_oddball
│ ├── MUSE_conditions.mat
│ ├── auditory_oddball_diaconescu.ipynb
│ └── designMatrix.mat
├── misc
│ ├── __init__.py
│ ├── mac_notebook.ipynb
│ ├── mac_run_exp.py
│ ├── neurobrite_datasets.ipynb
│ ├── neurobrite_datasets.py
│ └── run_experiment.py
├── rest
│ └── Raw_EEG.ipynb
├── sandbox
│ ├── CONTRIBUTING.md
│ ├── LINUX_INSTRUCTIONS.md
│ ├── N170_analysisonly.ipynb
│ ├── README_OLD.md
│ ├── Raw_EEG_linux.ipynb
│ ├── SSVEP_linux.ipynb
│ ├── auditory_erp_arrayin.py
│ ├── auditory_erp_aux.py
│ ├── auditory_oddball_erp_arrayin.ipynb
│ ├── auditory_stim_with_aux.ipynb
│ ├── designMatrix.mat
│ ├── frequency_content_of_eeg_signals.ipynb
│ ├── list_muses.py
│ ├── n170_fil2.py
│ ├── n170_fil_imslist.csv
│ ├── old_notebooks
│ │ ├── Auditory P300 with Muse.ipynb
│ │ ├── Cross-subject classification.ipynb
│ │ ├── Go No Go with Muse.ipynb
│ │ ├── Left-Right visual field with Muse.ipynb
│ │ ├── N170 with Muse.ipynb
│ │ ├── P300 with Extra Electrode.ipynb
│ │ ├── P300 with Muse.ipynb
│ │ ├── SSAEP with Muse.ipynb
│ │ ├── SSVEP with Muse.ipynb
│ │ └── Spatial Frequency Task with Muse.ipynb
│ ├── requirements_mac.txt
│ ├── running_on_binder.md
│ └── test_muse_markers.ipynb
├── visual_cueing
│ ├── 01r__cueing_singlesub_analysis.py
│ ├── 02r__cueing_group_analysis.py
│ ├── 03r__cueing_behaviour_analysis_winter2019.py
│ ├── 04r__cueing_group_analysis_winter2019.py
│ ├── CueingAnalysis_Colab.ipynb
│ ├── CueingBehaviourAnalysis_Colab_Winter2019.ipynb
│ ├── CueingGroupAnalysis_Colab.ipynb
│ ├── CueingGroupAnalysis_Colab_Winter2019.ipynb
│ ├── README.txt
│ ├── cueing.ipynb
│ ├── cueing_group_analysis.ipynb
│ └── cueing_loop.ipynb
├── visual_gonogo
│ └── README.txt
├── visual_n170
│ ├── 00x__n170_run_experiment.py
│ ├── 01r__n170_viz.py
│ ├── 02r__n170_decoding.py
│ ├── README.txt
│ ├── faq.rst
│ └── test_md_file.md
├── visual_p300
│ ├── 00x__p300_run_experiment.py
│ ├── 01r__p300_viz.py
│ ├── 02r__p300_decoding.py
│ └── README.txt
└── visual_ssvep
│ ├── 00x__ssvep_run_experiment.py
│ ├── 01r__ssvep_viz.py
│ ├── 02r__ssvep_decoding.py
│ └── README.txt
├── pyproject.toml
├── requirements.txt
├── setup.cfg
├── setup.py
└── tests
├── test_empty.py
└── test_run_experiments.py
/.devcontainer/cpu/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "EEG-ExPy-CPU",
3 | "image": "mcr.microsoft.com/devcontainers/python:3.8",
4 |
5 | "customizations": {
6 | "vscode": {
7 | "extensions": [
8 | "ms-python.python"
9 | ],
10 | "settings": {
11 | "python.pythonPath": "/usr/local/bin/python"
12 | }
13 | }
14 | },
15 |
16 | "forwardPorts": [
17 | 8000,
18 | 8888,
19 | 5000,
20 | 6000
21 | ],
22 | // Print the Python version and install the project dependencies:
23 | "postCreateCommand": "python --version && pip install -r requirements.txt && pip install -e . && echo 'Dependencies installed'",
24 | "appPort": 8000
25 |
26 | }
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: 🐛 Bug report
3 | about: Report errors or unexpected behavior
4 | title: ''
5 | labels: bug,needs-triage
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## ℹ Computer information
11 |
12 | - Platform OS (e.g. Windows, Mac, Linux, etc.):
13 | - Python Version:
14 | - Brain Interface Used (e.g. Muse, OpenBCI, Notion, etc.):
15 |
16 | ## 📝 Provide detailed reproduction steps (if any)
17 |
18 | 1. …
19 | 2. …
20 | 3. …
21 |
22 | ### ✔️ Expected result
23 |
24 | _What is the expected result of the above steps?_
25 |
26 | ### ❌ Actual result
27 |
28 | _What is the actual result of the above steps?_
29 |
30 | ## 📷 Screenshots
31 |
32 | _Are there any useful screenshots? WinKey+Shift+S (Windows) / Shift+Command+4 (MacOS) and then just paste them directly into the form_
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation_issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F4DA Documentation Issue"
3 | about: Report issues in our documentation
4 | title: ''
5 | labels: docs,needs-triage
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | ## 📝 Provide a description of requested docs changes
13 |
14 | _What is the purpose and what should be changed?_
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "⭐ Feature request"
3 | about: Propose something new.
4 | title: ''
5 | labels: needs-triage
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## 📝 Provide a description of the new feature
11 |
12 | _What is the expected behavior of the proposed feature? What is the scenario this would be used?_
13 |
14 | ---
15 |
16 | If you'd like to see this feature implemented, add a 👍 reaction to this post.
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Docs
2 |
3 | on:
4 | push:
5 | branches: [ master, develop, 'dev/*' ]
6 | pull_request:
7 | branches: [ master, develop ]
8 |
9 | jobs:
10 | build:
11 | runs-on: ubuntu-22.04
12 | steps:
13 | - name: Checkout repo
14 | uses: actions/checkout@v3
15 | with:
16 | fetch-depth: 0
17 |
18 | - name: Set up Python
19 | uses: actions/setup-python@v4
20 | with:
21 | python-version: 3.8
22 |
23 | - name: Install dependencies
24 | run: |
25 | make install-deps-apt
26 | python -m pip install --upgrade pip wheel
27 | python -m pip install attrdict
28 |
29 | make install-deps-wxpython
30 |
31 | - name: Build project
32 | run: |
33 | make install-docs-build-dependencies
34 |
35 |
36 | - name: Get list of changed files
37 | id: changes
38 | run: |
39 | git fetch origin master
40 | git diff --name-only origin/master...HEAD > changed_files.txt
41 | cat changed_files.txt
42 |
43 |
44 | - name: Determine build mode
45 | id: mode
46 | run: |
47 | if grep -vqE '^examples/.*\.py$' changed_files.txt; then
48 | echo "FULL_BUILD=true" >> $GITHUB_ENV
49 | echo "Detected non-example file change. Full build triggered."
50 | else
51 | CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' -)
52 | echo "FULL_BUILD=false" >> $GITHUB_ENV
53 | echo "CHANGED_EXAMPLES=$CHANGED_EXAMPLES" >> $GITHUB_ENV
54 | echo "Changed examples: $CHANGED_EXAMPLES"
55 | fi
56 |
57 |
58 | - name: Cache built documentation
59 | id: cache-docs
60 | uses: actions/cache@v4
61 | with:
62 | path: |
63 | doc/_build/html
64 | key: ${{ runner.os }}-sphinx-${{ hashFiles('examples/**/*.py', 'doc/**/*', 'conf.py') }}
65 | restore-keys: |
66 | ${{ runner.os }}-sphinx-
67 |
68 |
69 | - name: Build docs
70 | run: |
71 | make docs
72 |
73 |
74 | - name: Deploy Docs
75 | uses: peaceiris/actions-gh-pages@v3
76 | if: github.ref == 'refs/heads/master' # TODO: Deploy separate develop-version of docs?
77 | with:
78 | github_token: ${{ secrets.GITHUB_TOKEN }}
79 | publish_dir: doc/_build/html
80 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on:
4 | push:
5 | branches: [ master, develop ]
6 | pull_request:
7 | branches: [ master, develop ]
8 |
9 | jobs:
10 | test:
11 | name: test (${{ matrix.os }}, py-${{ matrix.python_version }})
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | fail-fast: false
15 | matrix:
16 | os: ['ubuntu-22.04', windows-latest, macOS-latest]
17 | python_version: ['3.8']
18 | include:
19 | # Experimental: Python 3.9
20 | # Works fine, commented out because mostly covered (at least installing/building deps) by the typecheck job
21 | # See issue: https://github.com/NeuroTechX/eeg-notebooks/issues/50
22 | #- os: ubuntu-latest
23 | # python_version: 3.9
24 |
25 | # Check 3.10 for future-proofing
26 | - os: ubuntu-22.04
27 | python_version: '3.10'
28 |
29 | steps:
30 | - uses: actions/checkout@v2
31 | - name: Set up Python
32 | uses: actions/setup-python@v4
33 | with:
34 | python-version: ${{ matrix.python_version }}
35 |
36 | # Not needed if pywinhook is installed from wheels
37 | #- name: Install swig
38 | # if: "startsWith(runner.os, 'windows')"
39 | # run: |
40 | # (New-Object System.Net.WebClient).DownloadFile("http://prdownloads.sourceforge.net/swig/swigwin-4.0.1.zip","swigwin-4.0.1.zip");
41 | # Expand-Archive .\swigwin-4.0.1.zip .;
42 | # echo "$((Get-Item .).FullName)/swigwin-4.0.1" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
43 |
44 | - name: Install APT dependencies
45 | if: "startsWith(runner.os, 'Linux')"
46 | run: |
47 | make install-deps-apt
48 | - name: Upgrade pip
49 | run: |
50 | python -m pip install --upgrade pip wheel
51 | - name: Install Linux dependencies
52 | if: "startsWith(runner.os, 'Linux')"
53 | run: |
54 | make install-deps-wxpython
55 | - name: Install conda
56 | uses: conda-incubator/setup-miniconda@v3
57 | with:
58 | environment-file: environments/eeg-expy-full.yml
59 | auto-activate-base: true
60 | python-version: 3.8
61 | activate-environment: eeg-expy-full
62 | channels: conda-forge
63 | miniconda-version: "latest"
64 | - name: Install dependencies via conda
65 | shell: bash -el {0}
66 | run: |
67 | conda info
68 | conda activate eeg-expy-full
69 | - name: Run eegnb install test
70 | shell: bash -el {0}
71 | run: |
72 | if [ "$RUNNER_OS" == "Linux" ]; then
73 | Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log &
74 | export DISPLAY=:0
75 | fi
76 | eegnb --help
77 | eegnb runexp --help
78 | - name: Run examples with coverage
79 | shell: bash -el {0}
80 | run: |
81 | if [ "$RUNNER_OS" == "Linux" ]; then
82 | Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log &
83 | export DISPLAY=:0
84 | fi
85 | make test PYTEST_ARGS="--ignore=tests/test_run_experiments.py"
86 |
87 |
88 | typecheck:
89 | name: typecheck (${{ matrix.os }}, py-${{ matrix.python_version }})
90 | runs-on: ${{ matrix.os }}
91 | strategy:
92 | fail-fast: false
93 | matrix:
94 | os: ['ubuntu-22.04']
95 | python_version: [3.9]
96 |
97 | steps:
98 | - uses: actions/checkout@v2
99 | - name: Set up Python
100 | uses: actions/setup-python@v1
101 | with:
102 | python-version: ${{ matrix.python_version }}
103 | - name: Install APT dependencies
104 | if: "startsWith(runner.os, 'Linux')"
105 | run: |
106 | make install-deps-apt
107 | - name: Upgrade pip
108 | run: |
109 | python -m pip install --upgrade pip wheel
110 | - name: Install Linux dependencies
111 | if: "startsWith(runner.os, 'Linux')"
112 | run: |
113 | make install-deps-wxpython
114 | - name: Install dependencies
115 | run: |
116 | make build
117 | - name: Typecheck
118 | run: |
119 | make typecheck
120 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | .venv
3 |
4 | *.egg-info/
5 |
6 | # Built as part of docs
7 | doc/auto_examples
8 | doc/_build
9 |
10 | # Built by auto_examples
11 | examples/visual_cueing/*.csv
12 |
13 | # tests/coverage artifacts
14 | .coverage
15 | coverage.xml
16 | htmlcov
17 |
18 | # PyCharm
19 | .idea/
20 |
21 | **/.DS_Store
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "GSOC-eeg-notebooks"]
2 | path = GSOC-eeg-notebooks
3 | url = https://github.com/Parvfect/GSOC-eeg-notebooks
4 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.formatting.provider": "black"
3 | }
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, alexandre barachant
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | # Use pep517 to install pygatt==4.0.5 (which uses the deprecated setuptools/egg installer) on macOS
3 | pip install --use-pep517 .[full]
4 |
5 | install-docs-build-dependencies:
6 | pip install -e .[docsbuild]
7 |
8 | test:
9 | pytest $(PYTEST_ARGS)
10 |
11 | typecheck:
12 | # Exclude visual_cueing due to errors
13 | python -m mypy --exclude 'examples/visual_cueing'
14 |
15 | docs:
16 | cd doc && make html
17 |
18 | clean:
19 | cd doc && make clean
20 |
21 | install-deps-apt:
22 | sudo apt-get update # update archive links
23 |
24 | # xvfb is a dependency to create a virtual display
25 | # libgtk-3-dev is a requirement for wxPython
26 | # freeglut3-dev is a requirement for a wxPython dependency
27 | # portaudio19-dev *might* be required to import psychopy on Ubuntu
28 | # pulseaudio *might* be required to actually run the tests (on PsychoPy import)
29 | # libpulse-dev required to build pocketsphinx (speech recognition dependency of psychopy)
30 | # libsdl2-dev required by psychopy
31 | # libnotify4 is so we can have the libnotify.so module used in wxPython working
32 | sudo apt-get -y install xvfb libgtk-3-dev freeglut3-dev portaudio19-dev libpulse-dev pulseaudio libsdl2-dev libnotify4
33 |
34 | # configure dynamic links
35 | sudo ldconfig
36 |
37 | UPDATED_LIBPATH=$(sudo find / -name libnotify.so)
38 | LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$UPDATED_LIBPATH
39 |
40 | install-deps-wxpython:
41 | # Install wxPython wheels since they are distribution-specific and therefore not on PyPI
42 | # See: https://wxpython.org/pages/downloads/index.html
43 | pip install -U -f https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-22.04 wxPython
44 |
--------------------------------------------------------------------------------
/binder/environment.yml:
--------------------------------------------------------------------------------
1 | channels:
2 | - conda-forge
3 | dependencies:
4 | - python=3.7
5 |
--------------------------------------------------------------------------------
/binder/postBuild:
--------------------------------------------------------------------------------
1 | pip install -r requirements-noeeg.txt
2 | pip install -e .
3 |
4 | cd examples
5 | wget https://neurotechx.github.io/eeg-notebooks/_downloads/894670a281f2dbcc8773156c5c011d11/auto_examples_jupyter.zip
6 | unzip auto_examples_jupyter.zip
7 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 |
3 | # Note: when working on & tweaking one example, easiest + quickest to do single-file build with something like
4 | #
5 | # sphinx-build -D sphinx_gallery_conf.filename_pattern=cueing_behavioural_analysis_winter2019.py -b html -d _build/doctrees . _build/html
6 | #
7 |
8 |
9 | # You can set these variables from the command line.
10 | SPHINXOPTS =
11 | SPHINXBUILD = sphinx-build
12 | #SPHINXPROJ = fooof
13 | SPHINXPROJ = eeg-notebooks
14 | SOURCEDIR = .
15 | BUILDDIR = _build
16 |
17 | # Put it first so that "make" without argument is like "make help".
18 | help:
19 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
20 |
21 | .PHONY: help Makefile
22 |
23 | # Catch-all target: route all unknown targets to Sphinx using the new
24 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
25 | %: Makefile
26 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
27 |
28 | # Custom cleaner that also removes the generated files from sphinx-gallery
29 | clean:
30 | rm -rf $(BUILDDIR)/*
31 | rm -rf auto_examples
32 | rm -rf auto_tutorials
33 | rm -rf auto_motivations
34 | rm -rf generated
35 |
36 | # Check that the site builds with nitpicky checking, and also do a post-hoc link check
37 | check:
38 | make SPHINXOPTS="-n" html
39 | make linkcheck
40 |
41 | # Create the plots used in the FOOOF documentation
42 | plots:
43 | python make_doc_plots.py
44 |
45 | # Build the html site, and push it to gh-pages branch of repo to deploy
46 | install:
47 | # Clean out existing build
48 | make clean
49 | # Clone, specifically, the gh-pages branch, putting it into '_build/gh_pages/'
50 | # --no-checkout just fetches the root folder without content
51 | # --depth 1 is a speed optimization since we don't need the history prior to the last commit
52 | # -b gh-pages fetches only the branch for the gh-pages
53 | #git clone -b gh-pages --single-branch --no-checkout --depth 1 https://github.com/fooof-tools/fooof _build/gh_pages
54 | git clone -b gh-pages --single-branch --no-checkout --depth 1 git@github.com:NeuroTechX/eeg-notebooks _build/gh_pages
55 | # A .nojekyll file tells Github pages to bypass Jekyll processing
56 | touch _build/gh_pages/.nojekyll
57 | # A .nojekyll file tells Github pages to bypass Jekyll processing
58 | touch _build/gh_pages/.nojekyll
59 | # Build the sphinx site
60 | make html
61 | # Copy site into the gh-pages branch folder, then push to Github to deploy
62 | cd _build/ && \
63 | cp -r html/* gh_pages && \
64 | cd gh_pages && \
65 | git add * && \
66 | git add .nojekyll && \
67 | git commit -a -m 'Make install' && \
68 | git push
69 |
--------------------------------------------------------------------------------
/doc/_static/debug.js:
--------------------------------------------------------------------------------
1 | // Add debug actions to flyout menu
2 |
3 | $(function () {
4 | $("[data-toggle='rst-debug-badge']").on("click", function () {
5 | $("[data-toggle='rst-versions']").toggleClass("rst-badge");
6 | });
7 | })
8 |
--------------------------------------------------------------------------------
/doc/_static/theme_override.css:
--------------------------------------------------------------------------------
1 | .wy-nav-top {
2 | background-color: #ff8400 !important;
3 | }
4 | .wy-side-nav-search {
5 | background-color: #FF8C38 !important;
6 | }
7 |
8 |
9 | div[class^="highlight"] a {
10 | background-color: #E6E6E6;
11 | }
12 |
13 | div[class^="highlight"] a:hover {
14 | background-color: #ABECFC;
15 | }
16 |
17 | .rst-versions {
18 | position: relative;
19 | }
20 | .rst-versions.shift-up {
21 | overflow-y: visible;
22 | }
23 |
24 | a[class^="sphx-glr-backref-module-"] {
25 | text-decoration: none;
26 | background-color: rgba(0, 0, 0, 0) !important;
27 | }
28 | a.sphx-glr-backref-module-sphinx_gallery {
29 | text-decoration: underline;
30 | background-color: #E6E6E6;
31 | }
32 |
33 | .anim-state label {
34 | display: inline-block;
35 | }
36 |
--------------------------------------------------------------------------------
/doc/changelog.rst:
--------------------------------------------------------------------------------
1 | **************
2 | Code Changelog
3 | **************
4 |
5 | This page contains the changelog for eeg-notebooks and any notes on updating between versions.
6 |
7 |
8 |
9 | 0.2.X
10 | ======
11 |
12 | The 0.2.X series included several major updates to the API, backend, and compatibility.
13 | These updates were introduced around, and largely for, the OpenBCI-NTX Challenge 2020.
14 |
15 |
16 | Updates include:
17 |
18 |
19 | - Support for OpenBCI EEG devices, introduced through brainflow
20 |
21 | - Abstracted 'device' class introduced to cover both OpenBCI with brainflow and Muse with muse-lsl
22 |
23 | - Subprocess calls for running concurrent psychopy and muselsl streams put inside functions (not required to be called by user)
24 |
25 | - New sphinx gallery-based documentation site, built with sphinx and hosted on gh-pages
26 |
27 | - General cleanup of documentation, installation, and setup instructions
28 |
29 | - Example datasets removed from repo and placed in separate cloud storage
30 |
31 | - Dataset downloader functions implemented
32 |
33 | - Kyle Mathewson's visual cueing experiment + results added
34 |
35 |
36 |
37 | 0.1.X
38 | ======
39 |
40 | The 0.1.X series was the initial port of the muse-lsl code, and development of the jupyter notebook-oriented approach. It was developed principally for the 2018/2019 NeuroBRITE and BrainModes programs.
41 |
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/doc/docs_notes.md:
--------------------------------------------------------------------------------
1 | # EEG-ExPy Documentation Developer Notes
2 |
3 | The documentation page can be found at https://neurotechx.github.io/eeg-expy
4 |
5 | The documentation source files are a combination of plain-text `.rst`, `.md`, and `.py` files.
6 |
7 | It is built locally with `sphinx` and hosted on the GitHub repo's `gh-pages` branch in the usual fashion.
8 |
9 |
10 | There are two main locations for the documentation files:
11 |
12 | - The various files and sub-folders in `eeg-expy/doc`, which contain the webpage contents
13 | - The files and sub-folders in `eeg-expy/examples`, which contains `.py` scripts, grouped by experiment
14 |
15 | This general organization (with `doc` and `examples` folders) is widely used by excellent python libraries such as `MNE` and `Nilearn`, and we largely follow suit here.
16 |
17 | The `.py` files in `examples` contain mixtures of python code and `.rst`-format documentation, which are converted through `sphinx-gallery` into a set of web-pages with formatted text and code and in-line figures. In addition, `sphinx-gallery` creates executable `.py` and `.ipynb` files for each example page, and adds download links to these at the bottom of each page.
18 |
19 | The documentation building command actually executes the python code in each of the `examples` files, and creates figures from them. Errors in the python code lead to incompletely built webpages.
20 |
21 |
22 | ( Side-note: The title `EEG-Notebooks` was originally conceived with the idea of having complete Python-based EEG experiments runnable from a jupyter notebook, with those notebooks being the main contents of the repo. At the user level, this is still largely the case; but at the development level, we have now switched over from maintaining a set of `.ipynb` source files to maintaining a set of `.py` files (in the `examples` folder), which are converted to `.ipynb` files when the `sphinx-gallery` documentation is compiled. This is much better and more sustainable from the point of view of version control, since multiple-user contributions to `.ipynb` files get very hairy with git. )
23 |
24 |
25 | ## Building the doc site
26 |
27 | The documentation build has only been tested in linux. It may also work on Mac.
28 |
29 | First: install the docs dependencies in a new or existing python environment
30 | (see `requirements-doc.txt`)
31 |
32 | When working on the docs, it is most useful to have 3 terminals open, each with the python environment activated.
33 |
34 | In terminal 1: edit the source files
35 |
36 | In terminal 2: build and re-build the docs periodically to inspect changes
37 |
38 | `cd eeg-expy/doc`
39 | `make html`
40 |
41 | In terminal 3: keep a local http server running to render the docs
42 |
43 | `python -m http.server 8001`
44 |
45 |
46 | In browser, navigate to the port used above
47 |
48 | `localhost:8001`
49 |
50 |
51 | When you are happy with the changes, commit and push the source files, and run the command that builds documentation and pushes to `gh-pages`
52 |
53 | `make install`
54 |
55 |
56 |
57 |
58 | ## Misc notes
59 |
60 | - The `doc/index.rst` defines the overall site structure and table-of-contents tree
61 | - `doc/Makefile` contains the commands for building documentation. The main two commands are `make html` (build docs locally) and `make install` (build docs locally and push to the `gh-pages` branch, updating the website)
62 | - Examples pages can be built individually, rather than re-running the entire doc build process
63 | - The current doc build takes approximately 10 minutes
64 |
65 |
66 |
67 |
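68 | ## Example file format
69 |
70 | For reference, here is a minimal sketch of the sphinx-gallery-style `.py` example format described above. It is an illustrative file rather than one taken from this repo, and assumes the standard `sphinx-gallery` conventions (an rst docstring header, with comment blocks after a long line of `#` characters forming the text cells):
71 |
72 | ```python
73 | """
74 | Example page title
75 | ==================
76 |
77 | A short rst-formatted description that becomes the page introduction.
78 | """
79 |
80 | # Ordinary code: this becomes the first executed code cell.
81 | import numpy as np
82 |
83 | ###############################################################################
84 | # Comment blocks that follow a long line of # characters are rendered as
85 | # rst text between the code cells.
86 |
87 | print(np.linspace(0, 1, 5))  # printed output and figures appear in the page
88 | ```
89 |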
--------------------------------------------------------------------------------
/doc/experiments/all_examples.rst:
--------------------------------------------------------------------------------
1 |
2 | .. include:: ../auto_examples/index.rst
3 |
4 |
5 |
--------------------------------------------------------------------------------
/doc/experiments/cueing.rst:
--------------------------------------------------------------------------------
1 | ********************************
2 | _
3 | ********************************
4 |
5 | Visual Cueing
6 | =========================================================
7 |
8 | The visual cueing task can elicit a number of reliable changes. A central cue indicates the location of an upcoming target onset. Here the task can be changed to be perfectly predictive, or have some level of cue validity. The task is to indicate the orientation of a spatial grating on the target, up for vertical, right for horizontal.
9 |
10 | The examples below demonstrate the following neural and cognitive empirical phenomena, elicited by the visual cueing task:
11 |
12 | *ERPs* - Validly cued targets elicit larger ERPs than invalidly cued targets
13 |
14 | *Response ERPs* - Validly cued targets are identified more quickly and more accurately
15 |
16 | *Oscillations* - Alpha power lateralizes after a spatial cue onset, preceding the upcoming onset of a target. Alpha power becomes smaller contralateral to the target side, and larger ipsilateral to the target.
17 |
18 |
19 | **Visual Cueing Experiment Notebook Examples**
20 |
21 | .. include:: ../auto_examples/visual_cueing/index.rst
22 |
23 |
--------------------------------------------------------------------------------
/doc/experiments/gonogo.rst:
--------------------------------------------------------------------------------
1 | ********************************
2 | _
3 | ********************************
4 |
5 | Visual Go-No-Go
6 | =========================================================
7 |
8 | An experiment designed to investigate the event-related potentials that can be detected during a Go-No-Go Task, which measures executive, inhibitory control and sustained attention. The subject is rapidly presented with a sequence of circles and squares and is asked to indicate, by pressing the spacebar, whether a shape is a circle.
9 |
10 | .. include:: ../auto_examples/visual_gonogo/index.rst
11 |
12 |
--------------------------------------------------------------------------------
/doc/experiments/vn170.rst:
--------------------------------------------------------------------------------
1 | ********************************
2 | _
3 | ********************************
4 |
5 | Visual N170
6 | =========================================================
7 |
8 | The N170 is a large negative event-related potential (ERP) component that occurs after the detection of faces, but not objects, scrambled faces, or other body parts such as hands. The N170 occurs around 170ms after face perception and is most easily detected at lateral posterior electrodes such as T5 and T6. Frontal or profile views of human (and animal) faces elicit the strongest N170, and the strength of the N170 does not seem to be influenced by how familiar a face is. Thus, although there is no consensus on the specific source of the N170, researchers believe it is related to activity in the fusiform face area, an area of the brain that shows a similar response pattern and is involved in encoding the holistic representation of a face (i.e. eyes, nose, and mouth all arranged in the appropriate way).
9 |
10 | In this notebook, we will attempt to detect the N170 with the Muse headband using faces and houses as our stimuli. The Muse’s temporal electrodes (TP9 and TP10) are well positioned to detect the N170 and we expect we’ll be able to see an N170 emerge from just a few dozen trials. We will then run several different classification algorithms on our data in order to evaluate the performance of a potential brain-computer interface using the N170.
11 |
12 | **Visual N170 Experiment Notebook Examples:**
13 |
14 | .. include:: ../auto_examples/visual_n170/index.rst
15 |
16 |
--------------------------------------------------------------------------------
/doc/experiments/vp300.rst:
--------------------------------------------------------------------------------
1 | ********************************
2 | _
3 | ********************************
4 |
5 | Visual P300
6 | =========================================================
7 |
8 | The P300 is a positive event-related potential (ERP) that occurs around 300ms after perceiving a novel or unexpected stimulus. It is most commonly elicited through ‘oddball’ experimental paradigms, where a certain subtype of stimulus is presented rarely amidst a background of another more common type of stimulus. Interestingly, the P300 can be elicited by multiple sensory modalities (e.g. visual, auditory, somatosensory). Thus, it is believed that the P300 may be a signature of higher level cognitive processing such as conscious attention.
9 |
10 | In this notebook, we will attempt to elicit a P300 with a visual oddball stimulation paradigm using the Muse headband.
11 |
12 |
13 | **Visual P300 Notebook Examples**
14 |
15 | .. include:: ../auto_examples/visual_p300/index.rst
16 |
17 |
--------------------------------------------------------------------------------
/doc/experiments/vssvep.rst:
--------------------------------------------------------------------------------
1 | ********************************
2 | _
3 | ********************************
4 |
5 | Visual SSVEP
6 | =====================
7 |
8 | The steady-state visual evoked potential (SSVEP) is a repetitive evoked potential that is naturally produced when viewing stimuli flashing at frequencies in the 6-75 Hz range. Electrical activity at the same frequency as the visual stimulation can be detected in the occipital areas of the brain, likely due to the perceptual recreation of the stimulus in the primary visual cortex.
9 |
10 | The SSVEP is often used in BCI applications due to its ease of detection and the amount of information a user can communicate, thanks to the high potential frequency resolution of the SSVEP.
11 |
12 | In this notebook, we will use the Muse EEG headband with an extra occipital electrode to detect the SSVEP and evaluate its use in SSVEP-based BCIs.
13 |
14 |
15 | Extra Electrode
16 |
17 | Although the SSVEP is detectable at the default temporal electrodes, it can be seen much more clearly directly over the occipital cortex.
18 |
19 | The Muse 2016 supports the addition of an extra electrode, which can be connected through the device's microUSB charging port.
20 |
21 | Instructions on how to build an extra electrode for Muse
22 | Working with the extra electrode
23 | For this experiment, the extra electrode should be placed at POz, right at the back of the skull. It can be secured in place with a bandana or a hat.
24 |
25 |
26 | **SSVEP Experiment Notebook Examples**
27 |
28 | .. include:: ../auto_examples/visual_ssvep/index.rst
29 |
30 |
31 |
--------------------------------------------------------------------------------
/doc/getting_started/analysis.md:
--------------------------------------------------------------------------------
1 | # Analyzing data
2 |
3 | ( To add )
4 |
--------------------------------------------------------------------------------
/doc/getting_started/available_experiments.md:
--------------------------------------------------------------------------------
1 |
2 | # Available Experiments
3 |
4 | ### Visual P300 with Oddball paradigm
5 | The visual P300 is a positive deflection that occurs 300ms after perceiving a visual stimulus and is associated with decision making. This was validated in Muse by Alexandre Barachant with the Oddball paradigm, in which low-probability target items (oddballs) are interspersed with high probability non-target items. With AB's paradigm, the experiment takes about 10 minutes to run (5 x 2 minute trials). Although the Muse's sensors aren't in the ideal position for detecting the P300, AB was able to attain "good" accuracy in identifying P300 spikes.
6 |
7 | ### N170
8 | The N170 is an ERP specifically related to the perception of faces. This was validated in Muse by Hubert with a 12 minute experiment (6 x 2 minute trials). Stimuli consist of 12 pictures of houses and 12 pictures of faces. Accuracy of N170 detection is rather good.
9 |
10 | ### SSVEP
11 | The steady state visual evoked potential is a frequency response produced by visual stimulation at specific frequencies. It was validated by Hubert in a 12 minute experiment (6 x 2 minute trials). Stimulation frequencies of 30 Hz and 20 Hz were used, and an extra electrode at POz was added. Clear peaks were found in the PSD at the stimulation frequencies. The peaks were most significant at the extra electrode, which is closest to the primary visual regions, but the response was detectable at all electrodes and was classified with remarkably high accuracy when using a filter bank approach to isolate specific frequencies.
12 |
13 | ## Old experiments
14 | ### Go/No-Go
15 | An experiment designed to investigate the event-related potentials that can be detected during a Go-No-Go Task, which measures executive, inhibitory control and sustained attention. The subject is rapidly presented with a sequence of circles and squares and is asked to indicate, by pressing the spacebar, whether a shape is a circle.
16 |
17 |
18 | ### SSAEP
19 | The steady state auditory evoked potential is a frequency response produced when hearing amplitude-modulated tones of certain frequencies. It was validated in Muse by Hubert, who used 45 Hz and 40 Hz amplitude modulation applied to 900 Hz and 770 Hz carrier frequencies. A PSD of the recorded EEG signal showed corresponding clear spikes at 45 and 40 Hz in the temporal electrodes. The N100 and P200 complex was also noticed at the beginning of stimulus onset.
20 |
21 |
22 | ### C1 and P1
23 | C1 and P1 are two ERPs related to the perception of a visual stimulus. The C1 is the first component, appearing in the 65-90ms range after stimulus onset while the P1 appears later, around 100ms.
24 |
25 | C1 and P1 were validated in Muse by Hubert with a left/right visual field experiment. Comparing ERPs to left or right-field presentation of visual stimuli revealed a contralateral pattern of C1 and P1 in both the temporal and anterior electrodes. However, their timing seems a little delayed.
26 |
27 |
28 | ### Auditory P300
29 | Same as the visual P300, but dependent on auditory stimulus. Auditory P300s are normally less distinguishable than visual P300s, but they may be more suited to the Muse since its electrodes are closer to auditory centers (superior temporal cortex).
30 |
31 |
32 | ## Unvalidated Experiments and other phenomena
33 |
34 | ### N100 - P200
35 | The combination of a negative evoked potential around 100ms after any unpredictable stimulus and a positive potential 200ms after. These were noticed in Hubert's SSAEP experiment, but not independently classified or tested.
36 |
37 | ### On-task Beta
38 | Noticed in Hubert's visual grating test, but difficult to extract.
39 |
40 | ### Alpha reset
41 | A noticeable increase in alpha activity after stimulus presentation ends. Noticed in Hubert's visual grating test.
42 |
--------------------------------------------------------------------------------
/doc/getting_started/data_zipper.md:
--------------------------------------------------------------------------------
1 | # Data Zipping
2 | After you have run experiments, you can compress all recorded data into a zip file. This is done using the command-line tool with the flags detailed below.
3 |
4 | ### Command Line Interface
5 |
6 | To use the command-line tool, open a command/terminal prompt and enter `eegnb runzip` followed by the appropriate flags for your desired experiment. The tool looks through the folders in `~/.eegnb/data` for data to compress. Zip files are written to `~/Desktop` with the filename `{experiment_name}_{site}-{day_month_year_hour:minute}_zipped`.
7 | The possible flags are:
8 |
9 | * *-ex ; --experiment*: The experiment whose data should be zipped
10 | * *-s ; --site*: Subfolder within the experiment
11 | * *-ip ; --prompt*: Bypass the other flags to activate an interactive prompt
12 |
13 | ### Using the introprompt flag
14 |
15 | If using the -ip flag the user will be prompted to input the various session parameters. The prompts are detailed below.
16 |
17 | **Experiment Selection**
18 | ```
19 | Please select which experiment you would like to run:
20 | [0] Visual N170
21 | [1] Visual P300
22 | [2] Visual SSVEP
23 | [3] visual-cue (no description)
24 | [4] visual-codeprose (no description)
25 | [5] Auditory SSAEP (orig)
26 | [6] Auditory SSAEP (single freq)
27 | [7] Auditory oddball (orig)
28 | [8] Auditory oddball (diaconescu)
29 |
30 | Enter Experiment Selection:
31 | ```
32 | This section allows you to select one of the above experiments. There are other experiments available; however, they have not yet been updated for the new device-agnostic API. As they get updated, more experiments will populate this section.
33 |
34 | **Site Selection**
35 | ```
36 | Please select which experiment subfolder you would like to zip. Default 'local_ntcs'
37 |
38 | Current subfolders for experiment visual-N170:
39 |
40 | ['local','local_ntcs','temp']
41 |
42 | Enter folder:
43 | ```
44 |
45 | This prompt allows you to choose the subfolder for the experiment you previously selected. The example provided shows sample sites in the visual-N170 folder.
46 |
47 |
--------------------------------------------------------------------------------
/doc/getting_started/faq.md:
--------------------------------------------------------------------------------
1 | # Frequently Asked Questions
2 |
3 |
4 | ## How do I run an experiment?
5 |
6 | `eegnb runexp -ip`
7 |
8 |
9 | ## How do I visualize a live stream to check the recording quality?
10 |
11 | `eegnb view`
12 |
13 |
14 |
15 |
16 | ## I forgot the name of my conda env?
17 |
18 | You can check your conda environments with ```conda env list```
19 |
20 |
21 | ## Where is my data?
22 |
23 | By default, all recorded data is saved to, e.g.,
24 |
25 | `~/.eegnb/data/visual-N170/local/museS/subject0001/session0001/recording_2020-09-19-03.37.42.csv`
26 |
27 |
28 | ## How do I find the correct MAC port for my OpenBCI?
29 |
30 |
--------------------------------------------------------------------------------
/doc/getting_started/loading_and_saving.md:
--------------------------------------------------------------------------------
1 | # Loading and Saving Data
2 | Knowing where the data is saved is integral to the functionality of EEG Notebooks. EEG Notebooks saves data to a default location in a hidden directory. From this directory, the individual files can be found based on a folder structure outlined below in the **naming convention.**
3 |
4 | ## Locating the Default Data Directory
5 |
6 | #### Windows 10
7 | The default directory is found at the location `C:\Users\*USER_NAME*\.eegnb`, an example of which is pictured below.
8 | 
9 |
10 | #### Linux
11 |
12 | #### MacOS
13 |
14 | ## Changing the Default Data Directory
15 | The default directory for saving data is automatically set within the library. If you want to save and analyze data to/from a new directory, it must be passed as a parameter to both the `eegnb.generate_save_fn()` and `eegnb.analysis.load_data()` functions.
16 |
17 | **Saving to new directory:**
18 | ``` python
19 | from eegnb import generate_save_fn
20 | from eegnb.experiments.visual_n170 import n170
21 |
22 | # Define session parameters
23 | board = 'cyton'
24 | experiment = 'visual-N170'
25 | subject = 1
26 | session = 1
27 |
28 | # Define new directory and generate save filename
29 | new_dir = 'C:/Users/Jadin/Documents/EEG_Notebooks_Data'
30 | save_fn = generate_save_fn(board, experiment, subject, session, new_dir)
31 |
32 | # Continue to run experiment as normal...
33 | ```
34 |
35 | **Loading from new directory:**
36 | ``` python
37 | from eegnb.analysis.utils import load_data
38 |
39 | # Define parameters for session you want to load
40 | board = 'cyton'
41 | experiment = 'visual-N170'
42 | subject = 1
43 | session = 1
44 |
45 | # Define new directory
46 | new_dir = 'C:/Users/Jadin/Documents/EEG_Notebooks_Data'
47 |
48 | # Load data
49 | raw = load_data(
50 | subject_id = subject,
51 | session_nb = session,
52 | device_name = board,
53 | experiment = experiment,
54 | data_dir = new_dir
55 | )
56 | ```
57 |
58 | ## Naming Convention
59 | From the specified data directory, EEG notebooks then follows a specific set of naming conventions to define subdirectories and save the data. The full path ends up taking the form
60 | ```
61 | DATA_DIR\experiment\site\device\subject#\session#\file_name.csv
62 | ```
63 | Each field is explained below:
64 |
65 | **Experiment:** This part is the name of the experiment being run. Example names of experiments as they appear in the example datasets are shown below.
66 | ```
67 | visual-N170
68 | visual-P300
69 | visual-SSVEP
70 | ```
71 |
72 | **Site:** The site refers to the recording location, or generally the machine it was recorded to. If you are saving and analyzing only your own data on your local machine, you do not need to specify your site name as it will default to 'local'. When loading example datasets however, it is necessary to specify from which site you would like to load data.
73 |
74 | **Device:** The name of the device being recorded from.
75 |
76 | **Subject #:** When entering subject ID as a parameter, you only need to specify the integer value. The integer will be formatted to `subjectXXXX` where "XXXX" is a four-digit representation of the integer ID#.
77 |
78 | **Session #:** A session in this case would be the full period of time during which you have the device on and are taking multiple recordings. For example: if you put the headset on and take five recordings, all five of these recordings would belong to session number 1. Once you take a break from consecutive recordings, a new session begins. Just like the subject ID, this value is passed as an integer and gets converted to a readable format.
79 |
80 | **File name:** The file name is automatically generated in the format `recording_date_time.csv`
81 |
82 | ### Examples
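83 |
84 | As a minimal sketch of how the conventions above fit together (assuming `generate_save_fn` falls back to the default data directory and the default 'local' site when no directory is passed; the timestamped filename shown is only illustrative):
85 |
86 | ``` python
87 | from eegnb import generate_save_fn
88 |
89 | # Subject 1, session 1 of the visual N170 experiment, recorded from a Cyton board
90 | save_fn = generate_save_fn('cyton', 'visual-N170', 1, 1)
91 | print(save_fn)
92 | # Something like:
93 | # ~/.eegnb/data/visual-N170/local/cyton/subject0001/session0001/recording_2020-09-19-03.37.42.csv
94 | ```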
--------------------------------------------------------------------------------
/doc/img/EEG-ExPy_Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/EEG-ExPy_Logo.png
--------------------------------------------------------------------------------
/doc/img/FOOOFGroup_report.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/FOOOFGroup_report.png
--------------------------------------------------------------------------------
/doc/img/FOOOF_report.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/FOOOF_report.png
--------------------------------------------------------------------------------
/doc/img/attaching_electrode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/attaching_electrode.png
--------------------------------------------------------------------------------
/doc/img/bluemuse.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/bluemuse.PNG
--------------------------------------------------------------------------------
/doc/img/brainbit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/brainbit.png
--------------------------------------------------------------------------------
/doc/img/cyton.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/cyton.png
--------------------------------------------------------------------------------
/doc/img/cyton_daisy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/cyton_daisy.png
--------------------------------------------------------------------------------
/doc/img/eeg-notebooks_democratizing_the_cogneuro_experiment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/eeg-notebooks_democratizing_the_cogneuro_experiment.png
--------------------------------------------------------------------------------
/doc/img/eeg-notebooks_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/eeg-notebooks_logo.png
--------------------------------------------------------------------------------
/doc/img/extra_electrode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/extra_electrode.png
--------------------------------------------------------------------------------
/doc/img/ganglion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/ganglion.png
--------------------------------------------------------------------------------
/doc/img/gtec-unicorn.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/gtec-unicorn.jpg
--------------------------------------------------------------------------------
/doc/img/install_gitbash.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/install_gitbash.png
--------------------------------------------------------------------------------
/doc/img/mark_conda_terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/mark_conda_terminal.png
--------------------------------------------------------------------------------
/doc/img/miniconda_install_fig.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_install_fig.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_1.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_2.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_3.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_4.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_5.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_6.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_7.png
--------------------------------------------------------------------------------
/doc/img/miniconda_run_install_fig_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/miniconda_run_install_fig_8.png
--------------------------------------------------------------------------------
/doc/img/notion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/notion.png
--------------------------------------------------------------------------------
/doc/img/spectrum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/spectrum.png
--------------------------------------------------------------------------------
/doc/img/windows_default_directory.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/windows_default_directory.PNG
--------------------------------------------------------------------------------
/doc/img/windows_usb_select.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/doc/img/windows_usb_select.PNG
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../README.rst
2 |
3 |
4 | .. toctree::
5 | :caption: Getting Started
6 | :maxdepth: 3
7 |
8 | getting_started/installation
9 | getting_started/streaming
10 | getting_started/running_experiments
11 | getting_started/data_zipper
12 | getting_started/analysis
13 | getting_started/available_experiments
14 |
15 | .. toctree::
16 | :caption: Experiments
17 | :maxdepth: 4
18 |
19 | experiments/vn170
20 | experiments/vp300
21 | experiments/vssvep
22 | experiments/cueing
23 | experiments/gonogo
24 | experiments/all_examples
25 |
26 |
27 | .. toctree::
28 | :caption: Misc
29 | :maxdepth: 4
30 |
31 | misc/using_an_extra_electrode_muse
32 | misc/muse_info
33 | misc/about_the_docs
34 | misc/ntcs_phase1_instructions
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/doc/misc/about_the_docs.md:
--------------------------------------------------------------------------------
1 | # About the EEG-ExPy Documentation Pages
2 |
3 | A few comments on how these are put together:
4 |
5 | The documentation pages are written for the following readthedocs sphinx-based setup:
6 |
7 | - `readthedocs` auto-generates the documentation, using various configuration files
8 | - In particular, we use [Nbsphinx](https://github.com/spatialaudio/nbsphinx) to create `html` pages directly
9 |   from a combination of jupyter notebooks, `.rst` files, and `.md` files.
10 | - Because the notebook files are not located under the docs folder, we additionally need to make use of [nbsphinx-link](https://github.com/vidartf/nbsphinx-link).
11 |
12 |
13 |
--------------------------------------------------------------------------------
/doc/misc/muse_info.md:
--------------------------------------------------------------------------------
1 | # Technical Information about the MUSE
2 |
3 | ## MUSE setup and usage
4 |
5 | There is a lot of excellent information on MUSE setup and usage on the [Krigolson lab website](http://www.krigolsonlab.com/muse-help.html) at the University of Victoria, BC.
6 |
7 | The following instructional videos are particularly worth checking out:
8 |
9 |
10 | ### Introduction to the MUSE
11 |
12 |
13 | [Introduction to the MUSE](https://youtu.be/LihwJxzJALw?t=1s)
14 |
15 |
16 | ### Headband fit and signal quality tutorial
17 |
18 | [Headband fit and signal quality tutorial](https://youtu.be/Y-tF3ii0lHU?t=1s)
19 |
20 | ### Adjusting and fitting the MUSE for better signal quality
21 |
22 | [Adjusting and fitting the MUSE for better signal quality](https://youtu.be/v8xUYqqJAIg?t=1s)
23 |
24 |
25 | ### Using water for better signal quality
26 |
27 | [Using water for better signal quality](https://youtu.be/gKtVlVCDHGg?t=1s)
28 |
29 |
30 |
31 |
32 |
33 | ## Details related to Muse ERP analysis
34 |
35 |
36 | Latency and jitter from the Muse is approximately 40ms ± 20ms.
37 | 
38 | In the Krigolson lab's resampling analysis, a sample size of 10 subjects was found to be sufficient for high
39 | statistical accuracy for the N200 and reward positivity, similar to traditional EEG,
40 | but a greater number of subjects was needed for the P300.
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/doc/misc/using_an_extra_electrode_muse.md:
--------------------------------------------------------------------------------
1 | # Using an extra electrode with Muse
2 |
3 | Although the Muse is wonderful for ease-of-use and affordability, it is limited by its small number of electrode locations and the inflexibility of their positioning. Fortunately, to partially overcome this limitation, the Muse hardware team has made it possible to add an extra electrode to the Muse 2016.
4 |
5 | ## The electrode
6 |
7 | These electrodes are not for sale anywhere; they must be made by hand. Fortunately, their construction is fairly simple: attach an EEG electrode (of any kind) to a male microUSB connector with a wire.
8 |
9 | We'll update this section with more info as it comes in from the Muse hardware team.
10 |
11 | 
12 |
13 | ## Attaching the extra electrode
14 |
15 | The extra electrode can be applied anywhere on the head (provided the wire is long enough). Just insert the electrode's microUSB connector into the charging port of the Muse. To make sure the electrode stays in place, we recommend using a hat or scarf as pictured.
16 |
17 | 
18 |
19 | ## Getting data from the electrode
20 |
21 | With the extra electrode connected to the Muse, its data is available as the `Right AUX` channel in the `muse-lsl` data stream. It will automatically appear in `muse-lsl`'s viewer. An example of how to access this data and include it in your analysis is shown in the [P300 with Extra Electrode](https://github.com/NeuroTechX/eeg-notebooks/blob/master/notebooks/P300%20with%20Extra%20Electrode.ipynb) notebook.
22 |
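23 | As a minimal, hypothetical sketch (the file path is illustrative, and the column layout is assumed from the standard `muse-lsl` CSV format), the AUX data can be pulled out of a saved recording with pandas:
24 | 
25 | ```python
26 | import pandas as pd
27 | 
28 | # Load a recording saved by eegnb / muse-lsl (path is illustrative)
29 | df = pd.read_csv("recording_2022-01-01-12.00.00.csv")
30 | 
31 | # The extra electrode appears as the 'Right AUX' column alongside the standard Muse channels
32 | aux = df["Right AUX"]
33 | print(aux.describe())
34 | ```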
--------------------------------------------------------------------------------
/eegexpy/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from eegnb.experiments import VisualN170,VisualP300,VisualSSVEP,AuditoryOddball
3 |
4 | from eegnb.analysis.utils import *
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/eegnb/__init__.py:
--------------------------------------------------------------------------------
1 | from os import path, makedirs
2 | from time import strftime, gmtime
3 | from pathlib import Path
4 |
5 | DATA_DIR = path.join(path.expanduser("~/"), ".eegnb", "data")
6 |
7 |
8 | def get_recording_dir(
9 | board_name: str,
10 | experiment: str,
11 | subject_id: int,
12 | session_nb: int,
13 | site="local",
14 | data_dir=DATA_DIR,
15 | ) -> Path:
16 | # convert subject ID to 4-digit number
17 | subject_str = f"subject{subject_id:04}"
18 | session_str = f"session{session_nb:03}"
19 | return _get_recording_dir(
20 | board_name, experiment, subject_str, session_str, site, data_dir=data_dir
21 | )
22 |
23 |
24 | def _get_recording_dir(
25 | board_name: str,
26 | experiment: str,
27 | subject_str: str,
28 | session_str: str,
29 | site: str,
30 | data_dir=DATA_DIR,
31 | ) -> Path:
32 | """A subroutine of get_recording_dir that accepts subject and session as strings"""
33 |     # folder structure is /DATA_DIR/experiment/site/board_name/subject/session/*.csv
34 | recording_dir = (
35 | Path(data_dir) / experiment / site / board_name / subject_str / session_str
36 | )
37 |
38 | # check if directory exists, if not, make the directory
39 | if not path.exists(recording_dir):
40 | makedirs(recording_dir)
41 |
42 | return recording_dir
43 |
44 |
45 | def generate_save_fn(
46 | board_name: str,
47 | experiment: str,
48 | subject_id: int,
49 | session_nb: int,
50 | data_dir=DATA_DIR,
51 | ) -> Path:
52 |     """Generates a timestamped recording file name inside the recording directory for the current subject/session/experiment combo"""
53 | recording_dir = get_recording_dir(
54 | board_name, experiment, subject_id, session_nb, data_dir=data_dir
55 | )
56 |
57 | # generate filename based on recording date-and-timestamp and then append to recording_dir
58 | return recording_dir / (
59 | "recording_%s" % strftime("%Y-%m-%d-%H.%M.%S", gmtime()) + ".csv"
60 | )
61 |
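62 | 
63 | # A minimal usage sketch (the board and experiment names below are illustrative):
64 | #
65 | #   >>> from eegnb import generate_save_fn
66 | #   >>> generate_save_fn("muse2", "visual-N170", subject_id=1, session_nb=1)
67 | #   # -> <data_dir>/visual-N170/local/muse2/subject0001/session001/recording_<timestamp>.csv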
--------------------------------------------------------------------------------
/eegnb/analysis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/analysis/__init__.py
--------------------------------------------------------------------------------
/eegnb/analysis/analysis_report.html:
--------------------------------------------------------------------------------
(HTML analysis report template; the markup and styling were stripped during extraction. Recoverable text:)

Description: "This is an analysis report for the experiment. For more information about the experiment, please visit the documentation"

Raw Epoch: "The raw epoch is shown below. The raw epoch is the data that is recorded from the EEG headset. The raw epoch is then processed to remove noise and artifacts."

Stimulus Response: "The stimulus response is shown below. The stimulus response is the data that is recorded from the EEG headset after removing noise and artifacts."
--------------------------------------------------------------------------------
/eegnb/analysis/analysis_report.py:
--------------------------------------------------------------------------------
1 |
2 | # Generating html using Python
3 |
4 | from airium import Airium
5 | from typing import Dict
6 | import os
7 | import eegnb
8 | import base64
9 |
10 | a = Airium()
11 |
12 | def get_experiment_information(experiment:str):
13 | analysis_save_path = os.path.join(os.path.dirname(eegnb.__file__), "analysis")
14 | file_path = os.path.join(analysis_save_path, "experiment_descriptions")
15 |
16 | with open(os.path.join(file_path, experiment + ".txt"), 'r') as f:
17 | experiment_text = f.readlines()
18 |
19 | return experiment_text
20 |
21 | def get_img_string(image_save_path):
22 | """ Returns image as string to embed into the html report """
23 | return base64.b64encode(open(image_save_path, "rb").read()).decode()
24 |
25 | def get_html(experimental_parameters: Dict):
26 |
27 | # add variable to store the link
28 | analysis_save_path = os.path.join(os.path.dirname(eegnb.__file__), "analysis")
29 | css_path = os.path.join(analysis_save_path, "styling.css")
30 | eeg_device, experiment, subject, session, example, drop_percentage, epochs_chosen = experimental_parameters.values()
31 |
32 | erp_image_path = os.path.join(os.getcwd(), "erp_plot.png")
33 | pos_image_path = os.path.join(os.getcwd(), "power_spectrum.png")
34 |
35 | experiment_text = get_experiment_information(experiment)
36 |
37 |
38 | """ Possibility of unique experiment text - decision to be made """
39 | #experiment_text = ""
40 | #with open('experiment_descriptions/{}.txt'.format(experiment), 'r') as f:
41 | # experiment_text = f.readlines()
42 |
43 | a('')
44 | with a.html():
45 | with a.head():
46 | a.link(href=css_path, rel='stylesheet', type="text/css")
47 | a.title(_t="Analysis Report")
48 |
49 | with a.body():
50 |
51 | # Navigation bar
52 | with a.div(klass="topnav"):
53 | a.a(_t="Description", href="#Description")
54 | a.a(_t="Raw Epoch", href="#Raw Epoch")
55 | a.a(_t="Stimulus Response", href="#Stimulus Response")
56 |
57 | # Description
58 | with a.div(id="Description"):
59 | a.h1(_t="Analysis Report")
60 | with a.p():
61 | a("Experiment Name: {} ".format(experiment))
62 |
63 | if example:
64 | a("Example File ")
65 | else:
66 | a("Subject Id: {} ".format(subject))
67 | a("Session Id: {} ".format(session))
68 |
69 | a("EEG Device: {} ".format(eeg_device))
70 | a('This is an analysis report for the experiment. For more information about the experiment, please visit the documentation. ')
71 | a("{} ".format(experiment_text[0]))
72 | a("{} ".format(experiment_text[1]))
73 |
74 | # Raw Epoch
75 | with a.div(id="Raw Epoch"):
76 | a.h2(_t="Raw Epoch")
77 | with a.p():
78 | a("The power spectrum of the raw epoch is displayed below. The raw epoch is then processed to remove noise and artifacts.")
79 | a.img(src="data:image/png;base64, {}".format(get_img_string(pos_image_path)), alt="Raw Epoch")
80 |
81 | # Stimulus Response
82 | with a.div(id="Stimulus Response"):
83 | a.h2(_t="Stimulus Response")
84 | with a.p():
85 | a("The stimulus response is shown below. The stimulus response is the amplitude response at the specific timescales where the response to the stimulus can be detected. ")
86 | a("Epochs chosen: {} ".format(epochs_chosen))
87 | a("Drop Percentage: {} % ".format(round(drop_percentage,2)))
88 | a.img(src="data:image/png;base64, {}".format(get_img_string(erp_image_path)), alt="Stimulus Response")
89 |
90 | # Delete the images
91 | os.remove(erp_image_path)
92 | os.remove(pos_image_path)
93 |
94 | # Return the html
95 | return str(a)
96 |
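97 | 
98 | # A minimal usage sketch (values are illustrative). get_html() unpacks the dict with
99 | # .values(), so the entries must be supplied in exactly this order, and erp_plot.png /
100 | # power_spectrum.png must already exist in the working directory:
101 | #
102 | #   html = get_html({
103 | #       "eeg_device": "muse2", "experiment": "visual-N170", "subject": 1,
104 | #       "session": 1, "example": False, "drop_percentage": 12.5, "epochs_chosen": 48,
105 | #   })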
--------------------------------------------------------------------------------
/eegnb/analysis/experiment_descriptions/visual-N170.txt:
--------------------------------------------------------------------------------
1 | The N170 is a large negative event-related potential (ERP) component that occurs after the detection of faces, but not objects, scrambled faces, or other body parts such as hands.
2 | In the experiment we aim to detect the N170 using faces and houses as our stimuli.
--------------------------------------------------------------------------------
/eegnb/analysis/experiment_descriptions/visual-P300.txt:
--------------------------------------------------------------------------------
1 | The P300 is a positive event-related potential (ERP) that occurs around 300ms after perceiving a novel or unexpected stimulus. It is most commonly elicited through ‘oddball’ experimental paradigms, where a certain subtype of stimulus is presented rarely amidst a background of another more common type of stimulus.
2 | In the experiment, we aim to elicit the P300 response using visual oddball stimulation.
--------------------------------------------------------------------------------
/eegnb/analysis/report.html:
--------------------------------------------------------------------------------
(HTML report template; the markup and styling were stripped during extraction. Recoverable text:)

Title: "Analysis Report"

Description: "This is an analysis report for the experiment. For more information about the experiment, please visit the documentation."

Raw Epoch: "The raw epoch is shown below. The raw epoch is the data that is recorded from the EEG headset. The raw epoch is then processed to remove noise and artifacts."

Stimulus Response: "The stimulus response is shown below. The stimulus response is the data that is recorded from the EEG headset after the raw epoch has been processed. The stimulus response is then used to calculate the power spectrum."
--------------------------------------------------------------------------------
/eegnb/analysis/styling.css:
--------------------------------------------------------------------------------
1 | /* Add a black background color to the top navigation */
2 | .topnav {
3 | background-color: #333;
4 | overflow: hidden;
5 | }
6 |
7 | /* Style the links inside the navigation bar */
8 | .topnav a {
9 | float: left;
10 | color: #f2f2f2;
11 | text-align: center;
12 | padding: 14px 16px;
13 | text-decoration: none;
14 | font-size: 17px;
15 | }
16 |
17 | /* Change the color of links on hover */
18 | .topnav a:hover {
19 | background-color: #ddd;
20 | color: black;
21 | }
22 |
23 | /* Add a color to the active/current link */
24 | .topnav a.active {
25 | background-color: #04AA6D;
26 | color: white;
27 | }
28 |
29 | /* Centre the images */
30 | img {
31 | display: block;
32 | margin-left: auto;
33 | margin-right: auto;
34 | }
--------------------------------------------------------------------------------
/eegnb/cli/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/cli/__init__.py
--------------------------------------------------------------------------------
/eegnb/cli/utils.py:
--------------------------------------------------------------------------------
1 |
2 | # change the pref library to PTB and set the latency mode to high precision
3 | from psychopy import prefs
4 | prefs.hardware['audioLib'] = 'PTB'
5 | prefs.hardware['audioLatencyMode'] = 3
6 |
7 |
8 | from eegnb.devices.eeg import EEG
9 |
10 | from eegnb.experiments import VisualN170
11 | from eegnb.experiments import VisualP300
12 | from eegnb.experiments import VisualSSVEP
13 | from eegnb.experiments import AuditoryOddball
14 | from eegnb.experiments.visual_cueing import cueing
15 | from eegnb.experiments.visual_codeprose import codeprose
16 | from eegnb.experiments.auditory_oddball import diaconescu
17 | from eegnb.experiments.auditory_ssaep import ssaep, ssaep_onefreq
18 | from typing import Optional
19 |
20 |
21 | # Note: the new Experiment class structure has a different initialization
22 | experiments = {
23 | "visual-N170": VisualN170(),
24 | "visual-P300": VisualP300(),
25 | "visual-SSVEP": VisualSSVEP(),
26 | "visual-cue": cueing,
27 | "visual-codeprose": codeprose,
28 | "auditory-SSAEP orig": ssaep,
29 | "auditory-SSAEP onefreq": ssaep_onefreq,
30 | "auditory-oddball orig": AuditoryOddball(),
31 | "auditory-oddball diaconescu": diaconescu,
32 | }
33 |
34 |
35 | def get_exp_desc(exp: str):
36 | if exp in experiments:
37 | module = experiments[exp]
38 | if hasattr(module, "__title__"):
39 | return module.__title__ # type: ignore
40 | return "{} (no description)".format(exp)
41 |
42 |
43 | def run_experiment(
44 | experiment: str, eeg_device: EEG, record_duration: Optional[float] = None, save_fn=None
45 | ):
46 | if experiment in experiments:
47 | module = experiments[experiment]
48 |
49 | # Condition added for different run types of old and new experiment class structure
50 |         if experiment in ("visual-N170", "visual-P300", "visual-SSVEP", "auditory-oddball orig"):
51 | module.duration = record_duration
52 | module.eeg = eeg_device
53 | module.save_fn = save_fn
54 | module.run()
55 | else:
56 | module.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) # type: ignore
57 | else:
58 | print("\nError: Unknown experiment '{}'".format(experiment))
59 | print("\nExperiment can be one of:")
60 | print("\n".join([" - " + exp for exp in experiments]))
61 |
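62 | 
63 | # A minimal usage sketch (assumes `eeg_device` is an already-constructed
64 | # eegnb.devices.eeg.EEG instance; the experiment key must match the dict above):
65 | #
66 | #   run_experiment("visual-N170", eeg_device, record_duration=120)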
--------------------------------------------------------------------------------
/eegnb/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .datasets import fetch_dataset
2 |
--------------------------------------------------------------------------------
/eegnb/devices/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/devices/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/Experiment_readme.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 | Looking for a general implementation structure where the base class implements the experiment loop and subclasses provide the following functions (see the sketch at the end of this note):
4 |
5 | def load_stimulus() -> stim (some form of dd array)
6 |
7 | def present_stimulus() -> given trial details does specific thing for experiment
8 |
9 | ** Slight issue: a lot of parameters will have to be passed, which is not ideal in practice
10 |
11 | Stuff that can be overwritten in general ...
12 | instruction_text
13 | parameter/trial
14 |
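15 | A minimal sketch of the intended subclass shape (mirroring the existing
16 | VisualN170 / AuditoryOddball implementations; the stimulus details are placeholders):
17 | 
18 |     from eegnb.experiments import Experiment
19 | 
20 |     class MyExperiment(Experiment.BaseExperiment):
21 |         def __init__(self, duration=120, eeg=None, save_fn=None,
22 |                      n_trials=2010, iti=0.4, soa=0.3, jitter=0.2):
23 |             super().__init__("My Experiment", duration, eeg, save_fn,
24 |                              n_trials, iti, soa, jitter)
25 | 
26 |         def load_stimulus(self):
27 |             # build and return the stimulus objects used by present_stimulus()
28 |             return []
29 | 
30 |         def present_stimulus(self, idx: int):
31 |             # draw / play the stimulus for trial idx and push an EEG marker
32 |             ...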
--------------------------------------------------------------------------------
/eegnb/experiments/__init__.py:
--------------------------------------------------------------------------------
1 | from .visual_n170.n170 import VisualN170
2 | from .visual_p300.p300 import VisualP300
3 | from .visual_ssvep.ssvep import VisualSSVEP
4 |
5 | # PTB does not yet support macOS Apple Silicon,
6 | # this experiment needs to run as i386 if on macOS.
7 | import sys
8 | import platform
9 | if sys.platform != 'darwin' or platform.processor() != 'arm':
10 | from .auditory_oddball.aob import AuditoryOddball
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_oddball/MUSE_conditions.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/auditory_oddball/MUSE_conditions.mat
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_oddball/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/auditory_oddball/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_oddball/aMMN.py:
--------------------------------------------------------------------------------
1 | import os
2 | from time import time, sleep
3 | from glob import glob
4 | from random import choice
5 | from optparse import OptionParser
6 |
7 | import numpy as np
8 | from pandas import DataFrame
9 | from psychopy import visual, core, event, sound
10 |
11 | from eegnb import generate_save_fn
12 | from typing import Optional
13 |
14 |
15 | def present(
16 | save_fn: Optional[str] = None,
17 | duration=120,
18 | stim_types=None,
19 | itis=None,
20 | additional_labels={},
21 | secs=0.07,
22 | volume=0.8,
23 | eeg=None,
24 | ):
25 | markernames = [1, 2]
26 | record_duration = np.float32(duration)
27 |
28 | ## Initialize stimuli
29 | # aud1 = sound.Sound('C', octave=5, sampleRate=44100, secs=secs)
30 | aud1 = sound.Sound(440, secs=secs) # , octave=5, sampleRate=44100, secs=secs)
31 | aud1.setVolume(volume)
32 |
33 | # aud2 = sound.Sound('D', octave=6, sampleRate=44100, secs=secs)
34 | aud2 = sound.Sound(528, secs=secs)
35 | aud2.setVolume(volume)
36 | auds = [aud1, aud2]
37 |
38 | # Setup trial list
39 | trials = DataFrame(dict(sound_ind=stim_types, iti=itis))
40 |
41 | for col_name, col_vec in additional_labels.items():
42 | trials[col_name] = col_vec
43 |
44 | # Setup graphics
45 | mywin = visual.Window(
46 | [1920, 1080], monitor="testMonitor", units="deg", fullscr=True
47 | )
48 | fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
49 | fixation.setAutoDraw(True)
50 | mywin.flip()
51 | iteratorthing = 0
52 |
53 | # start the EEG stream, will delay 5 seconds to let signal settle
54 | if eeg:
55 | eeg.start(save_fn, duration=record_duration)
56 |
57 | show_instructions(10)
58 |
59 | # Start EEG Stream, wait for signal to settle, and then pull timestamp for start point
60 | start = time()
61 |
62 | # Iterate through the events
63 | for ii, trial in trials.iterrows():
64 |
65 | iteratorthing = iteratorthing + 1
66 |
67 | # Inter trial interval
68 | core.wait(trial["iti"])
69 |
70 | # Select and display image
71 | ind = int(trial["sound_ind"])
72 | auds[ind].stop()
73 | auds[ind].play()
74 |
75 | # Push sample
76 | if eeg:
77 | timestamp = time()
78 | if eeg.backend == "muselsl":
79 | marker = [additional_labels["labels"][iteratorthing - 1]]
80 | marker = list(map(int, marker))
81 | else:
82 | marker = additional_labels["labels"][iteratorthing - 1]
83 | eeg.push_sample(marker=marker, timestamp=timestamp)
84 |
85 | mywin.flip()
86 | if len(event.getKeys()) > 0:
87 | break
88 | if (time() - start) > record_duration:
89 | break
90 |
91 | event.clearEvents()
92 |
93 | if iteratorthing == 1798:
94 | sleep(10)
95 |
96 | # Cleanup
97 | if eeg:
98 | eeg.stop()
99 |
100 | mywin.close()
101 |
102 |
103 | def show_instructions(duration):
104 |
105 | instruction_text = """
106 | Welcome to the aMMN experiment!
107 |
108 | Stay still, focus on the centre of the screen, and try not to blink.
109 |
110 | This block will run for %s seconds.
111 |
112 | Press spacebar to continue.
113 |
114 | """
115 | instruction_text = instruction_text % duration
116 |
117 | # graphics
118 | mywin = visual.Window([1600, 900], monitor="testMonitor", units="deg", fullscr=True)
119 |
120 | mywin.mouseVisible = False
121 |
122 | # Instructions
123 | text = visual.TextStim(win=mywin, text=instruction_text, color=[-1, -1, -1])
124 | text.draw()
125 | mywin.flip()
126 | event.waitKeys(keyList="space")
127 |
128 | mywin.mouseVisible = True
129 | mywin.close()
130 |
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_oddball/aob.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandas import DataFrame
3 | from psychopy import prefs
4 |
5 | from psychopy import visual, core, event, sound
6 |
7 | from time import time
8 | from eegnb.devices.eeg import EEG
9 | from eegnb.experiments import Experiment
10 | from typing import Optional
11 |
12 |
13 | class AuditoryOddball(Experiment.BaseExperiment):
14 |
15 | def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None, n_trials = 2010, iti = 0.3, soa = 0.2, jitter = 0.2, secs=0.2, volume=0.8, random_state=42, s1_freq="C", s2_freq="D", s1_octave=5, s2_octave=6):
16 |
17 | """
18 |
19 | Auditory Oddball Experiment
20 | ===========================
21 |
22 | Unique Parameters:
23 | -----------
24 |
25 | secs - duration of the sound in seconds (default 0.2)
26 |
27 | volume - volume of the sounds in [0,1] (default 0.8)
28 |
29 | random_state - random seed (default 42)
30 |
31 | s1_freq - frequency of first tone
32 | s2_freq - frequency of second tone
33 |
34 | s1_octave - octave of first tone
35 | s2_octave - octave of second tone
36 |
37 | """
38 |
39 | exp_name = "Auditory Oddball"
40 | super().__init__(exp_name, duration, eeg, save_fn, n_trials, iti, soa, jitter)
41 | self.secs = secs
42 | self.volume = volume
43 | self.random_state = random_state
44 | self.s1_freq = s1_freq
45 | self.s2_freq = s2_freq
46 | self.s1_octave = s1_octave
47 | self.s2_octave = s2_octave
48 |
49 | def load_stimulus(self):
50 | """ Loads the Stimulus """
51 |
52 | # Set up trial parameters
53 | np.random.seed(self.random_state)
54 |
55 | # Initialize stimuli
56 | aud1, aud2 = sound.Sound(self.s1_freq, octave=self.s1_octave, secs=self.secs), sound.Sound(self.s2_freq, octave=self.s2_octave, secs=self.secs)
57 | aud1.setVolume(self.volume)
58 | aud2.setVolume(self.volume)
59 | self.auds = [aud1, aud2]
60 |
61 | # Setup trial list
62 | sound_ind = np.random.binomial(1, 0.25, self.n_trials)
63 | itis = self.iti + np.random.rand(self.n_trials) * self.jitter
64 | self.trials = DataFrame(dict(sound_ind=sound_ind, iti=itis))
65 | self.trials["soa"] = self.soa
66 | self.trials["secs"] = self.secs
67 |
68 | self.fixation = visual.GratingStim(win=self.window, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
69 | self.fixation.setAutoDraw(True)
70 | self.window.flip()
71 |
72 | return
73 |
74 | def present_stimulus(self, idx: int):
75 | """ Presents the Stimulus """
76 |
77 | # Select and play sound
78 | ind = int(self.trials["sound_ind"].iloc[idx])
79 | self.auds[ind].stop()
80 | self.auds[ind].play()
81 |
82 | # Push sample
83 | if self.eeg:
84 | timestamp = time()
85 | marker = [self.markernames[ind]]
86 | marker = list(map(int, marker))
87 | self.eeg.push_sample(marker=marker, timestamp=timestamp)
88 |
89 |
90 |
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_oddball/auditory_erp_aux.py:
--------------------------------------------------------------------------------
1 | """Generate sound-only auditory oddball stimulus presentation.
2 | """
3 | import time
4 | from optparse import OptionParser
5 |
6 | import numpy as np
7 | from pandas import DataFrame
8 | from psychopy import visual, core, event, sound
9 | from pylsl import StreamInfo, StreamOutlet
10 |
11 |
12 | def present(
13 | duration=120,
14 | n_trials=10,
15 | iti=0.3,
16 | soa=0.2,
17 | jitter=0.2,
18 | secs=0.2,
19 | volume=0.8,
20 | random_state=None,
21 | ):
22 |
23 | # Create markers stream outlet
24 | info = StreamInfo("Markers", "Markers", 1, 0, "int32", "myuidw43536")
25 | outlet = StreamOutlet(info)
26 |
27 | np.random.seed(random_state)
28 | markernames = [1, 2]
29 | start = time.time()
30 |
31 | # Set up trial parameters
32 | record_duration = np.float32(duration)
33 |
34 | # Initialize stimuli
35 | aud1 = sound.Sound("C", octave=5, sampleRate=44100, secs=secs)
36 | aud1.setVolume(volume)
37 | aud2 = sound.Sound("D", octave=6, sampleRate=44100, secs=secs)
38 | aud2.setVolume(volume)
39 | auds = [aud1, aud2]
40 |
41 | # Setup trial list
42 | sound_ind = np.random.binomial(1, 0.25, n_trials)
43 | itis = iti + np.random.rand(n_trials) * jitter
44 | trials = DataFrame(dict(sound_ind=sound_ind, iti=itis))
45 | trials["soa"] = soa
46 | trials["secs"] = secs
47 |
48 | for ii, trial in trials.iterrows():
49 |
50 | # Intertrial interval
51 | time.sleep(trial["iti"])
52 |
53 | # Select and play sound
54 | ind = int(trial["sound_ind"])
55 | auds[ind].stop()
56 | auds[ind].play()
57 |
58 | # Send marker
59 | timestamp = time.time()
60 | outlet.push_sample([markernames[ind]], timestamp)
61 |
62 | # Offset
63 | # time.sleep(soa)
64 | # if (time.time() - start) > record_duration:
65 | # break
66 |
67 | # offset
68 | core.wait(soa)
69 | if len(event.getKeys()) > 0 or (time.time() - start) > record_duration:
70 | break
71 | event.clearEvents()
72 |
73 | # if len(event.getKeys()) > 0 or (time() - start) > record_duration:
74 | # break
75 | # event.clearEvents()
76 |
77 | return trials
78 |
79 |
80 | def main():
81 | parser = OptionParser()
82 |
83 | parser.add_option(
84 | "-d",
85 | "--duration",
86 | dest="duration",
87 | type="int",
88 | default=10,
89 | help="duration of the recording in seconds.",
90 | )
91 | parser.add_option(
92 | "-n",
93 | "--n_trials",
94 | dest="n_trials",
95 | type="int",
96 | default=10,
97 | help="number of trials.",
98 | )
99 | parser.add_option(
100 | "-i", "--iti", dest="iti", type="float", default=0.3, help="intertrial interval"
101 | )
102 | parser.add_option(
103 | "-s",
104 | "--soa",
105 | dest="soa",
106 | type="float",
107 | default=0.2,
108 | help="interval between end of stimulus and next trial.",
109 | )
110 | parser.add_option(
111 | "-j",
112 | "--jitter",
113 | dest="jitter",
114 | type="float",
115 | default=0.2,
116 | help="jitter in the intertrial intervals.",
117 | )
118 | parser.add_option(
119 | "-e",
120 | "--secs",
121 | dest="secs",
122 | type="float",
123 | default=0.2,
124 | help="duration of the sound in seconds.",
125 | )
126 | parser.add_option(
127 | "-v",
128 | "--volume",
129 | dest="volume",
130 | type="float",
131 | default=0.8,
132 | help="volume of the sounds in [0, 1].",
133 | )
134 | parser.add_option(
135 | "-r",
136 | "--randomstate",
137 | dest="random_state",
138 | type="int",
139 | default=42,
140 | help="random seed",
141 | )
142 |
143 | (options, args) = parser.parse_args()
144 | trials_df = present(
145 | duration=options.duration,
146 |         n_trials=options.n_trials,
147 | iti=options.iti,
148 | soa=options.soa,
149 | jitter=options.jitter,
150 | secs=options.secs,
151 | volume=options.volume,
152 | random_state=options.random_state,
153 | )
154 |
155 | print(trials_df)
156 |
157 |
158 | if __name__ == "__main__":
159 | main()
160 |
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_oddball/diaconescu.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import h5py
5 |
6 | import eegnb
7 | from . import aMMN
8 |
9 | __title__ = "Auditory oddball (diaconescu)"
10 |
11 |
12 | def makeoddball(inputs, rep):
13 | # based on inputs, creating oddball paradigms markers depending on "switch"
14 | value = inputs[0]
15 | count = 0
16 | markerArray = []
17 | for i in range(len(inputs)):
18 | if inputs[i] == value:
19 | count += 1
20 | if count == rep:
21 | markerArray.append(1)
22 | else:
23 | markerArray.append(3)
24 | else:
25 | if count == rep + 1:
26 | markerArray.append(2)
27 |
28 | else:
29 | markerArray.append(4)
30 | value = inputs[i]
31 | count = 1
32 | return markerArray
33 |
34 |
35 | def maketonesnums(num):
36 | newArray = []
37 | for i in range(num):
38 | newArray.append(90000 + i)
39 | return newArray
40 |
41 |
42 | def present(duration: int, eeg, save_fn: str):
43 | eegnb_dir = os.path.dirname(eegnb.__file__)
44 | mcond_file = os.path.join(
45 | eegnb_dir, "experiments", "auditory_oddball", "MUSE_conditions.mat"
46 | )
47 |
48 | F = h5py.File(mcond_file, "r") # ['museEEG']
49 | highPE = np.squeeze(F["museEEG"]["design"]["highPE"][:]).astype(int)
50 | lowPE = np.squeeze(F["museEEG"]["design"]["lowPE"][:]).astype(int)
51 | inputs = np.squeeze(F["museEEG"]["design"]["inputs"][:]).astype(int)
52 |
53 | # based on inputs, creating oddball paradigms markers depending on "switch"
54 | tonenums = maketonesnums(1800)
55 | oddball3 = makeoddball(inputs, 3)
56 | oddball4 = makeoddball(inputs, 4)
57 | oddball5 = makeoddball(inputs, 5)
58 | oddball6 = makeoddball(inputs, 6)
59 |
60 | # modifying 0s in PE definitions of tones that represent markers to 3s to avoid loss of trials instead of ignoring them
61 | for i in range(len(highPE)):
62 | if highPE[i] == 0:
63 | highPE[i] = 3
64 | if lowPE[i] == 0:
65 | lowPE[i] = 3
66 |
67 | # 1 is standard/bottom, 2 is deviant/high, 3 is "baseline trial"
68 |
69 | stim_types = inputs
70 | itis = np.ones_like(inputs) * 0.5
71 |
72 | newAdditionalMarkers = []
73 |
74 | for i in range(0, len(highPE)):
75 | newAdditionalMarker = (
76 | str(oddball3[i])
77 | + str(oddball4[i])
78 | + str(oddball5[i])
79 | + str(oddball6[i])
80 | + str(highPE[i])
81 | + str(lowPE[i])
82 | )
83 | newAdditionalMarkers.append(newAdditionalMarker)
84 |
85 | aMMN.present(
86 | duration=duration,
87 | stim_types=stim_types,
88 | itis=itis,
89 | additional_labels={"labels": newAdditionalMarkers},
90 | eeg=eeg,
91 | save_fn=save_fn,
92 | )
93 |
--------------------------------------------------------------------------------
/eegnb/experiments/auditory_ssaep/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/auditory_ssaep/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/n170/__init__.py:
--------------------------------------------------------------------------------
1 | # n170 experiments
2 |
--------------------------------------------------------------------------------
/eegnb/experiments/visual_baselinetask/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_baselinetask/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_baselinetask/baseline_task.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | from time import sleep
3 | import os
4 | import random
5 |
6 | import numpy as np
7 | from pandas import DataFrame
8 | from psychopy import prefs
9 |
10 | prefs.general["audioLib"] = ["pygame"]
11 | from psychopy import visual, core, event, sound
12 | from pylsl import StreamInfo, StreamOutlet, local_clock
13 |
14 | # Create markers stream outlet
15 | info = StreamInfo("Markers", "Markers", 1, 0, "int32", "myuidw43536")
16 | outlet = StreamOutlet(info)
17 |
18 | start = time()
19 |
20 | # Initialize stimuli
21 | aud1 = sound.Sound("C", octave=5, sampleRate=44100, secs=0.5)
22 | aud1.setVolume(0.025)
23 |
24 | # Setup graphics
25 | mywin = visual.Window([1440, 900], monitor="testMonitor", units="deg", fullscr=True)
26 |
27 | # Hide the mouse cursor
28 | mywin.mouseVisible = False
29 |
30 | # define the length of each block
31 | exp_length = 20.0
32 |
33 | # randomly pick our condition order
34 | cond_order = random.randint(1, 2)
35 |
36 | # setup our instructions
37 | instr1 = visual.TextStim(
38 | mywin,
39 | text="Keep your eyes open for 20 seconds and focus on the central fixation. You CAN blink during this time. Press the spacebar to begin.",
40 | pos=(0, -3),
41 | )
42 | instr2 = visual.TextStim(
43 | mywin,
44 | text="Keep your eyes closed for 20 seconds, Open them when you hear a long beep/tone. Close them and press the spacebar to begin.",
45 | pos=(0, -3),
46 | )
47 | instr3 = visual.TextStim(mywin, text="Keep your eyes closed at this time.", pos=(0, -3))
48 | instr4 = visual.TextStim(
49 | mywin,
50 | text="You have finished the experiment! Press the spacebar to exit.",
51 | pos=(0, -3),
52 | )
53 |
54 | # setup the fixation
55 | fixation = visual.GratingStim(win=mywin, size=0.1, pos=[0, 0], sf=0, rgb=[1, 1, 1])
56 |
57 | core.wait(2)
58 |
59 | if cond_order == 1:
60 | timestamp = local_clock()
61 | outlet.push_sample([1], timestamp)
62 | core.wait(1)
63 | # display instructions for the first eyes-open block
64 | instr1.setAutoDraw(True)
65 | fixation.setAutoDraw(True)
66 | mywin.flip()
67 | event.waitKeys()
68 |
69 | # start the first eyes-open block
70 | instr1.setAutoDraw(False)
71 | mywin.flip()
72 | timestamp = local_clock()
73 | outlet.push_sample([11], timestamp)
74 | core.wait(exp_length)
75 |
76 | # display instructions for the first eyes-closed block
77 | instr2.setAutoDraw(True)
78 | mywin.flip()
79 | event.waitKeys()
80 |
81 | # start first eyes-closed block
82 | instr2.setAutoDraw(False)
83 | instr3.setAutoDraw(True)
84 | mywin.flip()
85 | timestamp = local_clock()
86 | outlet.push_sample([21], timestamp)
87 | aud1.play()
88 | core.wait(exp_length)
89 | aud1.play()
90 |
91 |
92 | elif cond_order == 2:
93 | timestamp = local_clock()
94 | outlet.push_sample([2], timestamp)
95 | core.wait(1)
96 | # display instructions for the first eyes-closed block
97 | fixation.setAutoDraw(True)
98 | instr2.setAutoDraw(True)
99 | mywin.flip()
100 | event.waitKeys()
101 |
102 | # start first eyes-closed block
103 | instr2.setAutoDraw(False)
104 | instr3.setAutoDraw(True)
105 | mywin.flip()
106 | timestamp = local_clock()
107 | outlet.push_sample([21], timestamp)
108 | aud1.play()
109 | core.wait(exp_length)
110 | aud1.play()
111 |
112 | # display instructions for the first eyes-open block
113 | instr3.setAutoDraw(False)
114 | instr1.setAutoDraw(True)
115 | fixation.setAutoDraw(True)
116 | mywin.flip()
117 | event.waitKeys()
118 |
119 | # start the first eyes-open block
120 | instr1.setAutoDraw(False)
121 | mywin.flip()
122 | timestamp = local_clock()
123 | outlet.push_sample([11], timestamp)
124 | core.wait(exp_length)
125 |
126 |
127 | # display end screen
128 | instr3.setAutoDraw(False)
129 | instr4.setAutoDraw(True)
130 | mywin.flip()
131 | event.waitKeys()
132 |
133 |
134 | # Cleanup
135 | mywin.close()
136 | sleep(5.0)
137 |
138 | os.remove("Stop_EEG.csv")
139 |
--------------------------------------------------------------------------------
/eegnb/experiments/visual_codeprose/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_codeprose/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_cueing/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_cueing/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_gonogo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_gonogo/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_n170/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_n170/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_n170/n170.py:
--------------------------------------------------------------------------------
1 | """ eeg-notebooks/eegnb/experiments/visual_n170/n170.py """
2 |
3 | from psychopy import prefs
4 | # change the pref library to PTB and set the latency mode to high precision
5 | prefs.hardware['audioLib'] = 'PTB'
6 | prefs.hardware['audioLatencyMode'] = 3
7 |
8 | import os
9 | from time import time
10 | from glob import glob
11 | from random import choice
12 | from psychopy import visual, core, event
13 |
14 | from eegnb.devices.eeg import EEG
15 | from eegnb.stimuli import FACE_HOUSE
16 | from eegnb.experiments import Experiment
17 | from typing import Optional
18 |
19 |
20 |
21 | class VisualN170(Experiment.BaseExperiment):
22 |
23 | def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None,
24 |
25 | n_trials = 2010, iti = 0.4, soa = 0.3, jitter = 0.2, use_vr = False):
26 |
27 | # Set experiment name
28 | exp_name = "Visual N170"
29 | # Calling the super class constructor to initialize the experiment variables
30 | super(VisualN170, self).__init__(exp_name, duration, eeg, save_fn, n_trials, iti, soa, jitter, use_vr)
31 |
32 | def load_stimulus(self):
33 |
34 | # Loading Images from the folder
35 | load_image = lambda fn: visual.ImageStim(win=self.window, image=fn)
36 |
37 | # Setting up images for the stimulus
38 | self.faces = list(map(load_image, glob(os.path.join(FACE_HOUSE, "faces", "*_3.jpg"))))
39 | self.houses = list(map(load_image, glob(os.path.join(FACE_HOUSE, "houses", "*.3.jpg"))))
40 |
41 | # Return the list of images as a stimulus object
42 | return [self.houses, self.faces]
43 |
44 | def present_stimulus(self, idx: int):
45 |
46 | # Get the label of the trial
47 | label = self.trials["parameter"].iloc[idx]
48 | # Get the image to be presented
49 | image = choice(self.faces if label == 1 else self.houses)
50 | # Draw the image
51 | image.draw()
52 |
53 | # Pushing the sample to the EEG
54 | if self.eeg:
55 | timestamp = time()
56 | if self.eeg.backend == "muselsl":
57 | marker = [self.markernames[label]]
58 | else:
59 | marker = self.markernames[label]
60 | self.eeg.push_sample(marker=marker, timestamp=timestamp)
61 |
62 | self.window.flip()
--------------------------------------------------------------------------------
/eegnb/experiments/visual_n170/n170_fixedstimorder.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate N170
3 | =============
4 |
5 | Face vs. house paradigm stimulus presentation for evoking the N170 response.
6 |
7 | """
8 |
9 | from psychopy import prefs
11 | # change the pref library to PTB and set the latency mode to high precision
11 | prefs.hardware['audioLib'] = 'PTB'
12 | prefs.hardware['audioLatencyMode'] = 3
13 |
14 | from time import time
15 | from optparse import OptionParser
16 | import os
17 | from glob import glob
18 | from random import choice
19 |
20 | import numpy as np
21 | from pandas import DataFrame, read_csv
22 | from psychopy import visual, core, event
23 | from pylsl import StreamInfo, StreamOutlet
24 |
25 | from eegnb import stimuli, experiments
26 |
27 | stim_dir = os.path.split(stimuli.__file__)[0]
28 | exp_dir = os.path.split(experiments.__file__)[0]
29 |
30 | # fixed stim order list file
31 | fso_list_file = os.path.join(exp_dir, "visual_n170", "n170_fixedstimorder_list.csv")
32 |
33 |
34 | def present(duration=120):
35 |
36 | # Create markers stream outlet
37 | # info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')
38 | info = StreamInfo("Markers", "Markers", 3, 0, "int32", "myuidw43536")
39 | outlet = StreamOutlet(info)
40 |
41 | # markernames = [1, 2]
42 | start = time()
43 |
44 | # Set up trial parameters
45 | # n_trials = 2010
46 | iti = 0.8
47 | soa = 0.2
48 | jitter = 0.2
49 | record_duration = np.float32(duration)
50 |
51 | # Setup trial list
52 | # image_type = np.random.binomial(1, 0.5, n_trials)
53 | # trials = DataFrame(dict(image_type=image_type,
54 | # timestamp=np.zeros(n_trials)))
55 |
56 | fso_ims = read_csv(fso_list_file)
57 | n_trials = fso_ims.shape[0]
58 |
59 | # Setup graphics
60 |
61 | def load_image(filename):
62 | return visual.ImageStim(win=mywin, image=filename)
63 |
64 | mywin = visual.Window(
65 | [1600, 900], monitor="testMonitor", units="deg", winType="pygame", fullscr=True
66 | )
67 |
68 | # faces = list(map(load_image, glob(
69 | # 'stimulus_presentation/stim/face_house/faces/*_3.jpg')))
70 | # houses = list(map(load_image, glob(
71 | # 'stimulus_presentation/stim/face_house/houses/*.3.jpg')))
72 |
73 | # for ii, trial in trials.iterrows():
74 | for ii, trial in fso_ims.iterrows():
75 |
76 | trialnum, filename, facehouse, girlboy = trial.values
77 | filename = os.path.join(stim_dir, filename)
78 |
79 | # Intertrial interval
80 | core.wait(iti + np.random.rand() * jitter)
81 |
82 | # Select and display image
83 | # label = trials['image_type'].iloc[ii]
84 | # image = choice(faces if label == 1 else houses)
85 | image = load_image(filename)
86 |
87 | image.draw()
88 |
89 | # Send marker
90 | timestamp = time()
91 | # outlet.push_sample([markernames[label]], timestamp)
92 | outlet.push_sample([trialnum, facehouse + 1, girlboy + 1], timestamp)
93 |
94 | mywin.flip()
95 |
96 | # offset
97 | core.wait(soa)
98 | mywin.flip()
99 | if len(event.getKeys()) > 0 or (time() - start) > record_duration:
100 | break
101 | event.clearEvents()
102 |
103 | # Cleanup
104 | mywin.close()
105 |
106 |
107 | def main():
108 | parser = OptionParser()
109 |
110 | parser.add_option(
111 | "-d",
112 | "--duration",
113 | dest="duration",
114 | type="int",
115 | default=120,
116 | help="duration of the recording in seconds.",
117 | )
118 |
119 | (options, args) = parser.parse_args()
120 | present(options.duration)
121 |
122 |
123 | if __name__ == "__main__":
124 | main()
125 |
--------------------------------------------------------------------------------
/eegnb/experiments/visual_n170/n170_old.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate N170
3 | =============
4 |
5 | Face vs. house paradigm stimulus presentation for evoking the N170 response.
6 |
7 | """
8 |
9 | from time import time
10 | from optparse import OptionParser
11 | import os
12 | from glob import glob
13 | from random import choice
14 |
15 | import numpy as np
16 | from pandas import DataFrame
17 | from psychopy import visual, core, event
18 | from pylsl import StreamInfo, StreamOutlet
19 |
20 | from eegnb import stimuli
21 |
22 | stim_dir = os.path.split(stimuli.__file__)[0]
23 | faces_dir = os.path.join(stim_dir, "visual", "face_house", "faces")
24 | houses_dir = os.path.join(stim_dir, "visual", "face_house", "houses")
25 |
26 |
27 | def present(duration=120):
28 |
29 | # Create markers stream outlet
30 | info = StreamInfo("Markers", "Markers", 1, 0, "int32", "myuidw43536")
31 | outlet = StreamOutlet(info)
32 |
33 | markernames = [1, 2]
34 | start = time()
35 |
36 | # Set up trial parameters
37 | n_trials = 2010
38 | iti = 0.8
39 | soa = 0.2
40 | jitter = 0.2
41 | record_duration = np.float32(duration)
42 |
43 | # Setup trial list
44 | image_type = np.random.binomial(1, 0.5, n_trials)
45 | trials = DataFrame(dict(image_type=image_type, timestamp=np.zeros(n_trials)))
46 |
47 | # Setup graphics
48 |
49 | def load_image(filename):
50 | return visual.ImageStim(win=mywin, image=filename)
51 |
52 | mywin = visual.Window(
53 | [1600, 900], monitor="testMonitor", units="deg", winType="pygame", fullscr=True
54 | )
55 | # faces = list(map(load_image, glob(
56 | # 'stimulus_presentation/stim/face_house/faces/*_3.jpg')))
57 | faces = list(map(load_image, glob(faces_dir + "/*_3.jpg")))
58 | # houses = list(map(load_image, glob(
59 | # 'stimulus_presentation/stim/face_house/houses/*.3.jpg')))
60 | houses = list(map(load_image, glob(houses_dir + "/*.3.jpg")))
61 |
62 | for ii, trial in trials.iterrows():
63 | # Intertrial interval
64 | core.wait(iti + np.random.rand() * jitter)
65 |
66 | # Select and display image
67 | label = trials["image_type"].iloc[ii]
68 | image = choice(faces if label == 1 else houses)
69 | image.draw()
70 |
71 | # Send marker
72 | timestamp = time()
73 | outlet.push_sample([markernames[label]], timestamp)
74 | mywin.flip()
75 |
76 | # offset
77 | core.wait(soa)
78 | mywin.flip()
79 | if len(event.getKeys()) > 0 or (time() - start) > record_duration:
80 | break
81 | event.clearEvents()
82 |
83 | # Cleanup
84 | mywin.close()
85 |
86 |
87 | def main():
88 | parser = OptionParser()
89 |
90 | parser.add_option(
91 | "-d",
92 | "--duration",
93 | dest="duration",
94 | type="int",
95 | default=120,
96 | help="duration of the recording in seconds.",
97 | )
98 |
99 | (options, args) = parser.parse_args()
100 | present(options.duration)
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
/eegnb/experiments/visual_p300/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_p300/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_p300/p300.py:
--------------------------------------------------------------------------------
1 |
2 | """ eeg-notebooks/eegnb/experiments/visual_p300/p300.py """
3 |
4 | import os
5 | from time import time
6 | from glob import glob
7 | from random import choice
8 | from optparse import OptionParser
9 | import random
10 |
11 | import numpy as np
12 | from pandas import DataFrame
13 | from psychopy import visual, core, event
14 |
15 | from eegnb.stimuli import CAT_DOG
16 | from eegnb.experiments import Experiment
17 | from eegnb.devices.eeg import EEG
18 | from typing import Optional
19 |
20 | class VisualP300(Experiment.BaseExperiment):
21 |
22 | def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None,
23 |
24 | n_trials = 2010, iti = 0.4, soa = 0.3, jitter = 0.2, use_vr = False):
25 |
26 | exp_name = "Visual P300"
27 | super().__init__(exp_name, duration, eeg, save_fn, n_trials, iti, soa, jitter, use_vr)
28 |
29 | def load_stimulus(self):
30 |
31 | load_image = lambda fn: visual.ImageStim(win=self.window, image=fn)
32 |
33 | self.targets = list(map(load_image, glob(os.path.join(CAT_DOG, "target-*.jpg"))))
34 | self.nontargets = list(map(load_image, glob(os.path.join(CAT_DOG, "nontarget-*.jpg"))))
35 |
36 | return [self.nontargets, self.targets]
37 |
38 | def present_stimulus(self, idx: int):
39 |
40 | label = self.trials["parameter"].iloc[idx]
41 | image = choice(self.targets if label == 1 else self.nontargets)
42 | image.draw()
43 |
44 | # Push sample
45 | if self.eeg:
46 | timestamp = time()
47 | if self.eeg.backend == "muselsl":
48 | marker = [self.markernames[label]]
49 | else:
50 | marker = self.markernames[label]
51 | self.eeg.push_sample(marker=marker, timestamp=timestamp)
52 |
53 | self.window.flip()
--------------------------------------------------------------------------------
/eegnb/experiments/visual_p300/p300_stripes.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandas import DataFrame
3 | from psychopy import visual, core, event
4 | from time import time, strftime, gmtime
5 | from optparse import OptionParser
6 | from pylsl import StreamInfo, StreamOutlet
7 |
8 |
9 | def present(duration, subject, run):
10 |
11 | # create
12 | info = StreamInfo("Markers", "Markers", 1, 0, "int32", "myuidw43536")
13 |
14 | # next make an outlet
15 | outlet = StreamOutlet(info)
16 |
17 | markernames = [1, 2]
18 |
19 | start = time()
20 |
21 | n_trials = 2010
22 | iti = 0.4
23 | soa = 0.3
24 | jitter = 0.2
25 | record_duration = np.float32(duration)
26 |
27 | # Setup log
28 | position = np.random.binomial(1, 0.15, n_trials)
29 |
30 | trials = DataFrame(dict(position=position, timestamp=np.zeros(n_trials)))
31 |
32 | # graphics
33 | mywin = visual.Window(
34 | [1920, 1080], monitor="testMonitor", units="deg", fullscr=True
35 | )
36 | grating = visual.GratingStim(win=mywin, mask="gauss", size=40, sf=2)
37 | fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
38 |
39 | for ii, trial in trials.iterrows():
40 | # inter trial interval
41 | core.wait(iti + np.random.rand() * jitter)
42 |
43 | # onset
44 | grating.phase += np.random.rand()
45 | pos = trials["position"].iloc[ii]
46 | grating.ori = 90 * pos
47 | grating.draw()
48 | fixation.draw()
49 | timestamp = time()
50 | outlet.push_sample([markernames[pos]], timestamp)
51 | mywin.flip()
52 |
53 | # offset
54 | core.wait(soa)
55 | fixation.draw()
56 | mywin.flip()
57 | if len(event.getKeys()) > 0 or (time() - start) > record_duration:
58 | break
59 | event.clearEvents()
60 | # Cleanup
61 | mywin.close()
62 |
63 |
64 | def main():
65 | parser = OptionParser()
66 |
67 | parser.add_option(
68 | "-d",
69 | "--duration",
70 | dest="duration",
71 | type="int",
72 | default=120,
73 | help="duration of the recording in seconds.",
74 | )
75 | parser.add_option(
76 | "-s",
77 | "--subject",
78 | dest="subject",
79 | type="int",
80 | default=1,
81 | help="subject number: must be an integer",
82 | )
83 | parser.add_option(
84 | "-r",
85 | "--run",
86 | dest="run",
87 | type="int",
88 | default=1,
89 | help="run (session) number: must be an integer",
90 | )
91 |
92 | (options, args) = parser.parse_args()
93 |     present(options.duration, options.subject, options.run)
96 |
97 |
98 | if __name__ == "__main__":
99 | main()
100 |
--------------------------------------------------------------------------------
/eegnb/experiments/visual_ssvep/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_ssvep/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_ssvep/ssvep.py:
--------------------------------------------------------------------------------
1 |
2 | from eegnb.experiments import Experiment
3 | import os
4 | from time import time
5 | from glob import glob
6 | from random import choice
7 |
8 | import numpy as np
9 | from pandas import DataFrame
10 | from psychopy import visual, core, event
11 |
12 |
13 | from eegnb.devices.eeg import EEG
14 | from eegnb import generate_save_fn
15 | from typing import Optional
16 |
17 |
18 | class VisualSSVEP(Experiment.BaseExperiment):
19 |
20 | def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None, n_trials = 2010, iti = 0.5, soa = 3.0, jitter = 0.2, use_vr=False):
21 |
22 | self.use_vr = use_vr
23 | exp_name = "Visual SSVEP"
24 | super().__init__(exp_name, duration, eeg, save_fn, n_trials, iti, soa, jitter, use_vr)
25 |
26 | def load_stimulus(self):
27 |
28 | grating_sf = 400 if self.use_vr else 0.2
29 | self.grating = visual.GratingStim(win=self.window, mask="circle", size=80, sf=grating_sf)
30 | self.grating_neg = visual.GratingStim(win=self.window, mask="circle", size=80, sf=grating_sf, phase=0.5)
31 |
32 | self.fixation = visual.GratingStim(win=self.window, pos=[0, 0], sf=grating_sf, color=[1, 0, 0])
33 | self.fixation.size = 0.02 if self.use_vr else 0.2
34 |
35 | # Generate the possible ssvep frequencies based on monitor refresh rate
36 | def get_possible_ssvep_freqs(frame_rate, stim_type="single"):
37 |
38 | max_period_nb = int(frame_rate / 6)
39 | periods = np.arange(max_period_nb) + 1
40 |
41 | if stim_type == "single":
42 | freqs = dict()
43 | for p1 in periods:
44 | for p2 in periods:
45 | f = frame_rate / (p1 + p2)
46 | try:
47 | freqs[f].append((p1, p2))
48 |                 except KeyError:
49 | freqs[f] = [(p1, p2)]
50 |
51 | elif stim_type == "reversal":
52 | freqs = {frame_rate / p: [(p, p)] for p in periods[::-1]}
53 |
54 | return freqs
55 |
56 | def init_flicker_stim(frame_rate, cycle, soa):
57 |
58 | if isinstance(cycle, tuple):
59 | stim_freq = frame_rate / sum(cycle)
60 | n_cycles = int(soa * stim_freq)
61 |
62 | else:
63 | stim_freq = frame_rate / cycle
64 | cycle = (cycle, cycle)
65 | n_cycles = int(soa * stim_freq) / 2
66 |
67 | return {"cycle": cycle, "freq": stim_freq, "n_cycles": n_cycles}
68 |
69 | # Set up stimuli
70 |
71 | # Frame rate, in Hz
72 |         # getActualFrameRate() crashes in psychxr due to 'EndFrame called before BeginFrame'
73 | frame_rate = np.round(self.window.displayRefreshRate if self.use_vr else self.window.getActualFrameRate())
74 | freqs = get_possible_ssvep_freqs(frame_rate, stim_type="reversal")
75 | self.stim_patterns = [
76 | init_flicker_stim(frame_rate, 2, self.soa),
77 | init_flicker_stim(frame_rate, 3, self.soa),
78 | ]
79 |
80 | print(
81 | (
82 | "Flickering frequencies (Hz): {}\n".format(
83 | [self.stim_patterns[0]["freq"], self.stim_patterns[1]["freq"]]
84 | )
85 | )
86 | )
87 |
88 |
89 |         # The same two flicker patterns were computed above; return them directly.
90 |         return self.stim_patterns
93 |
94 | def present_stimulus(self, idx: int):
95 |
96 | # Select stimulus frequency
97 | ind = self.trials["parameter"].iloc[idx]
98 |
99 | # Push sample
100 | if self.eeg:
101 | timestamp = time()
102 | if self.eeg.backend == "muselsl":
103 | marker = [self.markernames[ind]]
104 | else:
105 | marker = self.markernames[ind]
106 | self.eeg.push_sample(marker=marker, timestamp=timestamp)
107 |
108 | # Present flickering stim
109 | for _ in range(int(self.stim_patterns[ind]["n_cycles"])):
110 |
111 | for _ in range(int(self.stim_patterns[ind]["cycle"][0])):
112 | if self.use_vr:
113 | tracking_state = self.window.getTrackingState()
114 | self.window.calcEyePoses(tracking_state.headPose.thePose)
115 | self.window.setDefaultView()
116 | self.grating.draw()
117 | self.fixation.draw()
118 | self.window.flip()
119 |
120 | for _ in range(self.stim_patterns[ind]["cycle"][1]):
121 | if self.use_vr:
122 | tracking_state = self.window.getTrackingState()
123 | self.window.calcEyePoses(tracking_state.headPose.thePose)
124 | self.window.setDefaultView()
125 | self.grating_neg.draw()
126 | self.fixation.draw()
127 | self.window.flip()
128 | pass
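To make the flicker timing concrete, the arithmetic inside init_flicker_stim() can be run by hand. Assuming an ordinary 60 Hz monitor (VR headsets report displayRefreshRate instead), the 2- and 3-frame cycles used above give reversal rates of 30 Hz and 20 Hz:

    # Worked example of init_flicker_stim() for an assumed 60 Hz refresh rate
    frame_rate, soa = 60, 3.0
    for cycle in (2, 3):
        stim_freq = frame_rate / cycle        # pattern-reversal rate in Hz
        n_cycles = int(soa * stim_freq) / 2   # grating/grating_neg pairs per trial
        print(cycle, stim_freq, n_cycles)     # 2 30.0 45.0, then 3 20.0 30.0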
--------------------------------------------------------------------------------
/eegnb/experiments/visual_vep/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/experiments/visual_vep/__init__.py
--------------------------------------------------------------------------------
/eegnb/experiments/visual_vep/vep.py:
--------------------------------------------------------------------------------
 1 | from time import time, strftime, gmtime
 2 | from optparse import OptionParser
 3 | from typing import Optional
 4 | import numpy as np
 5 | from pandas import DataFrame
 6 | from psychopy import visual, core, event
 7 | from pylsl import StreamInfo, StreamOutlet
 8 | from eegnb.experiments import Experiment
 9 | from eegnb.devices.eeg import EEG
10 | 
11 | 
12 | class VisualVEP(Experiment.BaseExperiment):
13 |     def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None,
14 |                  n_trials=2000, iti=0.2, soa=0.2, jitter=0.1):
15 |         exp_name = "Visual VEP"
16 |         super().__init__(exp_name, duration, eeg, save_fn, n_trials, iti, soa, jitter)
17 | 
18 |     def load_stimulus(self):
19 |         pass
20 | 
21 |     def present_stimulus(self, idx: int):
22 |         pass
23 |
24 | def present(duration=120):
25 |
26 |     # Create the LSL marker stream
27 | info = StreamInfo("Markers", "Markers", 1, 0, "int32", "myuidw43536")
28 |
29 | # next make an outlet
30 | outlet = StreamOutlet(info)
31 |
32 | markernames = [1, 2]
33 |
34 | start = time()
35 |
36 | n_trials = 2000
37 | iti = 0.2
38 | jitter = 0.1
39 | soa = 0.2
40 | record_duration = np.float32(duration)
41 |
42 | # Setup log
43 | position = np.random.randint(0, 2, n_trials)
44 | trials = DataFrame(dict(position=position, timestamp=np.zeros(n_trials)))
45 |
46 | # graphics
47 | mywin = visual.Window(
48 | [1920, 1080], monitor="testMonitor", units="deg", fullscr=True
49 | )
50 | grating = visual.GratingStim(win=mywin, mask="circle", size=20, sf=4)
51 | fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
52 |
53 | for ii, trial in trials.iterrows():
54 | # inter trial interval
55 | core.wait(iti + np.random.rand() * jitter)
56 |
57 | # onset
58 | grating.phase += np.random.rand()
59 | pos = trials["position"].iloc[ii]
60 | grating.pos = [25 * (pos - 0.5), 0]
61 | grating.draw()
62 | fixation.draw()
63 | outlet.push_sample([markernames[pos]], time())
64 | mywin.flip()
65 |
66 | # offset
67 | core.wait(soa)
68 | fixation.draw()
69 | mywin.flip()
70 | if len(event.getKeys()) > 0 or (time() - start) > record_duration:
71 | break
72 | event.clearEvents()
73 | # Cleanup
74 | mywin.close()
75 |
76 |
77 | def main():
78 | parser = OptionParser()
79 |
80 | parser.add_option(
81 | "-d",
82 | "--duration",
83 | dest="duration",
84 | type="int",
85 | default=120,
86 | help="duration of the recording in seconds.",
87 | )
88 |
89 | (options, args) = parser.parse_args()
90 | present(options.duration)
91 |
92 |
93 | if __name__ == "__main__":
94 | main()
95 |
--------------------------------------------------------------------------------
/eegnb/stimuli/__init__.py:
--------------------------------------------------------------------------------
1 | from os import path
2 |
3 | FACE_HOUSE = path.join(path.dirname(__file__), "visual", "face_house")
4 | CAT_DOG = path.join(path.dirname(__file__), "visual", "cats_dogs")
5 |
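These two constants are how the experiment modules locate their image stimuli. A minimal sketch of the typical usage (the glob patterns are illustrative, matching the directory contents listed below):

    from glob import glob
    from os import path

    from eegnb.stimuli import CAT_DOG, FACE_HOUSE

    faces = glob(path.join(FACE_HOUSE, "faces", "*.jpg"))    # Haxby face images
    houses = glob(path.join(FACE_HOUSE, "houses", "*.jpg"))  # Haxby house images
    targets = glob(path.join(CAT_DOG, "target-*.jpg"))       # cat/dog oddball targets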
--------------------------------------------------------------------------------
/eegnb/stimuli/utils.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/utils.py
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/LICENSE.txt:
--------------------------------------------------------------------------------
1 | All the files in this directory were downloaded from pixabay, and are Public Domain (CC0).
2 |
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/nontarget-234836_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/nontarget-234836_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/nontarget-274183_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/nontarget-274183_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/nontarget-280332_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/nontarget-280332_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/nontarget-734689_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/nontarget-734689_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/target-2083492_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/target-2083492_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/target-360807_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/target-360807_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/target-468232_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/target-468232_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/cats_dogs/target-76116_640.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/cats_dogs/target-76116_640.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Stimuli used in:
2 |
3 | Haxby, J., Gobbini, M., Furey, M., Ishai, A., Schouten, J., and Pietrini, P. (2001).
4 | Distributed and overlapping representations of faces and objects in ventral temporal
5 | cortex. Science 293, 2425–2430.
6 |
7 | See: http://www.pymvpa.org/datadb/haxby2001.html
8 |
9 | The original authors of Haxby et al. (2001) hold the copyright of this dataset and
10 | made it available under the terms of the Creative Commons Attribution-Share Alike 3.0 license.
11 |
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Annie_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Annie_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Annie_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Annie_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Annie_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Annie_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Annie_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Annie_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Blake_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Blake_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Blake_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Blake_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Blake_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Blake_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Blake_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Blake_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Don_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Don_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Don_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Don_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Don_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Don_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Don_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Don_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Estelle_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Estelle_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Estelle_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Estelle_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Estelle_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Estelle_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Estelle_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Estelle_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Frank_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Frank_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Frank_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Frank_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Frank_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Frank_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Frank_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Frank_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Janie_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Janie_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Janie_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Janie_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Janie_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Janie_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Janie_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Janie_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joan_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joan_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joan_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joan_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joan_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joan_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joan_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joan_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Jodi_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Jodi_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Jodi_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Jodi_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Jodi_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Jodi_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Jodi_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Jodi_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joe_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joe_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joe_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joe_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joe_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joe_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Joe_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Joe_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tim_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tim_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tim_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tim_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tim_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tim_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tim_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tim_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tom_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tom_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tom_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tom_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tom_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tom_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Tom_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Tom_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Wallace_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Wallace_1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Wallace_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Wallace_2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Wallace_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Wallace_3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/faces/Wallace_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/faces/Wallace_4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house1.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house1.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house1.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house1.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house1.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house1.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house1.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house1.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house10.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house10.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house10.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house10.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house10.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house10.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house10.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house10.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house11.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house11.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house11.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house11.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house11.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house11.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house11.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house11.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house12.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house12.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house12.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house12.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house12.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house12.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house12.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house12.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house2.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house2.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house2.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house2.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house2.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house2.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house2.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house2.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house3.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house3.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house3.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house3.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house3.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house3.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house3.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house3.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house4.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house4.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house4.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house4.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house4.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house4.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house4.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house4.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house5.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house5.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house5.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house5.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house5.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house5.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house5.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house5.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house6.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house6.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house6.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house6.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house6.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house6.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house6.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house6.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house7.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house7.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house7.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house7.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house7.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house7.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house7.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house7.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house8.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house8.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house8.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house8.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house8.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house8.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house8.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house8.4.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house9.1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house9.1.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house9.2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house9.2.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house9.3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house9.3.jpg
--------------------------------------------------------------------------------
/eegnb/stimuli/visual/face_house/houses/house9.4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/eegnb/stimuli/visual/face_house/houses/house9.4.jpg
--------------------------------------------------------------------------------
/environments/eeg-expy-docsbuild.yml:
--------------------------------------------------------------------------------
1 | name: eeg-expy-docsbuild
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | # System-level dependencies
6 | - python=3.8
7 |   - pytables # install pytables for macOS arm64 so it does not need to be built from source
8 | - rust # used by docsbuild
9 | - pip
10 | - pip:
11 |       # Install package with docs-build requirements
12 | - -e ..[docsbuild]
--------------------------------------------------------------------------------
/environments/eeg-expy-full.yml:
--------------------------------------------------------------------------------
1 | name: eeg-expy-full
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | # System-level dependencies
6 | - python=3.8
7 |   - pytables # install pytables for macOS arm64 so it does not need to be built from source
8 | - rust # used by docsbuild
9 | - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
10 | - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
11 | - pip
12 | - pip:
13 |       # Install package with the full set of requirements
14 | - -e ..[full]
--------------------------------------------------------------------------------
/environments/eeg-expy-stimpres.yml:
--------------------------------------------------------------------------------
1 | name: eeg-expy-stimpres
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | # System-level dependencies
6 | - python=3.8
7 | - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
8 | - pip
9 | - pip:
10 |       # Install package with stimulus-presentation requirements
11 | - -e ..[stimpres]
--------------------------------------------------------------------------------
/environments/eeg-expy-streaming.yml:
--------------------------------------------------------------------------------
1 | name: eeg-expy-streaming
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | # System-level dependencies
6 | - python=3.8
7 | - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
8 | - pip
9 | - pip:
10 | # Install package with Analysis + Streaming requirements
11 | - -e ..[streaming]
--------------------------------------------------------------------------------
/environments/eeg-expy-streamstim.yml:
--------------------------------------------------------------------------------
1 | name: eeg-expy-streamstim
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | # System-level dependencies
7 | - python=3.8
8 | - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
9 | - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
10 | - pip
11 | - pip:
12 |       # Install package with streaming + stimulus-presentation requirements
13 | - -e ..[streamstim]
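Each of the environment files above pip-installs the repository itself in editable mode with a different extras group (docsbuild, full, stimpres, streaming, streamstim). The relative "-e .." path appears to assume the environment is created from inside the environments/ directory, e.g. with: conda env create -f eeg-expy-full.yml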
--------------------------------------------------------------------------------
/examples/README.txt:
--------------------------------------------------------------------------------
1 | .. _examples:
2 |
3 | All Notebook Examples
4 | =======================
5 |
6 | This section has examples of different functionality available in the module.
7 |
8 | Examples are organized by topic, and can be explored in any order.
9 |
10 | .. contents:: Contents
11 | :local:
12 | :depth: 3
13 |
14 |
15 |
--------------------------------------------------------------------------------
/examples/auditory_oddball/MUSE_conditions.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/examples/auditory_oddball/MUSE_conditions.mat
--------------------------------------------------------------------------------
/examples/auditory_oddball/designMatrix.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/examples/auditory_oddball/designMatrix.mat
--------------------------------------------------------------------------------
/examples/misc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/examples/misc/__init__.py
--------------------------------------------------------------------------------
/examples/misc/mac_run_exp.py:
--------------------------------------------------------------------------------
1 | from muselsl import stream, list_muses, view, record
2 | from multiprocessing import freeze_support, set_start_method, Process, Pool
3 | from mne import Epochs, find_events
4 | from time import time, strftime, gmtime
5 | import os
6 | from utils import utils
7 | from collections import OrderedDict
8 | import sys
9 | from optparse import OptionParser
10 | import warnings
11 | #warnings.filterwarnings('ignore')
12 |
13 | parser = OptionParser()
14 |
15 | parser.add_option("-d", "--duration",
16 | dest="duration", type='int', default=400,
17 | help="duration of the recording in seconds")
18 | parser.add_option("-s", "--subject",
19 | dest="subject", type='int', default=2,
20 | help="subject number: must be an integer")
21 | parser.add_option("-r", "--run",
22 | dest="run", type='int', default=1,
23 | help="run (session) number: must be an integer")
24 | parser.add_option("-e", "--experiment",
25 | dest="experiment", type='string', default="n170",
26 | help="name of experiment from stimulus_presentation folder")
27 |
28 | (options, args) = parser.parse_args()
29 |
30 | duration = options.duration
31 | subject = options.subject
32 | run = options.run
33 | experiment = options.experiment
34 | expprez = experiment + '.present'
35 |
36 | exec('from stimulus_presentation import ' + experiment)
37 |
38 | if experiment == 'visual_p300_stripes':
39 | recording_path = os.path.join(os.path.expanduser("~"), "eeg-notebooks", "data", "visual", "P300", "subject" + str(subject), "session" + str(run), ("recording_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime())))
40 | elif experiment == 'n170':
41 | recording_path = os.path.join(os.path.expanduser("~"), "eeg-notebooks", "data", "visual", "N170", "subject" + str(subject), "session" + str(run), ("recording_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime())))
42 | elif experiment == 'ssvep':
43 | recording_path = os.path.join(os.path.expanduser("~"), "eeg-notebooks", "data", "visual", "SSVEP", "subject" + str(subject), "session" + str(run), ("recording_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime())))
44 | elif experiment == 'cueing':
45 | recording_path = os.path.join(os.path.expanduser("~"), "eeg-notebooks", "data", "visual", "cueing", "subject" + str(subject), "session" + str(run), ("subject" + str(subject) + "_session" + str(run) + "_recording_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime()) ) )
46 |
47 | else:
48 |     print('Experiment name is not correct. Choose from n170, visual_p300_stripes, ssvep, or cueing.')
49 |
50 | stimulus = Process(target=eval(expprez), args=(duration, subject, run))
51 | recording = Process(target=record, args=(duration, recording_path))
52 |
53 | if __name__ == '__main__':
54 | #freeze_support()
55 | set_start_method('spawn', force=True)
56 | pool = Pool(processes=4)
57 |
58 | pool.apply_async(eval(expprez), args=(duration, subject, run))
59 | pool.apply_async(record, args=(duration,recording_path))
60 |
61 | pool.close()
62 | pool.join()
63 |
64 |
65 |
66 |
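With the options defined at the top of this script, a typical launch would be: python mac_run_exp.py -d 120 -s 1 -r 1 -e n170. The chosen experiment's present() function and the muselsl record() call are then queued in the same multiprocessing pool so that stimulus presentation and recording run in parallel.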
--------------------------------------------------------------------------------
/examples/misc/neurobrite_datasets.py:
--------------------------------------------------------------------------------
1 | print('Downloading 3 scikit-learn datasets')
2 | import warnings
3 | warnings.filterwarnings('ignore')
4 | import numpy as np, os
5 | from matplotlib import pyplot as plt
6 | from sklearn import datasets
7 |
8 | basedir = os.path.join(os.getcwd(),'stimulus_presentation/stim')
9 |
10 | # olivetti faces
11 | print('Downloading Olivetti faces')
12 | stimdir = os.path.join(basedir, 'olivetti_faces')
13 | try:
14 | os.makedirs(stimdir)
15 | except:
16 | pass
17 |
18 | faces = datasets.fetch_olivetti_faces()
19 | images = faces.images
20 |
21 | for i in range(np.shape(images)[0]):
22 | fig, ax = plt.subplots(1)
23 | plt.imshow(images[i,:,:], 'gray')
24 | plt.axis('off')
25 | ax.get_xaxis().set_visible(False) # this removes the ticks and numbers for x axis
26 | ax.get_yaxis().set_visible(False) # this removes the ticks and numbers for y axis
27 | plt.savefig(stimdir + '/image_' + str(i+1) + '.jpg',bbox_inches='tight',pad_inches=0)
28 |     plt.close(fig)
29 |
30 |
31 | # labelled faces in the wild
32 | print('Downloading labelled faces in the wild')
33 | stimdir = os.path.join(basedir,'faces_in_wild')
34 | n_images = 200
35 |
36 | try:
37 | os.makedirs(stimdir)
38 | except:
39 | pass
40 |
41 | faces = datasets.fetch_lfw_people()
42 | images = faces.images
43 |
44 | for i in range(n_images):
45 | fig, ax=plt.subplots(1)
46 | plt.imshow(images[i,:,:],'gray')
47 | plt.axis('off')
48 | ax.get_xaxis().set_visible(False) # this removes the ticks and numbers for x axis
49 | ax.get_yaxis().set_visible(False) # this removes the ticks and numbers for y axis
50 | plt.savefig(stimdir + '/image_' + str(i+1) + '.jpg',bbox_inches='tight',pad_inches=0)
51 |     plt.close(fig)
52 |
53 |
54 | # handwritten digits
55 | print('Downloading digits data')
56 | stimdir = os.path.join(basedir, 'digits')
57 | n_images = 200
58 |
59 | try:
60 | os.makedirs(stimdir)
61 | except:
62 | pass
63 |
64 | digits = datasets.load_digits()
65 | images = digits.images
66 | targets = digits.target
67 |
68 | for i in range(n_images):
69 | fig, ax=plt.subplots(1)
70 | plt.imshow(images[i,:,:],'gray')
71 | plt.axis('off')
72 | ax.get_xaxis().set_visible(False) # this removes the ticks and numbers for x axis
73 | ax.get_yaxis().set_visible(False) # this removes the ticks and numbers for y axis
74 | plt.savefig(stimdir + '/image_' + str(i+1) + '_digit' + str(targets[i]) + '.jpg',bbox_inches='tight',pad_inches=0)
75 |     plt.close(fig)
76 |
77 |
78 | print('Done downloading all datasets')
79 |
80 |
--------------------------------------------------------------------------------
/examples/misc/run_experiment.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import sys
4 | from multiprocessing import Process
5 | from time import strftime, gmtime
6 | from muselsl import record  # records the LSL EEG stream to a CSV
7 | def run_experiment(expt_name, subj_num='', sess_num='', muse_lsl_dir='muse-lsl'):
8 |
9 | if 'Visual_P300' in expt_name:
10 | from stimulus_presentation import visual_p300 as expt_function
11 |
12 | elif 'Visual_N170' in expt_name:
13 | from stimulus_presentation import n170 as expt_function
14 |
15 | elif 'SSVEP' in expt_name:
16 | from stimulus_presentation import ssvep as expt_function
17 |
18 | elif 'SSAEP' in expt_name:
19 | from stimulus_presentation import ssaep as expt_function
20 |
21 | elif 'Auditory_P300' in expt_name:
22 | from stimulus_presentation import auditory_p300 as expt_function
23 |     else: raise ValueError('Unrecognized experiment name: %s' % expt_name)
24 | expt_args = []
25 | record_args = []
26 |
27 | if 'test' in expt_name:
28 | expt_args.append(20)
29 | record_args.append(20)
30 | else:
31 | expt_args.append(120)
32 | record_args.append(120)
33 |
34 | file_name = expt_name + "_subject" + str(subj_num) + "_session" + str(
35 | sess_num) + "_" + strftime("%Y-%m-%d-%H.%M.%S", gmtime()) + ".csv"
36 | record_args.append(file_name)
37 |
38 | expt_process = Process(target=expt_function.present, args=expt_args)
39 |     record_process = Process(target=record, args=record_args)  # record the EEG stream (via muselsl) while the stimuli run
40 |
41 | expt_process.start()
42 | record_process.start()
43 |
44 |
45 | if __name__ == '__main__':
46 |
47 | """
48 | Usage:
49 |
50 |     python run_experiment.py EXPT_NAME SUBJECT_NUM SESS_NUM
51 |
52 |
53 | Experiment names:
54 |
55 | 'Visual_N170'
56 | 'Visual_P300'
57 | 'SSVEP'
58 | 'SSAEP'
59 | 'Auditory_P300'
60 |
61 | Add '_test' to the end of the experiment name to run a quick (20s) version
62 |
63 |
64 | Examples:
65 |
66 |     python run_experiment.py Visual_N170
67 |
68 |
69 | python run_experiment.py mlsl_SSVEP_test
70 |
71 |
72 | """
73 |
74 | expt_name = sys.argv[1]
75 |
76 | if len(sys.argv) == 4:
77 | subj_num, sess_num = sys.argv[2:4]
78 | else:
79 | subj_num = ''
80 | sess_num = ''
81 |
82 | run_experiment(expt_name, subj_num, sess_num)
83 |
--------------------------------------------------------------------------------
/examples/rest/Raw_EEG.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "inputHidden": true,
8 | "outputHidden": true
9 | },
10 | "outputs": [],
11 | "source": [
12 | "from muselsl import stream, list_muses, view\n",
13 | "from multiprocessing import set_start_method, Process\n",
14 | "from time import sleep\n",
15 | "%matplotlib"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {
22 | "inputHidden": false,
23 | "outputHidden": false
24 | },
25 | "outputs": [],
26 | "source": [
27 | "muses = list_muses()"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": null,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "stream_process = Process(target=stream, args=(muses[0]['address'],))\n",
37 | "stream_process.start()"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "metadata": {},
44 | "outputs": [],
45 | "source": [
46 | "view()"
47 | ]
48 | }
49 | ],
50 | "metadata": {
51 | "kernel_info": {
52 | "name": "mne"
53 | },
54 | "kernelspec": {
55 | "display_name": "Python 2",
56 | "language": "python",
57 | "name": "python2"
58 | },
59 | "language_info": {
60 | "codemirror_mode": {
61 | "name": "ipython",
62 | "version": 2
63 | },
64 | "file_extension": ".py",
65 | "mimetype": "text/x-python",
66 | "name": "python",
67 | "nbconvert_exporter": "python",
68 | "pygments_lexer": "ipython2",
69 | "version": "2.7.15"
70 | },
71 | "nteract": {
72 | "version": "0.8.4"
73 | }
74 | },
75 | "nbformat": 4,
76 | "nbformat_minor": 4
77 | }
78 |
--------------------------------------------------------------------------------
/examples/sandbox/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | :tada::clinking_glasses: First off, thanks for taking the time to contribute! :tada::clinking_glasses:
4 |
5 | Contributions are always welcome, no matter how small.
6 |
7 | ## Contributing Data
8 |
9 | If you have collected some interesting data, we would love to add it to the official EEG Notebooks data sets.
10 |
11 | To contribute your data, first make sure it is appropriately stored in the `data` folder. It should be stored in the structure `eeg-notebooks/data/%experiment/%type/subject%name/session%number`. The subject name should not conflict with any of the pre-existing datasets in EEG Notebooks.
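For example, a cueing recording for subject 1, session 1 would live under `eeg-notebooks/data/visual/cueing/subject1/session1/` (with whatever subject and session numbers you choose).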
12 |
13 | [Create a pull request](https://help.github.com/articles/creating-a-pull-request/) with just the data you want to add (not the notebook). In the comments section of the pull request, please include the following information:
14 | - Data collected
15 | - Age of the subject
16 | - Gender of the subject
17 | - Right or left handedness of the subject
18 | - Any interesting notes you collected during data collection or analysis
19 |
20 | ## Contributing Code
21 |
22 | If you find a way to improve the EEG Notebooks repo, feel free to file a pull request so that we can share your improvements with everybody!
23 |
24 | Here are a couple of areas that we know could use improvement:
25 | - Support for platform-specific issues (e.g. improving the raw EEG plotting experience on Windows)
26 | - Updating the [old EEG notebooks](https://github.com/NeuroTechX/eeg-notebooks/tree/master/old_notebooks) to the newer format, with more description of the experiment background and inclusion of the Muse LSL connection and stimulus presentation scripts
27 |
28 |
29 | ### Code of Conduct
30 | This project adheres to the Contributor Covenant [Code of Conduct](CODE_OF_CONDUCT.md).
31 | By participating, you are expected to adhere to its terms. Please report unacceptable behaviour to [dano@neurotechx.com](mailto:dano@neurotechx.com).
32 |
33 | ### Contributing on Github
34 |
35 | If you're new to Git and want to learn how to fork this repo, make your own additions, and include those additions in the master version of this project, check out this [great tutorial](http://blog.davidecoppola.com/2016/11/howto-contribute-to-open-source-project-on-github/).
36 |
37 | ### Community
38 |
39 | This project is maintained by the [NeuroTechX](http://www.neurotechx.com) community. Join our Slack to check out our #interactive-tutorial channel, where discussions about EEG101 take place.
40 |
41 | If you're in the Toronto area, the NeuroTechTO community often gets together to work on EEG Notebooks and other cool neurotech projects :beers: :pizza:. The NeuroTechX Slack channel is a good place to stay aware of such things, as is the NeuroTechX newsletter, which you can sign up for [here](http://neurotechx.us12.list-manage.com/subscribe?u=5124b2527cf13d913a8beeea3&id=3519c19837).
42 |
--------------------------------------------------------------------------------
/examples/sandbox/LINUX_INSTRUCTIONS.md:
--------------------------------------------------------------------------------
1 | # Running eeg-notebooks on Linux
2 |
3 | There were some issues running the notebooks on Linux. `./notebooks/utils/utils.py` was edited and `./notebooks/linux_muse` was added to work around them.
4 |
5 | Then two notebooks (SSVEP and Raw_EEG) were ported over to versions that work with Linux+Python3. Tested system was an Asus Zenbook 13 running Ubuntu 18 LTS.
6 |
7 | ## Setup
8 |
9 | ```
10 | sudo apt-get update
11 | python3 -m virtualenv venv
12 | source venv/bin/activate
13 | pip3 install -r requirements.txt
14 | pip3 install pygatt==3.1.1
15 | pip3 install bluepy
16 | pip3 install pygame  # not sure why this isn't included in the main requirements.txt
17 | sudo apt-get install libpcap-dev #for muselsl
18 | ```
19 |
20 | ### Bluepy setup
21 |
22 | *Modify the path to match the path of your Python virtualenv*
23 |
24 | ```
25 | sudo setcap 'cap_net_raw,cap_net_admin+eip' ~/Tester/blueberry/eeg-notebooks/venv/lib/python3.6/site-packages/bluepy/bluepy-helper
26 | ```
27 |
--------------------------------------------------------------------------------
/examples/sandbox/README_OLD.md:
--------------------------------------------------------------------------------
1 | # EEG Notebooks
2 |
3 |
4 | A collection of classic EEG experiments implemented in Python and Jupyter notebooks. This repo is a work in progress with the goal of making it easy to perform classical EEG experiments and automatically analyze data.
5 |
6 | Currently, all experiments are implemented for the Muse EEG device and based on work done by Alexandre Barachant and Hubert Banville for the [muse-lsl](https://github.com/alexandrebarachant/muse-lsl) library.
7 |
8 | Please see the [documentation](http://eeg-notebooks.readthedocs.io/) for advanced installation instructions and complete info about the project.
9 |
10 |
11 | ## Getting Started
12 |
13 |
14 | ### Installation
15 |
16 | If you are a Mac user, follow the installation instructions [here](MAC_INSTRUCTIONS.md)
17 |
18 | You will need a Muse 2016 and Python installed on your computer. PsychoPy, the stimulus presentation library that underlies most of the experiments, officially only supports Python 2. However, some users, especially those on Linux, have been able to work entirely in Python 3 without any issues.
19 |
20 | `git clone https://github.com/neurotechx/eeg-notebooks`
21 |
22 | Install all requirements.
23 |
24 | `pip install -r requirements.txt`
25 |
26 | See [here](http://eeg-notebooks.readthedocs.io/en/latest/setup_instructions_windows.html)
27 | for more detailed setup instructions for windows operating systems.
28 |
29 |
30 | ### Running Experiments
31 |
32 | Open the experiment you are interested in running in the notebooks folder. Notebooks can be opened either with the Jupyter Notebook browser environment (run `jupyter notebook`) or in the [nteract](https://nteract.io/desktop) desktop application.
33 |
34 | It should be possible to perform all experiments entirely within the notebook environment. On Windows 10, you will want to skip the Bluetooth connection step and start an EEG data stream through the [BlueMuse](https://github.com/kowalej/BlueMuse) GUI.
35 |
36 | *Note: if errors are encountered while viewing the EEG data, try starting the viewer directly from the command line (`muselsl view`). Version 2 of the viewer may work better on Windows computers (`muselsl view -v 2`).*
37 |
38 | The basic steps of each experiment are as follows:
39 | 1. Open an LSL stream of EEG data.
40 | 2. Ensure that EEG signal quality is excellent and that there is very little noise. The standard deviation of the signal (displayed next to the raw traces) should ideally be below 10 for all channels of interest.
41 | 3. Define subject and session ID, as well as trial duration. *Note: sessions are analyzed independently. Each session can contain multiple trials or 'run-throughs' of the experiments.*
42 | 4. Simultaneously run stimulus presentation and recording processes to create a data file with both EEG and event marker data.
43 | 5. Repeat step 4 to collect as many trials as needed (4-6 trials of two minutes each are recommended in order to see the clearest results)
44 | 6. Load experimental data into an MNE Raw object (a minimal sketch of steps 6-9 is shown below this list).
45 | 7. Apply a band-pass filter to remove noise
46 | 8. Epoch the data, removing epochs where amplitude of the signal exceeded a given threshold (removes eye blinks)
47 | 9. Generate averaged waveforms from all channels for each type of stimulus presented
48 |
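A minimal sketch of analysis steps 6-9, assuming a Muse CSV recording with EEG columns TP9, AF7, AF8, TP10 and a marker column (here called `Marker0`; the file path, column names, and 256 Hz sampling rate are placeholders to adjust for your own recording):

```python
import numpy as np
import pandas as pd
import mne

# 6. Load the recorded CSV into an MNE Raw object
df = pd.read_csv('data/visual/cueing/subject1/session1/recording.csv')  # placeholder path
eeg_chs = ['TP9', 'AF7', 'AF8', 'TP10']
data = np.vstack([df[ch].values * 1e-6 for ch in eeg_chs] +   # Muse values are in microvolts
                 [df['Marker0'].values])                       # event markers as a stim channel
info = mne.create_info(eeg_chs + ['Marker0'], sfreq=256.0,     # Muse 2016 samples at 256 Hz
                       ch_types=['eeg'] * 4 + ['stim'])
raw = mne.io.RawArray(data, info)

# 7. Band-pass filter to remove slow drifts and high-frequency noise
raw.filter(1, 30, method='iir')

# 8. Epoch around each marker, dropping high-amplitude (blink-contaminated) epochs
events = mne.find_events(raw, stim_channel='Marker0')
epochs = mne.Epochs(raw, events, tmin=-0.1, tmax=0.8, baseline=None,
                    reject={'eeg': 100e-6}, preload=True)

# 9. Averaged waveform for each stimulus type
evoked = {cond: epochs[cond].average() for cond in epochs.event_id}
```

The filter band and rejection threshold mirror the values used in the example notebooks; tune them for your own data.
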
49 | Notebooks in the `old_notebooks` folder only contain the data analysis steps (6-9). They can be run with the `run_experiments.py` script (e.g. `python run_experiments.py Auditory_P300 15 1`).
50 |
51 | Currently available experiments:
52 | - N170 (Faces & Houses)
53 | - SSVEP
54 | - Visual P300
55 | - Cueing (Kyle Mathewson)
56 | - Baseline (Kyle, Eyes Open vs. Closed, needs notebook made)
--------------------------------------------------------------------------------
/examples/sandbox/Raw_EEG_linux.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "inputHidden": true,
8 | "outputHidden": true
9 | },
10 | "outputs": [],
11 | "source": [
12 | "from muselsl import stream, view\n",
13 | "from linux_muse import list_muses\n",
14 | "import matplotlib \n",
15 | "matplotlib.use('Agg')\n",
16 | "import matplotlib.pyplot as plt\n",
17 | "from multiprocessing import set_start_method, Process\n",
18 | "from time import sleep\n",
19 | "#%matplotlib"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 3,
25 | "metadata": {
26 | "inputHidden": false,
27 | "outputHidden": false
28 | },
29 | "outputs": [
30 | {
31 | "name": "stdout",
32 | "output_type": "stream",
33 | "text": [
34 | "Found Muse-8798\n"
35 | ]
36 | }
37 | ],
38 | "source": [
39 | "muses = list_muses.list_muses()\n"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": 3,
45 | "metadata": {},
46 | "outputs": [
47 | {
48 | "name": "stdout",
49 | "output_type": "stream",
50 | "text": [
51 | "[{'address': '00:55:da:b0:03:57', 'name': 'Muse-0357'}]\n"
52 | ]
53 | }
54 | ],
55 | "source": [
56 | "print(muses)"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 4,
62 | "metadata": {},
63 | "outputs": [
64 | {
65 | "name": "stdout",
66 | "output_type": "stream",
67 | "text": [
68 | "Connecting to Muse: 00:55:da:b3:87:98...\n",
69 | "Connected.\n",
70 | "Streaming EEG...\n"
71 | ]
72 | }
73 | ],
74 | "source": [
75 | "stream_process = Process(target=stream, args=(muses[0]['address'],))\n",
76 | "stream_process.start()"
77 | ]
78 | },
79 | {
80 | "cell_type": "markdown",
81 | "metadata": {},
82 | "source": [
83 | "Run the following in your terminal (with your virtualenv activated):"
84 | ]
85 | },
86 | {
87 | "cell_type": "markdown",
88 | "metadata": {},
89 | "source": [
90 | "`muselsl view --version 2`"
91 | ]
92 | }
93 | ],
94 | "metadata": {
95 | "kernel_info": {
96 | "name": "mne"
97 | },
98 | "kernelspec": {
99 | "display_name": "Python 3",
100 | "language": "python",
101 | "name": "python3"
102 | },
103 | "language_info": {
104 | "codemirror_mode": {
105 | "name": "ipython",
106 | "version": 3
107 | },
108 | "file_extension": ".py",
109 | "mimetype": "text/x-python",
110 | "name": "python",
111 | "nbconvert_exporter": "python",
112 | "pygments_lexer": "ipython3",
113 | "version": "3.6.9"
114 | },
115 | "nteract": {
116 | "version": "0.8.4"
117 | }
118 | },
119 | "nbformat": 4,
120 | "nbformat_minor": 4
121 | }
122 |
--------------------------------------------------------------------------------
/examples/sandbox/auditory_erp_arrayin.py:
--------------------------------------------------------------------------------
1 | """Generate sound-only auditory oddball stimulus presentation.
2 | """
3 | import time
4 | from optparse import OptionParser
5 |
6 | import numpy as np
7 | from pandas import DataFrame
8 | from psychopy import visual, core, event, sound
9 | from pylsl import StreamInfo, StreamOutlet
10 |
11 |
12 | def present(duration=120,stim_types=None,itis=None,secs=0.07,volume=0.8):
13 |
14 | #def present(duration=120, n_trials=10, iti=0.3, soa=0.2, jitter=0.2,
15 | # secs=0.2, volume=0.8, random_state=None):
16 |
17 |
18 | # Create markers stream outlet
19 | info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')
20 | outlet = StreamOutlet(info)
21 |
22 | #np.random.seed(random_state)
23 | markernames = [1, 2]
24 | start = time.time()
25 |
26 | # Set up trial parameters
27 | record_duration = np.float32(duration)
28 |
29 | # Initialize stimuli
30 | #aud1 = sound.Sound('C', octave=5, sampleRate=44100, secs=secs)
31 | aud1 = sound.Sound(440,secs=secs)#, octave=5, sampleRate=44100, secs=secs)
32 | aud1.setVolume(volume)
33 |
34 |
35 |
36 | #aud2 = sound.Sound('D', octave=6, sampleRate=44100, secs=secs)
37 | aud2 = sound.Sound(528,secs=secs)
38 | aud2.setVolume(volume)
39 | auds = [aud1, aud2]
40 |
41 | # Setup trial list
42 | #sound_ind = np.random.binomial(1, 0.25, n_trials)
43 | #itis = iti + np.random.rand(n_trials) * jitter
44 | #trials = DataFrame(dict(sound_ind=sound_ind, iti=itis))
45 | #trials['soa'] = soa
46 | #trials['secs'] = secs
47 | trials = DataFrame(dict(sound_ind=stim_types,iti=itis))
48 |
49 | # Setup graphics
50 | mywin = visual.Window([1920, 1080], monitor='testMonitor', units='deg',
51 | fullscr=True)
52 | fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,
53 | rgb=[1, 0, 0])
54 | fixation.setAutoDraw(True)
55 | mywin.flip()
56 |
57 |
58 | for ii, trial in trials.iterrows():
59 |
60 | # Intertrial interval
61 | time.sleep(trial['iti'])
62 |
63 | # Select and play sound
64 | ind = int(trial['sound_ind'])
65 | auds[ind].stop()
66 | auds[ind].play()
67 |
68 | # Send marker
69 | timestamp = time.time()
70 | outlet.push_sample([markernames[ind]], timestamp)
71 |
72 | # Offset
73 | #time.sleep(soa)
74 | #if (time.time() - start) > record_duration:
75 | # break
76 |
77 | # offset
78 | #core.wait(soa)
79 |
80 | if len(event.getKeys()) > 0 or (time.time() - start) > record_duration:
81 | break
82 | event.clearEvents()
83 |
84 | #if len(event.getKeys()) > 0 or (time() - start) > record_duration:
85 | # break
86 | #event.clearEvents()
87 |
88 | # Cleanup
89 | mywin.close()
90 |
91 |
92 | return trials
93 |
94 |
95 | def main():
96 | parser = OptionParser()
97 |
98 | parser.add_option(
99 | '-d', '--duration', dest='duration', type='int', default=10,
100 | help='duration of the recording in seconds.')
101 | parser.add_option(
102 | '-n', '--n_trials', dest='n_trials', type='int',
103 | default=10, help='number of trials.')
104 | parser.add_option(
105 | '-i', '--iti', dest='iti', type='float', default=0.3,
106 | help='intertrial interval')
107 | parser.add_option(
108 | '-s', '--soa', dest='soa', type='float', default=0.2,
109 | help='interval between end of stimulus and next trial.')
110 | parser.add_option(
111 | '-j', '--jitter', dest='jitter', type='float', default=0.2,
112 | help='jitter in the intertrial intervals.')
113 | parser.add_option(
114 | '-e', '--secs', dest='secs', type='float', default=0.2,
115 | help='duration of the sound in seconds.')
116 | parser.add_option(
117 | '-v', '--volume', dest='volume', type='float', default=0.8,
118 | help='volume of the sounds in [0, 1].')
119 | parser.add_option(
120 | '-r', '--randomstate', dest='random_state', type='int',
121 | default=42, help='random seed')
122 |
123 |     (options, args) = parser.parse_args()
124 |     # This version of present() takes explicit trial arrays, so build them from the CLI options
125 |     np.random.seed(options.random_state)
126 |     stim_types = np.random.binomial(1, 0.25, options.n_trials)
127 |     itis = options.iti + np.random.rand(options.n_trials) * options.jitter
128 |     trials_df = present(duration=options.duration, stim_types=stim_types, itis=itis, secs=options.secs, volume=options.volume)
129 |
130 | print(trials_df)
131 |
132 |
133 | if __name__ == '__main__':
134 | main()
135 |
--------------------------------------------------------------------------------
/examples/sandbox/auditory_erp_aux.py:
--------------------------------------------------------------------------------
1 | """Generate sound-only auditory oddball stimulus presentation.
2 | """
3 | import time
4 | from optparse import OptionParser
5 |
6 | import numpy as np
7 | from pandas import DataFrame
8 | from psychopy import visual, core, event, sound
9 | from pylsl import StreamInfo, StreamOutlet
10 |
11 |
12 | def present(duration=120, n_trials=10, iti=0.3, soa=0.2, jitter=0.2,
13 | secs=0.2, volume=0.8, random_state=None):
14 |
15 |
16 | # Create markers stream outlet
17 | info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')
18 | outlet = StreamOutlet(info)
19 |
20 | np.random.seed(random_state)
21 | markernames = [1, 2]
22 | start = time.time()
23 |
24 | # Set up trial parameters
25 | record_duration = np.float32(duration)
26 |
27 | # Initialize stimuli
28 | aud1 = sound.Sound('C', octave=5, sampleRate=44100, secs=secs)
29 | aud1.setVolume(volume)
30 | aud2 = sound.Sound('D', octave=6, sampleRate=44100, secs=secs)
31 | aud2.setVolume(volume)
32 | auds = [aud1, aud2]
33 |
34 | # Setup trial list
35 | sound_ind = np.random.binomial(1, 0.25, n_trials)
36 | itis = iti + np.random.rand(n_trials) * jitter
37 | trials = DataFrame(dict(sound_ind=sound_ind, iti=itis))
38 | trials['soa'] = soa
39 | trials['secs'] = secs
40 |
41 | for ii, trial in trials.iterrows():
42 |
43 | # Intertrial interval
44 | time.sleep(trial['iti'])
45 |
46 | # Select and play sound
47 | ind = int(trial['sound_ind'])
48 | auds[ind].stop()
49 | auds[ind].play()
50 |
51 | # Send marker
52 | timestamp = time.time()
53 | outlet.push_sample([markernames[ind]], timestamp)
54 |
55 | # Offset
56 | #time.sleep(soa)
57 | #if (time.time() - start) > record_duration:
58 | # break
59 |
60 | # offset
61 | core.wait(soa)
62 | if len(event.getKeys()) > 0 or (time.time() - start) > record_duration:
63 | break
64 | event.clearEvents()
65 |
66 | #if len(event.getKeys()) > 0 or (time() - start) > record_duration:
67 | # break
68 | #event.clearEvents()
69 |
70 |
71 |
72 | return trials
73 |
74 |
75 | def main():
76 | parser = OptionParser()
77 |
78 | parser.add_option(
79 | '-d', '--duration', dest='duration', type='int', default=10,
80 | help='duration of the recording in seconds.')
81 | parser.add_option(
82 | '-n', '--n_trials', dest='n_trials', type='int',
83 | default=10, help='number of trials.')
84 | parser.add_option(
85 | '-i', '--iti', dest='iti', type='float', default=0.3,
86 | help='intertrial interval')
87 | parser.add_option(
88 | '-s', '--soa', dest='soa', type='float', default=0.2,
89 | help='interval between end of stimulus and next trial.')
90 | parser.add_option(
91 | '-j', '--jitter', dest='jitter', type='float', default=0.2,
92 | help='jitter in the intertrial intervals.')
93 | parser.add_option(
94 | '-e', '--secs', dest='secs', type='float', default=0.2,
95 | help='duration of the sound in seconds.')
96 | parser.add_option(
97 | '-v', '--volume', dest='volume', type='float', default=0.8,
98 | help='volume of the sounds in [0, 1].')
99 | parser.add_option(
100 | '-r', '--randomstate', dest='random_state', type='int',
101 | default=42, help='random seed')
102 |
103 | (options, args) = parser.parse_args()
104 | trials_df = present(
105 |         duration=options.duration, n_trials=options.n_trials,
106 | iti=options.iti, soa=options.soa, jitter=options.jitter,
107 | secs=options.secs, volume=options.volume,
108 | random_state=options.random_state)
109 |
110 | print(trials_df)
111 |
112 |
113 | if __name__ == '__main__':
114 | main()
115 |
--------------------------------------------------------------------------------
/examples/sandbox/designMatrix.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/examples/sandbox/designMatrix.mat
--------------------------------------------------------------------------------
/examples/sandbox/list_muses.py:
--------------------------------------------------------------------------------
1 | from bluepy.btle import Scanner, DefaultDelegate
2 |
3 | class ScanDelegate(DefaultDelegate):
4 | def __init__(self):
5 | DefaultDelegate.__init__(self)
6 |
7 | def handleDiscovery(self, dev, isNewDev, isNewData):
8 | pass
9 | # if isNewDev:
10 | # print("Discovered device", dev.addr)
11 | # elif isNewData:
12 | # print("Received new data from", dev.addr)
13 |
14 | def list_muses():
15 | scanner = Scanner().withDelegate(ScanDelegate())
16 | devices = scanner.scan(10.0)
17 |
18 | muses = list()
19 |
20 | for dev in devices:
21 | #print("Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi))
22 | for (adtype, desc, value) in dev.getScanData():
23 | if desc == "Complete Local Name" and "muse" in value.lower():
24 | muse_dict = {'address' : dev.addr, 'name' : value}
25 | muses.append(muse_dict)
26 | print("Found %s" % (value))
27 | return muses
28 |
29 | if __name__ == "__main__":
30 | print(list_muses())
31 |
--------------------------------------------------------------------------------
/examples/sandbox/n170_fil2.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate N170
3 | =============
4 |
5 | Face vs. house paradigm stimulus presentation for evoking the N170 response.
6 |
7 | """
8 |
9 | from time import time
10 | from optparse import OptionParser
11 | from glob import glob
12 | from random import choice
13 |
14 | import numpy as np
15 | from pandas import DataFrame,read_csv
16 | from psychopy import visual, core, event
17 | from pylsl import StreamInfo, StreamOutlet
18 |
19 |
20 | def present(duration=120):
21 |
22 | # Create markers stream outlet
23 | #info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')
24 | info = StreamInfo('Markers', 'Markers', 3, 0, 'int32', 'myuidw43536')
25 | outlet = StreamOutlet(info)
26 |
27 | #markernames = [1, 2]
28 | start = time()
29 |
30 | # Set up trial parameters
31 | #n_trials = 2010
32 | iti = 0.8
33 | soa = 0.2
34 | jitter = 0.2
35 | record_duration = np.float32(duration)
36 |
37 | # Setup trial list
38 | #image_type = np.random.binomial(1, 0.5, n_trials)
39 | #trials = DataFrame(dict(image_type=image_type,
40 | # timestamp=np.zeros(n_trials)))
41 |
42 |
43 | fil_ims = read_csv('n170_fil_imslist.csv')
44 | n_trials = fil_ims.shape[0]
45 |
46 |
47 | # Setup graphics
48 |
49 | def load_image(filename):
50 | return visual.ImageStim(win=mywin, image=filename)
51 |
52 | mywin = visual.Window([1600, 900], monitor='testMonitor', units='deg', winType='pygame',
53 | fullscr=True)
54 |
55 | #faces = list(map(load_image, glob(
56 | # 'stimulus_presentation/stim/face_house/faces/*_3.jpg')))
57 | #houses = list(map(load_image, glob(
58 | # 'stimulus_presentation/stim/face_house/houses/*.3.jpg')))
59 |
60 |
61 | #for ii, trial in trials.iterrows():
62 | for ii,trial in fil_ims.iterrows():
63 |
64 | trialnum,filename,facehouse,girlboy = trial.values
65 |
66 | # Intertrial interval
67 | core.wait(iti + np.random.rand() * jitter)
68 |
69 | # Select and display image
70 | #label = trials['image_type'].iloc[ii]
71 | #image = choice(faces if label == 1 else houses)
72 | image = load_image(filename)
73 |
74 | image.draw()
75 |
76 | # Send marker
77 | timestamp = time()
78 | #outlet.push_sample([markernames[label]], timestamp)
79 | outlet.push_sample([trialnum,facehouse+1,girlboy+1], timestamp)
80 |
81 | mywin.flip()
82 |
83 | # offset
84 | core.wait(soa)
85 | mywin.flip()
86 | if len(event.getKeys()) > 0 or (time() - start) > record_duration:
87 | break
88 | event.clearEvents()
89 |
90 | # Cleanup
91 | mywin.close()
92 |
93 |
94 | def main():
95 | parser = OptionParser()
96 |
97 | parser.add_option("-d", "--duration",
98 | dest="duration", type='int', default=120,
99 | help="duration of the recording in seconds.")
100 |
101 | (options, args) = parser.parse_args()
102 | present(options.duration)
103 |
104 |
105 | if __name__ == '__main__':
106 | main()
107 |
--------------------------------------------------------------------------------
/examples/sandbox/requirements_mac.txt:
--------------------------------------------------------------------------------
1 | bitstring==3.1.5
2 | pylsl==1.10.5
3 | pygatt==3.2.0
4 | scikit-learn==0.19.1
5 | pandas==0.23.1
6 | numpy==1.14.5
7 | mne==0.16.1
8 | seaborn==0.8.1
9 | pyriemann==0.2.5
10 | pexpect==4.6.0
11 | jupyter==1.0.0
12 | vispy==0.5.3
13 | pyglet==1.2.0
14 | muselsl==2.0.2
15 | PsychoPy==1.90.2
16 | git+https://github.com/peplin/pygatt
17 | matplotlib==2.2.2
18 |
--------------------------------------------------------------------------------
/examples/sandbox/running_on_binder.md:
--------------------------------------------------------------------------------
1 | Running on Binder
2 | ---------------------------
3 |
4 | 1. Go to https://mybinder.org/
5 |
6 | 2. Enter https://github.com/NeuroTechX/eeg-notebooks
7 |
8 | 3. Navigate to the sandbox folder and run the example analysis-only notebooks
9 |
10 |
--------------------------------------------------------------------------------
/examples/visual_cueing/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NeuroTechX/EEG-ExPy/92d933dce1464290380e0007a362a69ec6e50ace/examples/visual_cueing/README.txt
--------------------------------------------------------------------------------
/examples/visual_gonogo/README.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/examples/visual_n170/00x__n170_run_experiment.py:
--------------------------------------------------------------------------------
1 | """
2 | N170 run experiment
3 | ===============================
4 |
5 | This example demonstrates the initiation of an EEG stream with eeg-expy, and how to run
6 | an experiment.
7 |
8 | """
9 |
10 | ###################################################################################################
11 | # Setup
12 | # ---------------------
13 | #
14 | # Imports
15 | from eegnb import generate_save_fn
16 | from eegnb.devices.eeg import EEG
17 | from eegnb.experiments import VisualN170
18 |
19 | # Define some variables
20 | board_name = "muse2" # board name
21 | experiment_name = "visual_n170" # experiment name
22 | subject_id = 0 # test subject id
23 | session_nb = 0 # session number
24 | record_duration = 120 # recording duration
25 |
26 | # generate save path
27 | save_fn = generate_save_fn(board_name, experiment_name, subject_id, session_nb)
28 |
29 | # create device object
30 | eeg_device = EEG(device=board_name)
31 |
32 | # Experiment type
33 | experiment = VisualN170(duration=record_duration, eeg=eeg_device, save_fn=save_fn, use_vr=False)
34 |
35 | ###################################################################################################
36 | # Run experiment
37 | # ---------------------
38 | #
39 | experiment.run()
40 |
41 | # Saved csv location
42 | print("Recording saved in", experiment.save_fn)
43 |
--------------------------------------------------------------------------------
/examples/visual_n170/01r__n170_viz.py:
--------------------------------------------------------------------------------
1 | """
2 | N170 Load and Visualize Data
3 | ===============================
4 |
5 | This example demonstrates loading, organizing, and visualizing ERP response data from the visual N170 experiment.
6 |
7 | Images of faces and houses are shown in a rapid serial visual presentation (RSVP) stream.
8 |
9 | The data used is the first subject and first session of one of the eeg-expy N170 example datasets, recorded using the InteraXon MUSE EEG headset (2016 model).
10 | This session consists of six two-minute blocks of continuous recording.
11 |
12 | We first use the `fetch_dataset` function to obtain a list of filenames. If these files are not already present
13 | in the specified data directory, they will be quickly downloaded from the cloud.
14 |
15 | After loading the data, we place it in an MNE `Epochs` object, and obtain the trial-averaged response.
16 |
17 | The final figure plotted at the end shows the N170 response ERP waveform.
18 |
19 | """
20 |
21 | ###################################################################################################
22 | # Setup
23 | # ---------------------
24 |
25 | # Some standard pythonic imports
26 | import os
27 | from matplotlib import pyplot as plt
28 | from collections import OrderedDict
29 | import warnings
30 | warnings.filterwarnings('ignore')
31 |
32 | # MNE functions
33 | from mne import Epochs,find_events
34 |
35 | # EEG-Notebooks functions
36 | from eegnb.analysis.analysis_utils import load_data,plot_conditions
37 | from eegnb.datasets import fetch_dataset
38 |
39 | # sphinx_gallery_thumbnail_number = 3
40 |
41 | ###################################################################################################
42 | # Load Data
43 | # ---------------------
44 | #
45 | # We will use the eeg-expy N170 example dataset
46 | #
47 | # Note that if you are running this locally, the following cell will download
48 | # the example dataset, if you do not already have it.
49 | #
50 |
51 | ###################################################################################################
52 |
53 | eegnb_data_path = os.path.join(os.path.expanduser('~/'),'.eegnb', 'data')
54 | n170_data_path = os.path.join(eegnb_data_path, 'visual-N170', 'eegnb_examples')
55 |
56 | # If dataset hasn't been downloaded yet, download it
57 | if not os.path.isdir(n170_data_path):
58 | fetch_dataset(data_dir=eegnb_data_path, experiment='visual-N170', site='eegnb_examples');
59 |
60 | subject = 1
61 | session = 1
62 | raw = load_data(subject,session,
63 | experiment='visual-N170', site='eegnb_examples', device_name='muse2016_bfn',
64 | data_dir = eegnb_data_path)
65 |
66 | ###################################################################################################
67 | # Visualize the power spectrum
68 | # ----------------------------
69 |
70 | raw.plot_psd()
71 |
72 | ###################################################################################################
73 | # Filtering
74 | # ----------------------------
75 |
76 | raw.filter(1,30, method='iir')
77 | raw.plot_psd(fmin=1, fmax=30);
78 |
79 | ###################################################################################################
80 | # Epoching
81 | # ----------------------------
82 |
83 | # Create an array containing the timestamps and type of each stimulus (i.e. face or house)
84 | events = find_events(raw)
85 | event_id = {'House': 1, 'Face': 2}
86 |
87 | # Create an MNE Epochs object representing all the epochs around stimulus presentation
88 | epochs = Epochs(raw, events=events, event_id=event_id,
89 | tmin=-0.1, tmax=0.6, baseline=None,
90 | reject={'eeg': 5e-5}, preload=True,
91 | verbose=False, picks=[0,1,2,3])
92 | print('sample drop %: ', (1 - len(epochs.events)/len(events)) * 100)
93 | epochs
94 |
95 | ###################################################################################################
96 | # Epoch average
97 | # ----------------------------
98 |
99 | conditions = OrderedDict()
100 | #conditions['House'] = [1]
101 | #conditions['Face'] = [2]
102 | conditions['House'] = ['House']
103 | conditions['Face'] = ['Face']
104 | diffwav = ('Face', 'House')
105 |
106 | fig, ax = plot_conditions(epochs, conditions=conditions,
107 | ci=97.5, n_boot=1000, title='',
108 | diff_waveform=diffwav,
109 | channel_order=[1,0,2,3])
110 | # reordering of epochs.ch_names according to [[0,2],[1,3]] of subplot axes
111 |
112 | # Manually adjust the ylims
113 | #for i in [0,2]: ax[i].set_ylim([-0.5e6,0.5e6])
114 | #for i in [1,3]: ax[i].set_ylim([-1.5e6,2.5e6])
115 | plt.tight_layout()
116 |
117 |
--------------------------------------------------------------------------------
/examples/visual_n170/README.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/examples/visual_n170/test_md_file.md:
--------------------------------------------------------------------------------
1 | # Test Markdown File
2 |
3 | This is a test markdown file
4 |
5 | # Second section
6 |
7 | For cases when we just want a straight up documentation page
8 |
9 |
10 | ## Blah
11 |
12 | Etc etc
13 |
14 |
--------------------------------------------------------------------------------
/examples/visual_p300/00x__p300_run_experiment.py:
--------------------------------------------------------------------------------
1 | """
2 | P300 run experiment
3 | ===============================
4 |
5 | This example demonstrates the initiation of an EEG stream with eeg-expy, and how to run
6 | an experiment.
7 |
8 | """
9 |
10 | ###################################################################################################
11 | # Setup
12 | # ---------------------
13 | #
14 | # Imports
15 | import os
16 | from eegnb import generate_save_fn
17 | from eegnb.devices.eeg import EEG
18 | from eegnb.experiments import VisualP300
19 |
20 | # Define some variables
21 | board_name = "muse2"
22 | experiment = "visual_p300"
23 | subject_id = 0
24 | session_nb = 0
25 | record_duration = 120
26 |
27 | ###################################################################################################
28 | # Initiate EEG device
29 | # ---------------------
30 | #
31 | # Start EEG device
32 | eeg_device = EEG(device=board_name)
33 |
34 | # Create save file name
35 | save_fn = generate_save_fn(board_name, experiment, subject_id, session_nb)
36 | print(save_fn)
37 |
38 | ###################################################################################################
39 | # Run experiment
40 | # ---------------------
41 | #
42 | # Create Experiment Object
43 | p300 = VisualP300(duration=record_duration, eeg=eeg_device, save_fn=save_fn)
44 | p300.run()
45 |
--------------------------------------------------------------------------------
/examples/visual_p300/01r__p300_viz.py:
--------------------------------------------------------------------------------
1 | """
2 | P300 Load and Visualize Data
3 | ===============================
4 |
5 | This example demonstrates loading, organizing, and visualizing ERP response data from the visual P300 experiment. The experiment uses a visual oddball paradigm. Images of cats and dogs are shown in a rapid serial visual presentation (RSVP) stream; the category shown with low probability serves as the 'target' and the frequently shown category as the 'non-target'.
6 |
7 | The data used is the first subject and first session of one of the eeg-expy P300 example datasets, recorded using the InteraXon MUSE EEG headset (2016 model). This session consists of six two-minute blocks of continuous recording.
8 |
9 | We first use the `fetch_dataset` function to obtain a list of filenames. If these files are not already present
10 | in the specified data directory, they will be quickly downloaded from the cloud.
11 |
12 | After loading the data, we place it in an MNE `Epochs` object, and obtain the trial-averaged response.
13 |
14 | The final figure plotted at the end shows the P300 response ERP waveform.
15 |
16 | """
17 |
18 | ###################################################################################################
19 | # Setup
20 | # ---------------------
21 |
22 | # Some standard pythonic imports
23 | import os
24 | from matplotlib import pyplot as plt
25 | from collections import OrderedDict
26 | import warnings
27 | warnings.filterwarnings('ignore')
28 |
29 | # MNE functions
30 | from mne import Epochs,find_events
31 |
32 | # EEG-Notebooks functions
33 | from eegnb.analysis.analysis_utils import load_data,plot_conditions
34 | from eegnb.datasets import fetch_dataset
35 |
36 | # sphinx_gallery_thumbnail_number = 3
37 |
38 | ###################################################################################################
39 | # Load Data
40 | # ---------------------
41 | #
42 | # We will use the eeg-expy P300 example dataset
43 | #
44 | # Note that if you are running this locally, the following cell will download
45 | # the example dataset, if you do not already have it.
46 | #
47 | ###################################################################################################
48 |
49 | eegnb_data_path = os.path.join(os.path.expanduser('~/'),'.eegnb', 'data')
50 | p300_data_path = os.path.join(eegnb_data_path, 'visual-P300', 'eegnb_examples')
51 |
52 | # If dataset hasn't been downloaded yet, download it
53 | if not os.path.isdir(p300_data_path):
54 | fetch_dataset(data_dir=eegnb_data_path, experiment='visual-P300', site='eegnb_examples');
55 |
56 |
57 | subject = 1
58 | session = 1
59 | raw = load_data(subject,session,
60 | experiment='visual-P300', site='eegnb_examples', device_name='muse2016',
61 | data_dir = eegnb_data_path)
62 |
63 | ###################################################################################################
64 |
65 | ###################################################################################################
66 | # Visualize the power spectrum
67 | # ----------------------------
68 |
69 | raw.plot_psd()
70 |
71 | ###################################################################################################
72 | # Filtering
73 | # ----------------------------
74 |
75 | raw.filter(1,30, method='iir')
76 | raw.plot_psd(fmin=1, fmax=30);
77 |
78 | ###################################################################################################
79 | # Epoching
80 | # ----------------------------
81 |
82 | # Create an array containing the timestamps and type of each stimulus (i.e. target or non-target)
83 | events = find_events(raw)
84 | event_id = {'non-target': 1, 'target': 2}
85 | epochs = Epochs(raw, events=events, event_id=event_id,
86 | tmin=-0.1, tmax=0.8, baseline=None, reject={'eeg': 100e-6}, preload=True,
87 | verbose=False, picks=[0,1,2,3])
88 |
89 | print('sample drop %: ', (1 - len(epochs.events)/len(events)) * 100)
90 |
91 | ###################################################################################################
92 | # Epoch average
93 | # ----------------------------
94 |
95 | conditions = OrderedDict()
96 | conditions['non-target'] = ['non-target']
97 | conditions['target'] = ['target']
98 | diffwav = ["non-target", "target"]
99 |
100 | fig, ax = plot_conditions(epochs, conditions=conditions,
101 | ci=97.5, n_boot=1000, title='',
102 | channel_order=[1,0,2,3],ylim=[-2E6,2.5E6],
103 | diff_waveform = diffwav)
104 |
105 | # Manually adjust the ylims
106 | for i in [0,2]: ax[i].set_ylim([-0.5e6,0.5e6])
107 | for i in [1,3]: ax[i].set_ylim([-1.5e6,2.5e6])
108 |
109 | plt.tight_layout()
110 |
111 |
--------------------------------------------------------------------------------
/examples/visual_p300/02r__p300_decoding.py:
--------------------------------------------------------------------------------
1 | """
2 | P300 Decoding
3 | ===============================
4 |
5 | This example runs a set of machine learning algorithms on the P300 cats/dogs
6 | dataset, and compares them in terms of classification performance.
7 |
8 | The data used is exactly the same as in the P300 `load_and_visualize` example.
9 |
10 | """
11 |
12 | ###################################################################################################
13 | # Setup
14 | # ---------------------
15 |
16 | # Some standard pythonic imports
17 | import warnings
18 | warnings.filterwarnings('ignore')
19 | import os,numpy as np,pandas as pd
20 | from collections import OrderedDict
21 | import seaborn as sns
22 | from matplotlib import pyplot as plt
23 |
24 | # MNE functions
25 | from mne import Epochs,find_events
26 | from mne.decoding import Vectorizer
27 |
28 | # EEG-Notebooks functions
29 | from eegnb.analysis.analysis_utils import load_data
30 | from eegnb.datasets import fetch_dataset
31 |
32 | # Scikit-learn and Pyriemann ML functionalities
33 | from sklearn.pipeline import make_pipeline
34 | from sklearn.linear_model import LogisticRegression
35 | from sklearn.preprocessing import StandardScaler
36 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
37 | from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit
38 | from pyriemann.estimation import ERPCovariances, XdawnCovariances, Xdawn
39 | from pyriemann.tangentspace import TangentSpace
40 | from pyriemann.classification import MDM
41 |
42 | ###################################################################################################
43 | # Load Data
44 | # ---------------------
45 | #
46 | # ( See the P300 `load_and_visualize` example for further description of this)
47 | #
48 |
49 | eegnb_data_path = os.path.join(os.path.expanduser('~/'),'.eegnb', 'data')
50 | p300_data_path = os.path.join(eegnb_data_path, 'visual-P300', 'eegnb_examples')
51 |
52 | # If dataset hasn't been downloaded yet, download it
53 | if not os.path.isdir(p300_data_path):
54 | fetch_dataset(data_dir=eegnb_data_path, experiment='visual-P300', site='eegnb_examples')
55 |
56 |
57 | subject = 1
58 | session = 1
59 | raw = load_data(subject,session,
60 | experiment='visual-P300', site='eegnb_examples', device_name='muse2016',
61 | data_dir = eegnb_data_path)
62 |
63 | ###################################################################################################
64 |
65 | ###################################################################################################
66 | # Filtering
67 | # ----------------------------
68 |
69 | raw.filter(1,30, method='iir')
70 |
71 | ###################################################################################################
72 | # Epoching
73 | # ----------------------------
74 |
75 | # Create an array containing the timestamps and type of each stimulus (i.e. target or non-target)
76 | events = find_events(raw)
77 | event_id = {'Non-Target': 1, 'Target': 2}
78 | epochs = Epochs(raw, events=events, event_id=event_id,
79 | tmin=-0.1, tmax=0.8, baseline=None, reject={'eeg': 100e-6}, preload=True, verbose=False, picks=[0,1,2,3])
80 |
81 | print('sample drop %: ', (1 - len(epochs.events)/len(events)) * 100)
82 |
83 | epochs
84 |
85 | ###################################################################################################
86 | # Classification
87 | # ----------------------------
88 |
89 | clfs = OrderedDict()
90 | clfs['Vect + LR'] = make_pipeline(Vectorizer(), StandardScaler(), LogisticRegression())
91 | clfs['Vect + RegLDA'] = make_pipeline(Vectorizer(), LDA(shrinkage='auto', solver='eigen'))
92 | clfs['Xdawn + RegLDA'] = make_pipeline(Xdawn(2, classes=[1]), Vectorizer(), LDA(shrinkage='auto', solver='eigen'))
93 |
94 | clfs['XdawnCov + TS'] = make_pipeline(XdawnCovariances(estimator='oas'), TangentSpace(), LogisticRegression())
95 | clfs['XdawnCov + MDM'] = make_pipeline(XdawnCovariances(estimator='oas'), MDM())
96 |
97 |
98 | clfs['ERPCov + TS'] = make_pipeline(ERPCovariances(), TangentSpace(), LogisticRegression())
99 | clfs['ERPCov + MDM'] = make_pipeline(ERPCovariances(), MDM())
100 |
101 | # format data
102 | epochs.pick_types(eeg=True)
103 | X = epochs.get_data() * 1e6
104 | times = epochs.times
105 | y = epochs.events[:, -1]
106 |
107 | # define cross validation
108 | cv = StratifiedShuffleSplit(n_splits=10, test_size=0.25, random_state=42)
109 |
110 | # run cross validation for each pipeline
111 | auc = []
112 | methods = []
113 | for m in clfs:
114 | res = cross_val_score(clfs[m], X, y==2, scoring='roc_auc', cv=cv, n_jobs=-1)
115 | auc.extend(res)
116 | methods.extend([m]*len(res))
117 |
118 | results = pd.DataFrame(data=auc, columns=['AUC'])
119 | results['Method'] = methods
120 |
121 | plt.figure(figsize=[8,4])
122 | sns.barplot(data=results, x='AUC', y='Method')
123 | plt.xlim(0.2, 0.85)
124 | sns.despine()
125 |
--------------------------------------------------------------------------------
/examples/visual_p300/README.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/examples/visual_ssvep/00x__ssvep_run_experiment.py:
--------------------------------------------------------------------------------
1 | """
2 | SSVEP run experiment
3 | ===============================
4 |
5 | This example demonstrates the initiation of an EEG stream with eeg-expy, and how to run
6 | an experiment.
7 |
8 | """
9 |
10 | ###################################################################################################
11 | # Setup
12 | # ---------------------
13 | #
14 | # Imports
15 | import os
16 | from eegnb import generate_save_fn
17 | from eegnb.devices.eeg import EEG
18 | from eegnb.experiments import VisualSSVEP
19 |
20 | # Define some variables
21 | board_name = "muse2"
22 | experiment = "visual_ssvep"
23 | subject_id = 0
24 | session_nb = 0
25 | record_duration = 120
26 |
27 | ###################################################################################################
28 | # Initiate EEG device
29 | # ---------------------
30 | #
31 | # Start EEG device
32 | eeg_device = EEG(device=board_name)
33 |
34 | # Create save file name
35 | save_fn = generate_save_fn(board_name, experiment, subject_id, session_nb)
36 | print(save_fn)
37 |
38 | ###################################################################################################
39 | # Run experiment
40 | # ---------------------
41 | #
42 | ssvep = VisualSSVEP(duration=record_duration, eeg=eeg_device, save_fn=save_fn)
43 | ssvep.run()
44 |
--------------------------------------------------------------------------------
/examples/visual_ssvep/README.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.pytest.ini_options]
2 | minversion = "6.0"
3 | addopts = """
4 | --cov=eegnb
5 | --cov-report=term
6 | --cov-report=xml
7 | --cov-report=html
8 | --nbval-lax
9 | --current-env
10 | --ignore-glob 'examples/**.py'
11 | --ignore-glob '**/baseline_task.py'
12 | """
13 | testpaths = [
14 | "eegnb",
15 | "tests",
16 | #"examples",
17 | ]
18 | python_files = ["*.py", "*.ipynb"]
19 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 |
2 | ## ~~ Analysis Requirements ~~
3 |
4 | scikit-learn>=0.23.2
5 | pandas>=1.1.4
6 | numpy>=1.19.4,<1.24 # due to outdated libs not changing the names after: https://github.com/numpy/numpy/pull/22607
7 | mne>=0.20.8
8 | seaborn>=0.11.0
9 | pyriemann>=0.2.7
10 | jupyter
11 | gdown>=4.5.1
12 | matplotlib>=3.3.3
13 | pysocks>=1.7.1
14 | pyserial>=3.5
15 | h5py>=3.1.0
16 | pytest-shutil
17 | pyo>=1.0.3; platform_system == "Linux"
18 | #pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to
19 | # a specific version prevents an endless dependency resolution loop.
20 | pyobjc==7.3; sys_platform == 'darwin'
21 | airium>=0.1.0
22 | attrdict>=2.0.1
23 | attrdict3
24 |
25 |
26 | ## ~~ Streaming Requirements ~~
27 |
28 | muselsl>=2.0.2
29 | # Upgrade from 1.10.5 to 1.16.2 so the arm64 lib is available to macOS Apple Silicon for preventing error:
30 | # pylsl/liblsl64.dylib' (mach-o file, but is an incompatible architecture (have 'x86_64', need 'arm64e' or 'arm64'))
31 | pylsl==1.16.2
32 | brainflow>=4.8.2
33 | pysocks>=1.7.1
34 | pyserial>=3.5
35 | h5py>=3.1.0
36 | pytest-shutil
37 | pyo>=1.0.3; platform_system == "Linux"
38 | #pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to
39 | # a specific version prevents an endless dependency resolution loop.
40 | pyobjc==7.3; sys_platform == 'darwin'
41 | #Removed keyboard dependency due segmentation fault on Apple Silicon: https://github.com/boppreh/keyboard/issues/507
42 | pynput
43 | airium>=0.1.0
44 | attrdict>=2.0.1
45 | attrdict3
46 | click
47 |
48 |
49 | ## ~~ Stimpres Requirements ~~
50 |
51 | #pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to
52 | # a specific version prevents an endless dependency resolution loop.
53 | pyobjc==7.3; sys_platform == 'darwin'
54 | #upgrade psychopy to use newer wxpython dependency which is prebuilt for m1 support.
55 | psychopy==2023.2.2
56 | psychtoolbox
57 | scikit-learn>=0.23.2
58 | pandas>=1.1.4
59 | numpy>=1.19.4,<1.24 # due to outdated libs not changing the names after: https://github.com/numpy/numpy/pull/22607
60 | mne>=0.20.8
61 | seaborn>=0.11.0
62 | pysocks>=1.7.1
63 | pyserial>=3.5
64 | h5py>=3.1.0
65 | pytest-shutil
66 | pyo>=1.0.3; platform_system == "Linux"
67 | airium>=0.1.0
68 | attrdict>=2.0.1
69 | attrdict3
70 |
71 | # pywinhook needs some special treatment since there are only wheels on PyPI for Python 3.7-3.8, and building requires special tools (swig, VS C++ tools)
72 | # See issue: https://github.com/NeuroTechX/eeg-notebooks/issues/29
73 | pywinhook>=1.6.0 ; platform_system == "Windows" and (python_version == "3.7" or python_version == "3.8")
74 | pywinhook @ https://github.com/ActivityWatch/wheels/raw/master/pywinhook/pyWinhook-1.6.2-cp39-cp39-win_amd64.whl ; platform_system == "Windows" and python_version == "3.9"
75 |
76 | # pyglet downgrade to prevent threadmode warning on windows
77 | # See issue: https://github.com/psychopy/psychopy/issues/2876
78 | pyglet==1.4.11 ; platform_system == "Windows"
79 |
80 | # Oculus/Quest VR support - currently only supported on Windows.
81 | psychxr>=0.2.4rc2; platform_system == "Windows"
82 |
83 |
84 |
85 | ## ~~ Docsbuild Requirements ~~
86 | recommonmark
87 | brainflow
88 | numpydoc
89 |
90 | # Docs
91 | sphinx
92 | sphinx-gallery
93 | sphinx_rtd_theme
94 | sphinx-tabs
95 | sphinx-copybutton
96 | sphinxcontrib-httpdomain
97 | numpydoc
98 | recommonmark
99 | versioneer
100 | rst2pdf
101 | docutils
102 |
103 | # Tests
104 | mypy
105 | pytest
106 | pytest-cov
107 | nbval
108 |
109 | # Types
110 | types-requests
111 |
112 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [versioneer]
2 | VCS = git
3 | style = pep440
4 | versionfile_source = eegnb/_version.py
5 | versionfile_build = _version.py
6 | tag_prefix =
7 | parentdir_prefix = eegnb-
8 |
9 | [mypy]
10 | # FIXME: ./examples isn't recursively traversed (see the fix in: https://github.com/python/mypy/pull/9614)
11 | # Once mypy releases a new version we can use, this should work again.
12 | files = eegnb, examples
13 | ignore_missing_imports = true
14 | # FIXME: Once we have type annotations under control, this should be reenabled.
15 | #check_untyped_defs = true
16 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from setuptools import setup, find_packages
4 |
5 | with open("README.rst", "r") as fh:
6 | long_description = fh.read()
7 |
8 | fptxt = open('requirements.txt', 'r').read()
9 | install_requires_analysis = fptxt.split('## ~~ Analysis Requirements ~~')[1].split('## ~~')[0].splitlines()[1:]
10 | install_requires_streaming = fptxt.split('## ~~ Streaming Requirements ~~')[1].split('## ~~')[0].splitlines()[1:]
11 | install_requires_stimpres = fptxt.split('## ~~ Stimpres Requirements ~~')[1].split('## ~~')[0].splitlines()[1:]
12 | install_requires_docsbuild = fptxt.split('## ~~ Docsbuild Requirements ~~')[1].split('## ~~')[0].splitlines()[1:]
13 |
14 | setup(
15 | name="eeg-expy",
16 | version="0.2",
17 | author="John David Griffiths",
18 | author_email="j.davidgriffiths@gmail.com",
19 | description='python library for eeg cognitive neuroscience experiments',
20 | keywords='eeg, cognitive neuroscience, experiments, evoked response, auditory, visual',
21 | long_description=long_description,
22 |     long_description_content_type="text/x-rst",
23 |     install_requires=install_requires_analysis,  # base dependencies
24 | extras_require={
25 | 'docsbuild': install_requires_docsbuild,
26 | 'streaming': install_requires_streaming,
27 | 'stimpres': install_requires_stimpres,
28 | 'streamstim': install_requires_streaming + install_requires_stimpres,
29 | 'full': install_requires_docsbuild + install_requires_streaming + install_requires_stimpres
30 | },
31 | url='https://github.com/NeuroTechX/eeg-expy',
32 | license="BSD (3-clause)",
33 | entry_points={"console_scripts": ["eegnb=eegnb.cli.__main__:main",
34 | "eegexpy=eegnb.cli.__main__:main"]},
35 | packages=find_packages(),
36 | classifiers=[
37 | "Programming Language :: Python :: 3",
38 |         "License :: OSI Approved :: BSD License",
39 | "Operating System :: OS Independent",
40 | ],
41 | python_requires='>=3.8',
42 | )
43 |
--------------------------------------------------------------------------------
/tests/test_empty.py:
--------------------------------------------------------------------------------
1 | def test_empty():
2 | assert True
3 |
--------------------------------------------------------------------------------
/tests/test_run_experiments.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | ABOUT
4 | ======
5 |
6 | This is a short test script for the primary experiments supported by eeg-expy.
7 |
8 | It is intended as a quick manual check that everything is in working order with the eeg-expy installation.
9 |
10 | (Note that it is far from comprehensive in that regard, however. More coverage will be added, primarily if/when it becomes useful to the eeg-expy developers.)
11 |
12 | It does a minimal import and run of several experiments.
13 |
14 | Note that although this is a .py file in the `tests` folder, it is not directly part of the CI test suite, which is built around the GitHub CI and the sphinx-gallery docs build.
15 |
16 | The reason for this is that it is not straightforward to incorporate tests of EEG device data streaming (which cannot be done in the cloud) or stimulus presentation into the GitHub CI. In any case, both of these are highly dependent on the hardware being used (the OS and specs of the computer running eeg-expy, the EEG device, etc.).
17 |
18 | So this script serves as a separate line of installation tests.
19 |
20 | If you are using eeg-expy, and especially if you are a developer, contributor, or otherwise technically savvy person, this script should be run regularly to check that things are working. Results should be fed back via the GitHub repo for as many OS and device configurations as possible.
21 |
22 |
23 | USAGE:
24 | =======
25 |
26 | Currently this script is very minimal.
27 |
28 | At some point we will likely make it a bit more structured with respect to options, etc.
29 |
30 | The high-level usage instructions are:
31 |
32 | 1. COPY THIS SCRIPT
33 | 2. EDIT THE COPY
34 | 3. RUN THE EDITED COPY
35 |
36 | (if you edit the original script directly, you will run into conflicts when updating the library with `git pull`)
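
For example (filenames and shell commands here are purely illustrative):

    cp tests/test_run_experiments.py my_run_experiments.py
    # edit the test_config dict near the top of the copy, then:
    python my_run_experiments.py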
37 |
38 | Your edits should be restricted to the config dict at the top.
39 |
40 | The key parts of this are:
41 |
42 | - Audio device configuration
43 | - EEG device and Bluetooth streaming configuration (not yet implemented)
44 |
45 | You may need to do some trial-and-error testing to identify the correct settings to use there.
46 |
47 | Relying on default configurations and automatically detected parameters is not recommended.
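
To see which audio devices PsychoPy can detect on your machine (the same call is used in
the sanity check further down in this script), run something like:

    from psychopy import sound
    print(sound.getDevices())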
48 |
49 |
50 | """
51 |
52 | from psychopy import prefs, sound
53 |
54 | test_config = dict(run_n170 = True,
55 | run_p300 = True,
56 | run_ssvep = True,
57 | run_aob = True,
58 | audio_device = prefs.hardware['audioDevice'],
59 | audio_lib = prefs.hardware['audioLib'],
60 | test_duration = 10,
61 | fullscreen = False
62 | )
63 | # -----------------------------------------------------------------------
64 | # ***EDIT THIS SECTION ONLY*** to specify any non-default config entries
65 | test_config['audio_device'] = "Speakers (Apple Audio Device)" # see `sound.getDevices()`
66 | test_config['audio_lib'] = "ptb"
67 | # ----------------------------------------------------------------------
68 |
69 | # ---------------------------------------
70 | # CONFIG NOTES:
71 | #
72 | # - Windows 11 on an iMac (via Boot Camp):
73 | # test_config['audio_device'] = "Speakers (Apple Audio Device)"
74 | # test_config['audio_lib'] = 'ptb'
75 | #
76 | # ---------------------------------------
77 |
78 | tc = test_config
79 |
80 | assert tc['audio_device'] in sound.getDevices(), f"Audio device not found: {tc['audio_device']}"
81 | d = tc['test_duration']
82 |
83 |
84 | if tc['run_n170']:
85 | from eegnb.experiments.visual_n170.n170 import VisualN170
86 | expt = VisualN170(duration=d)
87 | expt.use_fullscr = tc['fullscreen']
88 | expt.run()
89 |
90 | if tc['run_p300']:
91 | from eegnb.experiments.visual_p300.p300 import VisualP300
92 | expt = VisualP300(duration=d)
93 | expt.use_fullscr = tc['fullscreen']
94 | expt.run()
95 |
96 | if tc['run_ssvep']:
97 | from eegnb.experiments.visual_ssvep.ssvep import VisualSSVEP
98 | expt = VisualSSVEP(duration=d)
99 | expt.use_fullscr = tc['fullscreen']
100 | expt.run()
101 |
102 | if tc['run_aob']:
103 | from eegnb.experiments.auditory_oddball.aob import AuditoryOddball
104 | prefs.hardware['audioDevice'] = tc['audio_device']
105 | prefs.hardware['audioLib'] = tc['audio_lib']
106 | expt = AuditoryOddball(duration=d)
107 | expt.use_fullscr = tc['fullscreen']
108 | expt.run()
109 |
110 |
111 |
112 |
--------------------------------------------------------------------------------