├── .coveragerc ├── .github └── workflows │ ├── ci.yml │ └── python-publish.yml ├── .gitignore ├── .readthedocs.yml ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── JOSS ├── paper.bib └── paper.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── Tests ├── pytest.ini ├── test_PseudoDevice.py ├── test_Pytorch.py ├── test_Simple.py ├── test_Sklearn.py ├── test_Tensorflow.py ├── test_cli.py ├── test_zDualStream.py └── test_zSimpleOtherFeatures.py ├── appveyor.yml ├── docs ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── BackgroundInformation │ ├── Contributing.rst │ ├── Epoch_Timing.rst │ ├── Examples.rst │ ├── Feature_Selection.rst │ ├── Getting_Started.rst │ ├── Pseudo_Device.rst │ ├── Theory_Operation.rst │ └── What_is_PyBCI.rst │ ├── Images │ ├── flowchart │ │ └── Flowchart.svg │ ├── operation.svg │ ├── pyBCI.png │ ├── pyBCITitle.png │ ├── pyBCITitle.svg │ └── splitEpochs │ │ ├── example1.png │ │ ├── example1split0.png │ │ └── example1split50.png │ ├── api │ ├── Configurations.rst │ ├── LSLScanner.rst │ ├── PseudoDeviceController.rst │ └── PyBCI.rst │ ├── conf.py │ └── index.rst ├── pybci ├── CliTests │ ├── __init__.py │ ├── testPyTorch.py │ ├── testSimple.py │ ├── testSklearn.py │ └── testTensorflow.py ├── Configuration │ ├── EpochSettings.py │ ├── FeatureSettings.py │ ├── PseudoDeviceSettings.py │ └── __init__.py ├── Examples │ ├── ArduinoHandGrasp │ │ ├── ArduinoToLSL.py │ │ ├── MarkerMaker.py │ │ ├── README.md │ │ ├── ServoControl.ino │ │ ├── testArduinoHand.py │ │ └── testArduinoPytorch.py │ ├── MultimodalPupilLabsEEG │ │ └── testMultimodal.py │ ├── PupilLabsRightLeftEyeClose │ │ ├── README.md │ │ ├── RightLeftMarkers.py │ │ └── bciGazeExample.py │ ├── README.md │ ├── separatePseudoDevice.py │ ├── testEpochTimingsConfig.py │ ├── testPyTorch.py │ ├── testRaw.py │ ├── testSimple.py │ ├── testSklearn.py │ └── testTensorflow.py ├── ThreadClasses │ ├── ClassifierThread.py │ ├── FeatureProcessorThread.py │ ├── MarkerThread.py │ ├── OptimisedDataReceiverThread.py │ └── __init__.py ├── Utils │ ├── Classifier.py │ ├── FeatureExtractor.py │ ├── LSLScanner.py │ ├── Logger.py │ ├── PseudoDevice.py │ └── __init__.py ├── __init__.py ├── cli.py ├── pybci.py └── version.py ├── pyproject.toml ├── requirements-devel.txt └── requirements.txt /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | cli.py 4 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main # or your default branch name 7 | pull_request: 8 | branches: 9 | - main # or your default branch name 10 | 11 | jobs: 12 | test: 13 | runs-on: windows-latest 14 | strategy: 15 | matrix: 16 | python-version: [3.11] 17 | 18 | steps: 19 | - name: Checkout code 20 | uses: actions/checkout@v2 21 | 22 | - name: Set up Python 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - name: Update PATH and Install Dependencies 28 | run: | 29 | echo "Adding Python to PATH" 30 | echo "$env:PYTHONPATH;${{ env.pythonLocation }}\Scripts;${{ env.pythonLocation }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 31 | pip install --upgrade pip 32 | pip install pytest pytest-timeout pytest-cov 33 | 34 | - name: Download and Setup liblsl 35 | run: | 36 | Invoke-WebRequest -Uri 
'https://github.com/sccn/liblsl/releases/download/v1.16.2/liblsl-1.16.2-Win_amd64.zip' -OutFile 'liblsl.zip' 37 | Expand-Archive -Path liblsl.zip -DestinationPath liblsl 38 | Move-Item -Path .\liblsl\bin\lsl.dll -Destination ${{ env.pythonLocation }}\Lib\site-packages\ 39 | echo "PYLSL_LIB=${{ env.pythonLocation }}\Lib\site-packages\lsl.dll" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append 40 | 41 | - name: Additional Dependency Installation 42 | run: | 43 | pip uninstall -y urllib3 44 | pip install --upgrade "urllib3>=2.0.5" 45 | pip install ruff 46 | pip install . 47 | 48 | - name: Update Environment Variables 49 | run: | 50 | echo "Updating Environment Variables" 51 | $env:PYTHONPATH="${{ github.workspace }}" 52 | $env:PYLSL_LIB="${{ env.pythonLocation }}\Lib\site-packages\lsl.dll" 53 | 54 | 55 | - name: Debug Information 56 | run: | 57 | python --version 58 | pip list 59 | echo "PYTHONPATH: $PYTHONPATH" 60 | echo "PYLSL_LIB: $PYLSL_LIB" 61 | 62 | #- name: Run Tests 63 | # run: | 64 | # pytest Tests/ --cov=pybci --cov-report=xml -vv 65 | 66 | - name: Generate Report 67 | run: | 68 | coverage run --source=pybci/ --omit='pybci/CliTests/*,pybci/cli.py' -m pytest 69 | coverage xml -i 70 | #- name: Upload coverage to GitHub Artifacts 71 | # uses: actions/upload-artifact@v2 72 | # with: 73 | # name: coverage-report 74 | # path: coverage.xml 75 | #- name: List working directory 76 | # run: ls -al 77 | 78 | - name: Upload Coverage to Codecov 79 | uses: codecov/codecov-action@v2 80 | with: 81 | token: ${{ secrets.CODECOV_TOKEN }} # Not required for public repos 82 | # file: coverage.xml # Adjust this path if needed 83 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | name: pypi publish 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | release: 7 | types: [ created ] 8 | 9 | defaults: 10 | run: 11 | shell: bash 12 | 13 | jobs: 14 | deploy: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Set up Python 19 | uses: actions/setup-python@v3 20 | with: 21 | python-version: '3.x' 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install build 26 | pip install twine 27 | - name: Build package 28 | run: python -m build 29 | - name: Build and publish to TestPyPI 30 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 31 | env: 32 | TWINE_USERNAME: __token__ 33 | TWINE_PASSWORD: ${{ secrets.TEST_PYPI_API_TOKEN }} 34 | run: twine upload --repository-url https://test.pypi.org/legacy/ dist/* 35 | - name: Clean dist directory 36 | run: rm -rf dist/* 37 | - name: Build package for PyPI 38 | run: python -m build 39 | - name: Build and publish to PyPI 40 | if: github.event_name == 'release' && github.event.action == 'created' 41 | env: 42 | TWINE_USERNAME: __token__ 43 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} 44 | run: twine upload dist/* 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | pybci/version.py 3 | docs/build -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See 
https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.11" 13 | # You can also specify other tool versions: 14 | # nodejs: "19" 15 | # rust: "1.64" 16 | # golang: "1.19" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | configuration: docs/source/conf.py 21 | 22 | # If using Sphinx, optionally build your docs in additional formats such as PDF 23 | # formats: 24 | # - pdf 25 | 26 | # Optionally declare the Python requirements required to build your docs 27 | python: 28 | install: 29 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: "1.2.0" 2 | authors: 3 | - family-names: Booth 4 | given-names: Liam 5 | orcid: "https://orcid.org/0000-0002-8749-9726" 6 | - family-names: Asghar 7 | given-names: Aziz 8 | orcid: "https://orcid.org/0000-0002-3735-4449" 9 | - family-names: Bateson 10 | given-names: Anthony 11 | orcid: "https://orcid.org/0000-0002-4780-4458" 12 | doi: 10.5281/zenodo.10245437 13 | message: If you use this software, please cite our article in the 14 | Journal of Open Source Software. 15 | preferred-citation: 16 | authors: 17 | - family-names: Booth 18 | given-names: Liam 19 | orcid: "https://orcid.org/0000-0002-8749-9726" 20 | - family-names: Asghar 21 | given-names: Aziz 22 | orcid: "https://orcid.org/0000-0002-3735-4449" 23 | - family-names: Bateson 24 | given-names: Anthony 25 | orcid: "https://orcid.org/0000-0002-4780-4458" 26 | date-published: 2023-12-02 27 | doi: 10.21105/joss.05706 28 | issn: 2475-9066 29 | issue: 92 30 | journal: Journal of Open Source Software 31 | publisher: 32 | name: Open Journals 33 | start: 5706 34 | title: "PyBCI: A Python Package for Brain-Computer Interface (BCI) 35 | Design" 36 | type: article 37 | url: "https://joss.theoj.org/papers/10.21105/joss.05706" 38 | volume: 8 39 | title: "PyBCI: A Python Package for Brain-Computer Interface (BCI) 40 | Design" 41 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Attribution 36 | 37 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct.html) 38 | 39 | For answers to common questions about this code of conduct, see [https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq) 40 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to PyBCI 2 | 3 | Thank you for your interest in contributing to PyBCI! We value your contribution and aim to make the process of contributing as smooth as possible. Here are the guidelines: 4 | 5 | ## Getting Started 6 | 7 | - **Communication:** For general questions or discussions, please open an issue on the [GitHub repository](https://github.com/LMBooth/pybci). 8 | - **Code of Conduct:** Please follow the [Code of Conduct](CODE_OF_CONDUCT.md) to maintain a respectful and inclusive environment. 9 | 10 | ## Contribution Process 11 | 12 | 1. **Fork the Repository:** Fork the [PyBCI repository](https://github.com/LMBooth/pybci) on GitHub to your own account. 13 | 2. **Clone the Forked Repository:** Clone your fork locally on your machine. 14 | 3. **Set Up the Development Environment:** Ensure you have all the necessary tools and dependencies installed to work on PyBCI. 15 | 4. 
**Create a New Branch:** Create a new branch for the specific issue or feature you are working on. 16 | 5. **Make Your Changes:** Make the necessary changes, adhering to the PyBCI code style and conventions. 17 | 6. **Run Tests:** Run the tests using `pytest` in the root directory to ensure that your changes do not break existing functionality. 18 | 7. **Update Documentation:** If your changes involve modifications to the API or the introduction of new features, update the documentation accordingly. 19 | 8. **Push Your Changes:** Push your changes to your fork on GitHub. 20 | 9. **Submit a Pull Request:** Submit a pull request from your fork to the PyBCI repository. 21 | 22 | ## Development Environment 23 | 24 | Ensure that you have installed the necessary dependencies by running: 25 | 26 | ```bash 27 | pip install -r requirements.txt 28 | ``` 29 | 30 | ### Running Tests 31 | To run the tests, execute: 32 | 33 | ```bash 34 | pytest 35 | ``` 36 | 37 | ### Coding Standards and Conventions 38 | Please adhere to the coding standards and conventions used throughout the PyBCI project. This includes naming conventions, comment styles, and code organization. 39 | 40 | ## Documentation 41 | We use Sphinx with ReadTheDocs for documentation. Ensure that you update the documentation if you change the API or introduce new features. 42 | 43 | ## Continuous Integration 44 | We use AppVeyor for continuous integration to maintain the stability of the codebase. Ensure that your changes pass the build on AppVeyor before submitting a pull request. The configuration is located in the appveyor.yml file in the project root. 45 | 46 | ## Licensing 47 | By contributing to PyBCI, you agree that your contributions will be licensed under the same license as the project, as specified in the LICENSE file. 48 | 49 | ## Acknowledgements 50 | Contributors will be acknowledged in a dedicated section of the documentation or project README. 51 | 52 | Thank you for contributing to PyBCI! 53 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use Ubuntu 22.04 LTS as the base image 2 | FROM ubuntu:22.04 3 | 4 | # Avoid prompts from apt-get 5 | ARG DEBIAN_FRONTEND=noninteractive 6 | 7 | # Update the package list 8 | RUN apt-get update -y 9 | 10 | # Install Python 3.10 11 | RUN apt-get install -y python3.10 python3-distutils python3-pip 12 | 13 | # Install additional dependencies 14 | RUN apt-get install -y cmake git libpugixml1v5 wget 15 | 16 | # Install pip for Python 3.10 17 | RUN wget https://bootstrap.pypa.io/get-pip.py && \ 18 | python3.10 get-pip.py && \ 19 | rm get-pip.py 20 | 21 | # Clone the pybci repository 22 | RUN git clone https://github.com/LMBooth/pybci.git /pybci 23 | 24 | # Set the working directory 25 | WORKDIR /pybci 26 | 27 | # Install Python dependencies (version spec quoted so the shell does not treat >= as a redirect) 28 | RUN python3.10 -m pip install --upgrade "urllib3>=2.0.5" 29 | RUN python3.10 -m pip install . 
pytest pytest-timeout ruff 30 | 31 | # Download and install liblsl 32 | 33 | RUN wget https://github.com/sccn/liblsl/releases/download/v1.16.2/liblsl-1.16.2-jammy_amd64.deb -O liblsl.deb && \ 34 | dpkg -i liblsl.deb && \ 35 | rm liblsl.deb 36 | 37 | # Copy liblsl.so to the target directory 38 | RUN mkdir -p /usr/local/lib/python3.10/site-packages/pylsl/lib && \ 39 | cp /usr/lib/liblsl.so /usr/local/lib/python3.10/site-packages/pylsl/lib/ 40 | 41 | # Expose the necessary port (change if needed) 42 | EXPOSE 8080 43 | 44 | # Command to run when starting the container 45 | CMD ["/bin/bash"] 46 | -------------------------------------------------------------------------------- /JOSS/paper.bib: -------------------------------------------------------------------------------- 1 | @book{NEURIPS2019_9015, 2 | title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library}, 3 | booktitle = {Advances in Neural Information Processing Systems 32}, 4 | year = {2019}, 5 | pages = {8024{\textendash}8035}, 6 | publisher = {Curran Associates, Inc.}, 7 | organization = {Curran Associates, Inc.}, 8 | url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf}, 9 | author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith}, 10 | editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d{\textquoteright}Alch{\'e}-Buc and E. Fox and R. Garnett} 11 | } 12 | 13 | @book{oliphant2006guide, 14 | title={A guide to NumPy}, 15 | author={Oliphant, Travis E}, 16 | volume={1}, 17 | year={2006}, 18 | publisher={Trelgol Publishing USA} 19 | } 20 | 21 | @misc{tensorflow2015-whitepaper, 22 | title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems}, 23 | url={https://www.tensorflow.org/}, 24 | note={Software available from tensorflow.org}, 25 | author={ 26 | Mart\'{i}n~Abadi and 27 | Ashish~Agarwal and 28 | Paul~Barham and 29 | Eugene~Brevdo and 30 | Zhifeng~Chen and 31 | Craig~Citro and 32 | Greg~S.~Corrado and 33 | Andy~Davis and 34 | Jeffrey~Dean and 35 | Matthieu~Devin and 36 | Sanjay~Ghemawat and 37 | Ian~Goodfellow and 38 | Andrew~Harp and 39 | Geoffrey~Irving and 40 | Michael~Isard and 41 | Yangqing Jia and 42 | Rafal~Jozefowicz and 43 | Lukasz~Kaiser and 44 | Manjunath~Kudlur and 45 | Josh~Levenberg and 46 | Dandelion~Man\'{e} and 47 | Rajat~Monga and 48 | Sherry~Moore and 49 | Derek~Murray and 50 | Chris~Olah and 51 | Mike~Schuster and 52 | Jonathon~Shlens and 53 | Benoit~Steiner and 54 | Ilya~Sutskever and 55 | Kunal~Talwar and 56 | Paul~Tucker and 57 | Vincent~Vanhoucke and 58 | Vijay~Vasudevan and 59 | Fernanda~Vi\'{e}gas and 60 | Oriol~Vinyals and 61 | Pete~Warden and 62 | Martin~Wattenberg and 63 | Martin~Wicke and 64 | Yuan~Yu and 65 | Xiaoqiang~Zheng}, 66 | year={2015}, 67 | } 68 | 69 | @software{lsl, 70 | author = { 71 | Christian Kothe and 72 | Tristan Stenner and 73 | Chadwick Boulay and 74 | Matthew Grivich and 75 | David Medine and 76 | tobiasherzke and 77 | chausner and 78 | Giso Grimm and 79 | xloem and 80 | Arthur Biancarelli and 81 | Boris Mansencal and 82 | Paul Maanen and 83 | Jérémy Frey and 84 | Jidong Chen and 85 | kyucrane and 86 | Samuel Powell and 87 | Pierre 
Clisson and 88 | phfix}, 89 | title = {sccn/liblsl: v1.16.2}, 90 | month = may, 91 | year = 2023, 92 | publisher = {Zenodo}, 93 | version = {v1.16.2}, 94 | doi = {10.5281/zenodo.7978343}, 95 | url = {https://doi.org/10.5281/zenodo.7978343}, 96 | date = {2023-11-29} 97 | } 98 | 99 | @misc{vallat_antropy_2023, 100 | author = {Raphael Vallat}, 101 | title = {AntroPy: entropy and complexity of (EEG) time-series in {P}ython}, 102 | year = {2023}, 103 | publisher = {GitHub}, 104 | journal = {GitHub repository}, 105 | howpublished = {\url{https://github.com/raphaelvallat/antropy}}, 106 | note = {Python 3 package providing several time-efficient algorithms for computing the complexity of time-series, used for example to extract features from EEG signals} 107 | } 108 | 109 | @article{scikit-learn, 110 | title={Scikit-learn: Machine Learning in {P}ython}, 111 | author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. 112 | and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. 113 | and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and 114 | Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, 115 | journal={Journal of Machine Learning Research}, 116 | volume={12}, 117 | pages={2825--2830}, 118 | year={2011} 119 | } 120 | 121 | @article{2020SciPy-NMeth, 122 | author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and 123 | Haberland, Matt and Reddy, Tyler and Cournapeau, David and 124 | Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and 125 | Bright, Jonathan and {van der Walt}, St{\'e}fan J. and 126 | Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and 127 | Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and 128 | Kern, Robert and Larson, Eric and Carey, C J and 129 | Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and 130 | {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and 131 | Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and 132 | Harris, Charles R. and Archibald, Anne M. and 133 | Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and 134 | {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, 135 | title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific 136 | Computing in Python}}, 137 | journal = {Nature Methods}, 138 | year = {2020}, 139 | volume = {17}, 140 | pages = {261--272}, 141 | adsurl = {https://rdcu.be/b08Wh}, 142 | doi = {10.1038/s41592-019-0686-2}, 143 | } 144 | 145 | @article{2021bateson_asghar, 146 | author = {Bateson, Anthony and Asghar, Aziz}, 147 | year = {2021}, 148 | month = {05}, 149 | pages = {1-1}, 150 | title = {Development and Evaluation of a Smartphone-Based Electroencephalography (EEG) System}, 151 | volume = {PP}, 152 | journal = {IEEE Access}, 153 | doi = {10.1109/ACCESS.2021.3079992} 154 | } 155 | 156 | @article{OpenViBE, 157 | author={Renard, Yann and Lotte, Fabien and Gibert, Guillaume and Congedo, Marco and Maby, Emmanuel and Delannoy, Vincent and Bertrand, Olivier and Lécuyer, Anatole}, 158 | journal={Presence}, 159 | title={OpenViBE: An Open-Source Software Platform to Design, Test, and Use Brain–Computer Interfaces in Real and Virtual Environments}, 160 | year={2010}, 161 | volume={19}, 162 | number={1}, 163 | pages={35-53}, 164 | doi={10.1162/pres.19.1.35}} 165 | 166 | @article{BCI2000, 167 | author={Schalk, G. and McFarland, D.J. and Hinterberger, T. and Birbaumer, N. 
and Wolpaw, J.R.}, 168 | journal={IEEE Transactions on Biomedical Engineering}, 169 | title={BCI2000: a general-purpose brain-computer interface (BCI) system}, 170 | year={2004}, 171 | volume={51}, 172 | number={6}, 173 | pages={1034-1043}, 174 | doi={10.1109/TBME.2004.827072}} 175 | 176 | @article{BciPy, 177 | author = {Tab Memmott and Aziz Koçanaoğulları and Matthew Lawhead and Daniel Klee and Shiran Dudy and Melanie Fried-Oken and Barry Oken}, 178 | title = {BciPy: brain–computer interface software in {P}ython}, 179 | journal = {Brain-Computer Interfaces}, 180 | volume = {8}, 181 | number = {4}, 182 | pages = {137-153}, 183 | year = {2021}, 184 | publisher = {Taylor & Francis}, 185 | doi = {10.1080/2326263X.2021.1878727}, 186 | URL = { 187 | https://doi.org/10.1080/2326263X.2021.1878727 188 | }, 189 | eprint = { 190 | https://doi.org/10.1080/2326263X.2021.1878727 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /JOSS/paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'PyBCI: A Python Package for Brain-Computer Interface (BCI) Design' 3 | tags: 4 | - brain-computer-interface 5 | - python 6 | - bci 7 | - lsl 8 | - labstreaminglayer 9 | - machinelearning 10 | authors: 11 | - name: Liam Booth 12 | orcid: 0000-0002-8749-9726 13 | affiliation: "1" 14 | - name: Aziz Asghar 15 | orcid: 0000-0002-3735-4449 16 | affiliation: "2" 17 | - name: Anthony Bateson 18 | orcid: 0000-0002-4780-4458 19 | affiliation: "1" 20 | affiliations: 21 | - name: Faculty of Science and Engineering, University of Hull, United Kingdom. 22 | index: 1 23 | - name: Centre for Anatomical and Human Sciences, Hull York Medical School, University of Hull, United Kingdom. 24 | index: 2 25 | date: 26 June 2023 26 | bibliography: paper.bib 27 | --- 28 | 29 | # Summary: 30 | PyBCI is an open-source Python framework designed to streamline brain-computer interface (BCI) research. It offers a comprehensive platform for real-time data acquisition, labeling, classification and analysis. PyBCI is compatible with a wide range of time-series hardware and software data sources, thanks to its integration with the Lab Streaming Layer (LSL) protocol [@lsl]. 31 | 32 | # Statement of Need: 33 | 34 | BCI research brings together diverse fields like neuroscience, engineering, and data science, requiring specialized tools for data acquisition, feature extraction, and real-time analysis. Existing solutions may offer partial functionalities or be cumbersome to use, slowing down the pace of innovation. PyBCI addresses these challenges by providing a flexible, Python-based platform aimed at researchers and developers in the BCI domain. Assuming a foundational understanding of Python, the software serves as a comprehensive solution for both academic and industry professionals. 35 | 36 | Designed to be lightweight and user-friendly, PyBCI emphasizes quick customization and integrates seamlessly with the Lab Streaming Layer (LSL) for data acquisition and labeling [@lsl]. The platform incorporates reputable machine learning libraries like PyTorch [@NEURIPS2019_9015], TensorFlow [@tensorflow2015-whitepaper], and Scikit-learn [@scikit-learn], as well as feature extraction tools such as Antropy [@vallat_antropy_2023], NumPy [@oliphant2006guide], and SciPy [@2020SciPy-NMeth]. This integration allows users to focus more on their research and less on software development. 
While a detailed comparison with other software solutions follows in the 'State of the Field' section, PyBCI sets itself apart through its emphasis on ease of use and technological integration. 37 | 38 | # State of the Field: 39 | 40 | There are a variety of BCI software packages available, each with its own advantages and limitations. Notable packages include solutions like OpenViBE [@OpenViBE] and BCI2000 [@BCI2000] that offer ease of use for those without programming expertise. BciPy [@BciPy], another Python-based platform, provides some level of customization but does not allow for the easy integration of popular machine learning libraries. In contrast, PyBCI offers seamless integration with a variety of machine learning libraries and feature extraction tools. This flexibility makes PyBCI a robust choice for researchers seeking a tailored, code-based approach to their BCI experiments. 41 | 42 | # Software functionality and performance: 43 | 44 | PyBCI accelerates the pace of BCI research by streamlining data collection, processing, and model analysis. It uses the Lab Streaming Layer (LSL) to handle data acquisition and labelling, allowing for real-time, synchronous data collection from multiple devices [@lsl]. Samples are collected in chunks from the LSL data streams and stored in pre-allocated NumPy arrays. When in training mode, data is sliced into epochs based on a configurable time window before and after each marker type. When in test mode, data is continuously processed and analysed based on the global epoch timing settings. For feature extraction, PyBCI leverages the power of NumPy [@oliphant2006guide], SciPy [@2020SciPy-NMeth], and Antropy [@vallat_antropy_2023], robust Python libraries known for their efficiency in handling numerical operations. Machine learning, a crucial component of BCI research, is facilitated with PyTorch [@NEURIPS2019_9015], Scikit-learn [@scikit-learn] and TensorFlow [@tensorflow2015-whitepaper]. Scikit-learn offers a wide range of traditional machine learning algorithms, such as classification, regression, and clustering, while TensorFlow and PyTorch provide comprehensive ecosystems for developing and training bespoke deep learning models. 45 | 46 | # Impact: 47 | 48 | By providing a comprehensive, open-source platform for BCI research, PyBCI aims to advance the field. When integrated with off-the-shelf devices that are LSL-enabled, as well as with pre-built LSL data viewers and marker delivery systems, PyBCI facilitates the efficient design, testing, and implementation of advanced BCI experiments. The integration of LSL, PyTorch, Scikit-learn, TensorFlow, Antropy, NumPy, and SciPy into one platform simplifies the research process, encouraging innovation and collaboration in the field of brain-computer/human-machine interfaces. 49 | 50 | # Acknowledgements 51 | 52 | The io:bio mobile EEG device [@2021bateson_asghar] was used to create an initial port for streaming time-series physiological data into the Lab Streaming Layer, so we could receive, analyse, record, and classify EMG, ECG and EEG data, enabling the experimentation required prior to creating PyBCI. 53 | 54 | The work carried out by Christian Kothe in creating the Lab Streaming Layer, and the continuous maintenance of the pylsl repository by Chadwick Boulay, enables unification across many off-the-shelf devices. Chadwick Boulay also gave helpful recommendations in the GitHub issue: https://github.com/labstreaminglayer/pylsl/issues/70. 
55 | 56 | # References 57 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 LMBooth 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE -------------------------------------------------------------------------------- /Tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore::DeprecationWarning:tensorflow\.python\.framework\.dtypes:35 4 | ignore::RuntimeWarning:numpy\.core\.fromnumeric:3504 5 | ignore::RuntimeWarning:numpy\.core\._methods:129 -------------------------------------------------------------------------------- /Tests/test_PseudoDevice.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci.Utils.PseudoDevice import PseudoDeviceController 3 | 4 | def test_run_pseudo(): 5 | expected_duration = "~40 minutes" # Adjust this to your expected time 6 | print(f"\n\n=== WARNING: The tests are expected to take {expected_duration} total! 
===\n") 7 | pd = PseudoDeviceController(is_multiprocessing=True) 8 | pd.BeginStreaming() 9 | time.sleep(5) 10 | pd.StopStreaming() 11 | time.sleep(5) 12 | 13 | pd = PseudoDeviceController(is_multiprocessing=False) 14 | pd.BeginStreaming() 15 | time.sleep(5) 16 | pd.StopStreaming() 17 | time.sleep(5) 18 | -------------------------------------------------------------------------------- /Tests/test_Pytorch.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | import time 3 | import torch 4 | from torch.utils.data import DataLoader, TensorDataset 5 | from torch import nn 6 | 7 | class SimpleNN(nn.Module): 8 | def __init__(self, input_size, hidden_size, num_classes): 9 | super(SimpleNN, self).__init__() 10 | self.fc1 = nn.Linear(input_size, hidden_size) 11 | self.bn1 = nn.BatchNorm1d(hidden_size) 12 | self.relu = nn.ReLU(inplace=True) # In-place operation 13 | self.fc2 = nn.Linear(hidden_size, hidden_size) 14 | self.bn2 = nn.BatchNorm1d(hidden_size) 15 | self.fc3 = nn.Linear(hidden_size, num_classes) 16 | 17 | def forward(self, x): 18 | out = self.fc1(x) 19 | if out.shape[0] > 1: # Skip BatchNorm if batch size is 1 20 | out = self.bn1(out) 21 | out = self.relu(out) 22 | out = self.fc2(out) 23 | if out.shape[0] > 1: # Skip BatchNorm if batch size is 1 24 | out = self.bn2(out) 25 | out = self.relu(out) 26 | out = self.fc3(out) 27 | return out 28 | def PyTorchModel(x_train, x_test, y_train, y_test): 29 | input_size = 2*8 # num of channels multipled by number of default features (rms and mean freq) 30 | hidden_size = 100 31 | num_classes = 4 # default in pseudodevice 32 | model = SimpleNN(input_size, hidden_size, num_classes) 33 | model.train() 34 | criterion = nn.CrossEntropyLoss() 35 | optimizer = torch.optim.Adam(model.parameters(), lr=0.001) 36 | epochs = 10 37 | train_data = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train).long()) 38 | train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True, drop_last=True) # Drop last incomplete batch 39 | for epoch in range(epochs): 40 | for inputs, labels in train_loader: 41 | optimizer.zero_grad() 42 | outputs = model(inputs) 43 | loss = criterion(outputs, labels) 44 | loss.backward() 45 | optimizer.step() 46 | model.eval() 47 | accuracy = 0 48 | with torch.no_grad(): 49 | test_outputs = model(torch.Tensor(x_test)) 50 | _, predicted = torch.max(test_outputs.data, 1) 51 | correct = (predicted == torch.Tensor(y_test).long()).sum().item() 52 | accuracy = correct / len(y_test) 53 | return accuracy, model 54 | 55 | #@pytest.mark.timeout(300) # Extended timeout to 5 minutes 56 | def test_run_bci(): 57 | bci = PyBCI(minimumEpochsRequired = 2, createPseudoDevice=True, torchModel=PyTorchModel) 58 | while not bci.connected: 59 | bci.Connect() 60 | time.sleep(1) 61 | bci.TrainMode() 62 | accuracy_achieved = False 63 | marker_received = False 64 | in_test_mode = False 65 | accuracy=None 66 | while True: 67 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 68 | time.sleep(0.5) # wait for marker updates 69 | #print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 70 | if len(currentMarkers) > 1: # check there is more then one marker type received 71 | marker_received = True 72 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 73 | classInfo = bci.CurrentClassifierInfo() # hangs if called too 
early 74 | accuracy = classInfo["accuracy"]### 75 | if accuracy > 0: 76 | # set to above 0 to show some accuracy was retruend from model 77 | accuracy_achieved = True 78 | bci.TestMode() 79 | break 80 | #if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+4: 81 | # break 82 | while True: 83 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 84 | print(markerGuess) 85 | #guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 86 | in_test_mode = True 87 | time.sleep(1) 88 | bci.StopThreads() 89 | break 90 | #print("Current marker estimation: " + str(guess), end=" \r") 91 | assert accuracy_achieved and marker_received and in_test_mode 92 | -------------------------------------------------------------------------------- /Tests/test_Simple.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | import time 3 | from pybci.Configuration.EpochSettings import GlobalEpochSettings 4 | 5 | gs = GlobalEpochSettings() 6 | gs.tmax = 1 # grab 1 second after marker 7 | gs.tmin = 0 # grab 0 seconds before marker 8 | gs.splitCheck = False # splits samples between tmin and tmax 9 | gs.windowLength = 0.5 # 10 | gs.windowOverlap = 0.5 # windows overap by 50%, so for a total len 11 | 12 | def test_run_bci(): 13 | bci = PyBCI(minimumEpochsRequired = 2, createPseudoDevice=True, globalEpochSettings=gs, loggingLevel = "INFO") 14 | 15 | while not bci.connected: 16 | bci.Connect() 17 | time.sleep(1) 18 | bci.TrainMode() 19 | accuracy_achieved = False 20 | marker_received = False 21 | in_test_mode = False 22 | accuracy=None 23 | while True: 24 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 25 | time.sleep(0.5) # wait for marker updates 26 | #print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 27 | if len(currentMarkers) > 1: # check there is more then one marker type received 28 | marker_received = True 29 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 30 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 31 | accuracy = classInfo["accuracy"]### 32 | if accuracy > 0: 33 | # set to above 0 to show some accuracy was retruend from model 34 | accuracy_achieved = True 35 | bci.TestMode() 36 | break 37 | #if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+4: 38 | # break 39 | while True: 40 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 41 | print(markerGuess) 42 | #guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 43 | in_test_mode = True 44 | #time.sleep(1) 45 | bci.StopThreads() 46 | break 47 | #print("Current marker estimation: " + str(guess), end=" \r") 48 | assert accuracy_achieved and marker_received and in_test_mode 49 | -------------------------------------------------------------------------------- /Tests/test_Sklearn.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | import time 3 | from sklearn.neural_network import MLPClassifier 4 | 5 | def test_run_bci(): 6 | clf = MLPClassifier(max_iter = 1000, solver ="lbfgs")#solver=clf, alpha=alpha,hidden_layer_sizes=hid) 7 | 8 | bci = PyBCI(minimumEpochsRequired = 2, clf = clf, createPseudoDevice=True) 9 | 10 | while 
not bci.connected: 11 | bci.Connect() 12 | time.sleep(1) 13 | bci.TrainMode() 14 | accuracy_achieved = False 15 | marker_received = False 16 | accuracy=None 17 | while True: 18 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 19 | time.sleep(0.5) # wait for marker updates 20 | #print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 21 | if len(currentMarkers) > 1: # check there is more then one marker type received 22 | marker_received = True 23 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 24 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 25 | accuracy = classInfo["accuracy"]### 26 | if accuracy > 0: 27 | # set to above 0 to show some accuracy was retruend from model 28 | accuracy_achieved = True 29 | bci.StopThreads() 30 | break 31 | #if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+4: 32 | # break 33 | assert accuracy_achieved and marker_received 34 | -------------------------------------------------------------------------------- /Tests/test_Tensorflow.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | import time 3 | import tensorflow as tf 4 | num_chs = 8 # 8 channels are created in the PseudoLSLGenerator 5 | num_feats = 2 # default is mean freq and rms to keep it simple 6 | num_classes = 4 # number of different triggers (can include baseline) sent, defines if we use softmax of binary 7 | # Define the GRU model 8 | model = tf.keras.Sequential() 9 | model.add(tf.keras.layers.Reshape((num_chs*num_feats, 1), input_shape=(num_chs*num_feats,))) 10 | model.add(tf.keras.layers.GRU(units=256))#, input_shape=num_chs*num_feats)) # maybe should show this example as 2d with toggleable timesteps disabled 11 | model.add(tf.keras.layers.Dense(units=512, activation='relu')) 12 | model.add(tf.keras.layers.Flatten())# )tf.keras.layers.Dense(units=128, activation='relu')) 13 | model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax')) # softmax as more then binary classification (sparse_categorical_crossentropy) 14 | #model.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) # sigmoid as ninary classification (binary_crossentropy) 15 | model.summary() 16 | model.compile(loss='sparse_categorical_crossentropy',# using sparse_categorical as we expect multi-class (>2) output, sparse because we encode targetvalues with integers 17 | optimizer='adam', 18 | metrics=['accuracy']) 19 | 20 | #@pytest.mark.timeout(300) # Extended timeout to 5 minutes 21 | def test_run_bci(): 22 | bci = PyBCI(minimumEpochsRequired = 2, model = model, createPseudoDevice=True) 23 | 24 | while not bci.connected: 25 | bci.Connect() 26 | time.sleep(1) 27 | bci.TrainMode() 28 | accuracy_achieved = False 29 | marker_received = False 30 | in_test_mode = False 31 | accuracy=None 32 | while True: 33 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 34 | time.sleep(0.5) # wait for marker updates 35 | #print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 36 | if len(currentMarkers) > 1: # check there is more then one marker type received 37 | marker_received = True 38 | if min([currentMarkers[key][1] for key in currentMarkers]) > 
bci.minimumEpochsRequired: 39 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 40 | accuracy = classInfo["accuracy"]### 41 | if accuracy > 0: 42 | # set to above 0 to show some accuracy was retruend from model 43 | accuracy_achieved = True 44 | bci.TestMode() 45 | break 46 | #if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+4: 47 | # break 48 | while True: 49 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 50 | print(markerGuess) 51 | #guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 52 | in_test_mode = True 53 | time.sleep(1) 54 | bci.StopThreads() 55 | break 56 | #print("Current marker estimation: " + str(guess), end=" \r") 57 | assert accuracy_achieved and marker_received and in_test_mode 58 | -------------------------------------------------------------------------------- /Tests/test_cli.py: -------------------------------------------------------------------------------- 1 | from pybci.CliTests.testSimple import main as mainSimple 2 | from pybci.CliTests.testSklearn import main as mainSklearn 3 | from pybci.CliTests.testPyTorch import main as mainPyTorch 4 | from pybci.CliTests.testTensorflow import main as mainTensorflow 5 | from unittest.mock import patch 6 | 7 | # Example usage 8 | #def test_cli(): 9 | def test_cli_simple_timeout(): 10 | with patch('builtins.input', return_value='stop'): 11 | timeout = 30 # timeout in seconds 12 | #def run_main(): 13 | # nonlocal my_bci_wrapper 14 | mainSimple(createPseudoDevice=True, min_epochs_train=1, min_epochs_test=2, timeout=timeout) 15 | 16 | #main_thread = threading.Thread(target=run_main) 17 | #main_thread.start() 18 | #main_thread.join() 19 | 20 | def test_cli_sklearn_timeout(): 21 | with patch('builtins.input', return_value='stop'): 22 | timeout = 30 # timeout in seconds 23 | #def run_main(): 24 | # nonlocal my_bci_wrapper 25 | mainSklearn(createPseudoDevice=True, min_epochs_train=1, min_epochs_test=2, timeout=timeout) 26 | 27 | #main_thread = threading.Thread(target=run_main) 28 | #main_thread.start() 29 | #main_thread.join() 30 | 31 | def test_cli_pytorch_timeout(): 32 | with patch('builtins.input', return_value='stop'): 33 | timeout = 30 # timeout in seconds 34 | 35 | # nonlocal my_bci_wrapper 36 | mainPyTorch(createPseudoDevice=True, min_epochs_train=1, min_epochs_test=2, timeout=timeout) 37 | 38 | #main_thread = threading.Thread(target=run_main) 39 | #main_thread.start() 40 | #main_thread.join() 41 | 42 | def test_cli_tensorflow_timeout(): 43 | with patch('builtins.input', return_value='stop'): 44 | timeout = 30 # timeout in seconds 45 | mainTensorflow(createPseudoDevice=True, min_epochs_train=1, min_epochs_test=2, timeout=timeout) 46 | 47 | #main_thread = threading.Thread(target=run_main) 48 | #main_thread.start() 49 | #main_thread.join() -------------------------------------------------------------------------------- /Tests/test_zDualStream.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | from pybci.Utils.PseudoDevice import PseudoDeviceController 4 | 5 | def test_run_dual(): 6 | 7 | pd1 = PseudoDeviceController(is_multiprocessing=True, dataStreamName="dev1") 8 | pd1.BeginStreaming() 9 | 10 | pd2 = PseudoDeviceController(is_multiprocessing=True, dataStreamName="dev2", createMarkers=False) 11 | pd2.BeginStreaming() 12 | time.sleep(5) 13 | 14 | bci = PyBCI(minimumEpochsRequired = 2, createPseudoDevice=False) 15 | 16 | 
while not bci.connected: 17 | bci.Connect() 18 | time.sleep(1) 19 | bci.TrainMode() 20 | accuracy_achieved = False 21 | marker_received = False 22 | accuracy=None 23 | while True: 24 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 25 | time.sleep(0.5) # wait for marker updates 26 | #print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 27 | if len(currentMarkers) > 1: # check there is more then one marker type received 28 | marker_received = True 29 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 30 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 31 | accuracy = classInfo["accuracy"]### 32 | if accuracy > 0: 33 | # set to above 0 to show some accuracy was retruend from model 34 | accuracy_achieved = True 35 | bci.TestMode() 36 | break 37 | #if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+4: 38 | # break 39 | while True: 40 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 41 | print(markerGuess) 42 | #guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 43 | in_test_mode = True 44 | #time.sleep(1) 45 | pd1.StopStreaming() 46 | pd2.StopStreaming() 47 | bci.StopThreads() 48 | 49 | break 50 | #print("Current marker estimation: " + str(guess), end=" \r") 51 | assert accuracy_achieved and marker_received and in_test_mode 52 | -------------------------------------------------------------------------------- /Tests/test_zSimpleOtherFeatures.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | from pybci.Configuration.FeatureSettings import GeneralFeatureChoices 3 | from pybci.Configuration.EpochSettings import IndividualEpochSetting, GlobalEpochSettings 4 | from pybci.Utils.FeatureExtractor import GenericFeatureExtractor 5 | import time 6 | 7 | # Test case using the fixture 8 | #@pytest.mark.timeout(300) # Extended timeout to 5 minutes 9 | def test_run_bci(): 10 | features = GeneralFeatureChoices 11 | features.psdBand = True 12 | #features.appr_entropy = True 13 | features.perm_entropy = True 14 | features.spec_entropy = True 15 | features.svd_entropy = True 16 | features.rms = False 17 | features.meanPSD = False 18 | features.medianPSD = True 19 | features.variance = True 20 | features.meanAbs = True 21 | features.waveformLength = True 22 | features.zeroCross = True 23 | features.slopeSignChange = True 24 | 25 | markerSettings = {} 26 | markerSettings["baseline"] = IndividualEpochSetting() 27 | markerSettings["baseline"].splitCheck = False 28 | markerSettings["baseline"].tmin = 0 # time in seconds to capture samples before trigger 29 | markerSettings["baseline"].tmax= 2 # time in seconds to capture samples after trigger 30 | 31 | markerSettings["Marker1"] = IndividualEpochSetting() 32 | markerSettings["Marker1"].splitCheck = True 33 | markerSettings["Marker1"].tmin = 0 # time in seconds to capture samples before trigger 34 | markerSettings["Marker1"].tmax= 2 # time in seconds to capture samples after trigger 35 | 36 | extractor = GenericFeatureExtractor(featureChoices=features) 37 | 38 | 39 | 40 | bci = PyBCI(minimumEpochsRequired = 2, createPseudoDevice= True, customEpochSettings=markerSettings, streamCustomFeatureExtract={"PyBCIPseudoDataStream":extractor}, 41 | markerStream= "PyBCIPseudoMarkers", 
dataStreams=["PyBCIPseudoDataStream"]) 42 | # set new config settings after instantiation 43 | bci.ConfigureEpochWindowSettings(globalEpochSettings = GlobalEpochSettings(), customEpochSettings = markerSettings) 44 | while not bci.connected: 45 | bci.Connect() 46 | time.sleep(1) 47 | bci.TrainMode() 48 | marker_received = False 49 | in_test_mode = False 50 | accuracy_achieved = False 51 | accuracy= 0 52 | while True: 53 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 54 | time.sleep(0.5) # wait for marker updates 55 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 56 | if len(currentMarkers) > 1: # check there is more then one marker type received 57 | marker_received = True 58 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 59 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 60 | accuracy = classInfo["accuracy"]### 61 | if accuracy > 0: 62 | # set to above 0 to show some accuracy was retruend from model 63 | accuracy_achieved = True 64 | bci.TestMode() 65 | break 66 | #if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+4: 67 | # break 68 | while True: 69 | 70 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 71 | print(markerGuess) 72 | bci.CurrentFeaturesTargets() 73 | in_test_mode = True 74 | time.sleep(1) 75 | bci.StopThreads() 76 | break 77 | #print("Current marker estimation: " + str(guess), end=" \r") 78 | assert accuracy_achieved and marker_received and in_test_mode 79 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx-rtd-theme 2 | -------------------------------------------------------------------------------- /docs/source/BackgroundInformation/Contributing.rst: -------------------------------------------------------------------------------- 1 | Contributing to PyBCI 2 | ===================== 3 | 4 | Thank you for your interest in contributing to PyBCI! We value your contribution and aim to make the process of contributing as smooth as possible. Here are the guidelines: 5 | 6 | Getting Started 7 | --------------- 8 | 9 | - **Communication:** For general questions or discussions, please open an issue on the `GitHub repository `_. 10 | - **Code of Conduct:** Please follow the `Code of Conduct `_ to maintain a respectful and inclusive environment. 11 | 12 | Contribution Process 13 | -------------------- 14 | 15 | 1. **Fork the Repository:** Fork the `PyBCI repository `_ on GitHub to your own account. 16 | 2. **Clone the Forked Repository:** Clone your fork locally on your machine. 17 | 3. **Set Up the Development Environment:** Ensure you have all the necessary tools and dependencies installed to work on PyBCI. 18 | 4. **Create a New Branch:** Create a new branch for the specific issue or feature you are working on. 19 | 5. **Make Your Changes:** Make the necessary changes, adhering to the PyBCI code style and conventions. 20 | 6. **Run Tests:** Run the tests using :class:`pytest` to ensure that your changes do not break existing functionality. 21 | 7. **Update Documentation:** If your changes involve modifications to the API or the introduction of new features, update the documentation accordingly. 22 | 8. **Push Your Changes:** Push your changes to your fork on GitHub. 23 | 9. **Submit a Pull Request:** Submit a pull request from your fork to the PyBCI repository. 24 | 25 | Development Environment 26 | ----------------------- 27 | 28 | Ensure that you have installed the necessary dependencies by running: 29 | 30 | .. code-block:: bash 31 | 32 | pip install -r requirements.txt 33 | 34 | Running Tests 35 | ------------- 36 | 37 | To run the tests, execute: 38 | 39 | .. code-block:: bash 40 | 41 | pytest 42 | 43 | Coding Standards and Conventions 44 | -------------------------------- 45 | 46 | Please adhere to the coding standards and conventions used throughout the PyBCI project. This includes naming conventions, comment styles, and code organization. 47 | 48 | Documentation 49 | ------------- 50 | 51 | We use Sphinx with ReadTheDocs for documentation. Ensure that you update the documentation if you change the API or introduce new features. 52 | 53 | Continuous Integration 54 | ---------------------- 55 | 56 | We use AppVeyor for continuous integration to maintain the stability of the codebase. Ensure that your changes pass the build on AppVeyor before submitting a pull request. The configuration is located in the :py:data:`appveyor.yml` file in the project root. 
57 | 58 | Licensing 59 | --------- 60 | 61 | By contributing to PyBCI, you agree that your contributions will be licensed under the same license as the project, as specified in the LICENSE file. 62 | 63 | Acknowledgements 64 | ---------------- 65 | 66 | Contributors will be acknowledged in a dedicated section of the documentation or project README. 67 | 68 | Thank you for contributing to PyBCI! 69 | -------------------------------------------------------------------------------- /docs/source/BackgroundInformation/Epoch_Timing.rst: -------------------------------------------------------------------------------- 1 | .. _epoch_timing: 2 | 3 | Epoch Timing 4 | ############ 5 | 6 | What are Epochs? 7 | ---------------- 8 | Epochs are periods of time. 9 | 10 | In relation to training models on set actions for brain computer interfaces, it is useful to define epochs by the amount of time-series data before and after a marker has been received indicating a desired classification action has been executed. A selected LSL marker stream is used to send strings which represent unique target training markers. Once data is sliced into epochs it may be processed for feature extraction, or in some cases passed as raw time-series input, see :ref:`custom-extractor` and :ref:`raw-extractor` for more information on feature extraction. 11 | 12 | Setting the :py:data:`globalEpochSettings` variable when initialising pybci with the :class:`GlobalEpochSettings` class (referred to as :py:data:`gs` below) sets the target window length and overlap for the training time windows. It is desirable to have a single global window length (:py:data:`gs.windowLength`) that all epochs are sliced to match; this gives a uniform array when passing to the classifier. In testing mode there is a continuous rolling window of data sliced to the specified size set with :py:data:`gs.windowLength` and overlapped based on the :py:data:`gs.windowOverlap`, see :ref:`set_custom_epoch_times` for more info. 13 | 14 | .. _set_custom_epoch_times: 15 | 16 | Setting Custom Epoch Times 17 | -------------------------- 18 | 19 | The figure below illustrates when you may have epochs of differing lengths received on the LSL marker stream. A baseline marker may signify an extended period, in this case 10 seconds, and our motor task is only 1 second long. To account for this, set :py:data:`customEpochSettings` and :py:data:`globalEpochSettings` accordingly; note the LSL marker string for baseline should match the key for the :py:data:`customEpochSettings` dict. NOTE: when using :py:data:`customEpochSettings` all epochs must be defined in the dict and any other markers will be ignored; if :py:data:`customEpochSettings` is not used, all markers on the selected marker stream will be used as a classification type: 20 | 21 | ..
code-block:: python 22 | 23 | gs = GlobalEpochSettings() 24 | gs.tmax = 1 # grab 1 second after marker 25 | gs.tmin = 0 # grabs 0 seconds before marker 26 | gs.splitCheck = False # splits samples between tmin and tmax 27 | gs.windowLength = 1 # window length of 1 s, means all marker timing windows will be 1 second long 28 | gs.windowOverlap = 0.5 # windows overlap by 50%, so each 1 s window shares 0.5 s with the next 29 | markerSettings = {} 30 | markerSettings["baseline"] = IndividualEpochSetting() 31 | markerSettings["baseline"].splitCheck = False 32 | markerSettings["baseline"].tmin = 0 # time in seconds to capture samples before trigger 33 | markerSettings["baseline"].tmax = 10 # time in seconds to capture samples after trigger 34 | markerSettings["Marker1"] = IndividualEpochSetting() 35 | markerSettings["Marker1"].splitCheck = False 36 | markerSettings["Marker1"].tmin = 0 # time in seconds to capture samples before trigger 37 | markerSettings["Marker1"].tmax = 1 # time in seconds to capture samples after trigger 38 | 39 | bci = PyBCI(customEpochSettings=markerSettings, globalEpochSettings=gs) 40 | 41 | Highlighting these epochs on some pseudo EMG data looks like the following: 42 | 43 | .. _nosplitExample: 44 | 45 | .. image:: ../Images/splitEpochs/example1.png 46 | :target: https://github.com/LMBooth/pybci/blob/main/docs/source/Images/splitEpochs/example1.png 47 | 48 | 49 | Overlapping Epoch Windows 50 | ------------------------- 51 | 52 | By setting splitCheck to True for :py:data:`markerSettings["baseline"].splitCheck` and :py:data:`gs.windowOverlap` to 0 we can turn one marker into 10 epochs, shown below: 53 | 54 | .. _nooverlapExample: 55 | 56 | .. image:: ../Images/splitEpochs/example1split0.png 57 | :target: https://github.com/LMBooth/pybci/blob/main/docs/source/Images/splitEpochs/example1split0.png 58 | 59 | 60 | By setting :py:data:`gs.windowOverlap` to 0.5 we can overlap 1 second epochs by 50% yielding 19 (2n-1) epochs, shown below: 61 | 62 | .. _overlapExample: 63 | 64 | .. image:: ../Images/splitEpochs/example1split50.png 65 | :target: https://github.com/LMBooth/pybci/blob/main/docs/source/Images/splitEpochs/example1split50.png 66 | 67 | 68 | Debugging Timing Errors 69 | ----------------------- 70 | When initialising the :class:`PyBCI()` class set :py:data:`loggingLevel` to “TIMING” to time the feature extraction time for each data inlet as well as classification testing and training times. These are the most computationally intensive tasks and will induce the most lag in the system. Each printed time must be shorter than :py:data:`globalEpochSettings.windowLength` * (1 - :py:data:`globalEpochSettings.windowOverlap`) to minimise delays from input data action to classification output. 71 | -------------------------------------------------------------------------------- /docs/source/BackgroundInformation/Examples.rst: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Examples 4 | ======== 5 | 6 | The following examples can all be found on the `PyBCI GitHub repository `_. 7 | 8 | .. note:: 9 | The examples have shields describing whether they work with PyBCI's pseudoDevice class and what additional external hardware is required. Pseudo Device description found here: :ref:`what-pseudo-device` 10 | 11 | If you are using your own LSL-capable hardware and marker stream and have no need for the pseudo device, set :py:data:`createPseudoDevice=False`.
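A hedged sketch of connecting to your own hardware follows (the stream names below are hypothetical placeholders, not real device names; replace them with whatever your hardware and stimulus software advertise over LSL):

.. code-block:: python

    from pybci import PyBCI

    # "MyDataStream" and "MyMarkerStream" are placeholder names for
    # the LSL streams provided by your own hardware and marker source.
    bci = PyBCI(dataStreams=["MyDataStream"],
                markerStream="MyMarkerStream",
                createPseudoDevice=False)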
12 | 13 | PyBCI requires an LSL marker stream for defining when time series data should be attributed to an action/marker/epoch and an LSL data stream to create time-series data. 14 | 15 | If the user has no available LSL hardware to hand they can set :py:data:`createPseudoDevice=True` when instantiating the PyBCI object to enable a pseudo LSL data stream to generate time-series data and LSL marker stream for epoching the data. More information on PyBCI's Pseudo Device class can be found here: :ref:`what-pseudo-device`. 16 | 17 | The `example scripts `_ illustrate various applied ML libraries (SKLearn, Tensorflow, PyTorch) or provide examples of how to integrate LSL hardware. 18 | 19 | The code snippet in :ref:`simpleimplementation` can be used to run a simple classification task using the Pseudo Device; alternatively, call pybci in the command line to get a list of CLI commands and tests. 20 | 21 | 22 | ArduinoHandGrasp 23 | ---------------- 24 | .. image:: https://img.shields.io/badge/Pseudo_Device-Not_Available-red 25 | :alt: pseudo device not available shield 26 | .. image:: https://img.shields.io/badge/Arduino-Required-yellow 27 | :alt: arduino required shield 28 | .. image:: https://img.shields.io/badge/Myoware_Muscle_Sensor-Required-yellow 29 | :alt: Myoware required shield 30 | 31 | - **GitHub Link**: `ArduinoHandGrasp/ `_ 32 | - **Description**: This folder contains an LSL marker creator in `MarkerMaker.py`, which uses PyQt5 as an on-screen text stimulus. It also includes `ServoControl.ino`, designed for an Arduino Uno to control 5 servo motors. A `Myoware Muscle Sensor` is attached to analog pin A0. The `ArduinoToLSL.py` script sends and receives serial data, while `testArduinoHand.py` classifies the data. 33 | 34 | PupilLabsRightLeftEyeClose 35 | -------------------------- 36 | .. image:: https://img.shields.io/badge/Pseudo_Device-Not_Available-red 37 | :alt: pseudo device not available shield 38 | .. image:: https://img.shields.io/badge/Pupil_Labs_Hardware-Required-yellow 39 | :alt: pupil required shield 40 | 41 | - **GitHub Link**: `PupilLabsRightLeftEyeClose/ `_ 42 | - **Description**: This folder contains a basic Pupil Labs example with a custom extractor class. `RightLeftMarkers.py` uses Tkinter to generate visual stimuli. `bciGazeExample.py` shows how a custom feature extractor class can be used. 43 | 44 | MultimodalPupilLabsEEG 45 | ----------------------- 46 | .. image:: https://img.shields.io/badge/Pseudo_Device-Not_Available-red 47 | :alt: pseudo device not available shield 48 | .. image:: https://img.shields.io/badge/Pupil_Labs_Hardware-Required-yellow 49 | :alt: pupil required shield 50 | .. image:: https://img.shields.io/badge/ioBio_EEG_Device-Required-yellow 51 | :alt: iobio EEG device required shield 52 | 53 | - **GitHub Link**: `MultimodalPupilLabsEEG/ `_ 54 | - **Description**: An advanced example illustrating the use of two devices: Pupil Labs and Hull University ioBio EEG device. Includes a YouTube video demonstrating the multimodal example. 55 | 56 | testEpochTimingsConfig 57 | ----------------------- 58 | .. image:: https://img.shields.io/badge/Pseudo_Device-Available-blue 59 | :alt: pseudo device available shield 60 | - **GitHub Link**: `testEpochTimingsConfig `_ 61 | - **Description**: A simple example showing custom global epoch settings. 62 | 63 | testPytorch 64 | ----------- 65 | ..
image:: https://img.shields.io/badge/Pseudo_Device-Available-blue 66 | :alt: pseudo device available shield 67 | - **GitHub Link**: `testPytorch `_ 68 | - **Description**: Provides an example of using a PyTorch Neural Net Model as the classifier. 69 | 70 | testRaw 71 | ------- 72 | .. image:: https://img.shields.io/badge/Pseudo_Device-Available-blue 73 | :alt: pseudo device available shield 74 | - **GitHub Link**: `testRaw `_ 75 | - **Description**: Demonstrates how raw time series data can be used as an input by utilizing a custom feature extractor class. 76 | 77 | testSimple 78 | ---------- 79 | .. image:: https://img.shields.io/badge/Pseudo_Device-Available-blue 80 | :alt: pseudo device available shield 81 | - **GitHub Link**: `testSimple `_ 82 | - **Description**: Provides the simplest setup with default settings. 83 | 84 | testSklearn 85 | ----------- 86 | .. image:: https://img.shields.io/badge/Pseudo_Device-Available-blue 87 | :alt: pseudo device available shield 88 | - **GitHub Link**: `testSklearn `_ 89 | - **Description**: Similar to `testSimple`, but uses an MLP as a custom classifier. 90 | 91 | testTensorflow 92 | -------------- 93 | .. image:: https://img.shields.io/badge/Pseudo_Device-Available-blue 94 | :alt: pseudo device available shield 95 | - **GitHub Link**: `testTensorflow `_ 96 | - **Description**: Similar to `testSimple`, but allows for a custom TensorFlow model to be used. 97 | -------------------------------------------------------------------------------- /docs/source/BackgroundInformation/Getting_Started.rst: -------------------------------------------------------------------------------- 1 | Getting Started 2 | ############### 3 | 4 | 5 | 6 | Python Package Dependencies Version Minimums 7 | ============================================ 8 | PyBCI is tested on Python versions 3.9, 3.10 and 3.11 (`defined via appveyor.yml `__) 9 | 10 | The following package versions define the minimum supported by PyBCI, also defined in :py:data:`setup.py`: 11 | 12 | .. code-block:: console 13 | 14 | pylsl>=1.16.1, 15 | scipy>=1.11.1, 16 | numpy>=1.24.3, 17 | antropy>=0.1.6, 18 | tensorflow>=2.13.0, 19 | scikit-learn>=1.3.0, 20 | torch>=2.0.1 21 | 22 | Earlier packages may work but are not guaranteed to be supported. 23 | 24 | Prerequisite for Non-Windows Users 25 | ================================== 26 | If you are not using Windows then there is a prerequisite stipulated on the `pylsl repository `_ to obtain a liblsl shared library. See the `liblsl repo documentation `_ for more information. Once the liblsl library has been downloaded, pip install pybci-package should work. 27 | 28 | .. _installation: 29 | Installation 30 | ============ 31 | 32 | For stable releases use: :py:data:`pip install pybci-package` 33 | 34 | For development versions use: :py:data:`pip install git+https://github.com/LMBooth/pybci.git` or 35 | 36 | .. code-block:: console 37 | 38 | git clone https://github.com/LMBooth/pybci.git 39 | cd pybci 40 | pip install -e . 41 | 42 | 43 | Optional: Virtual Environment 44 | ----------------------------- 45 | Or optionally, install and run in a virtual environment: 46 | 47 | Windows: 48 | 49 | .. code-block:: console 50 | 51 | python -m venv my_env 52 | .\my_env\Scripts\Activate 53 | pip install pybci-package # For stable releases 54 | # OR 55 | pip install git+https://github.com/LMBooth/pybci.git # For development version 56 | 57 | Linux/MacOS: 58 | 59 | ..
code-block:: console 60 | 61 | python3 -m venv my_env 62 | source my_env/bin/activate 63 | pip install pybci-package # For stable releases 64 | # OR 65 | pip install git+https://github.com/LMBooth/pybci.git # For development version 66 | 67 | There have been issues raised with Linux successfully running all pytests and examples; a Dockerfile is included in the root of the repository outlining what should be a successful build on Ubuntu 22.04. 68 | 69 | Dockerfile 70 | ====================== 71 | There is an Ubuntu 22.04 setup found in the `Dockerfile `__ in the root of the repository which can be used in conjunction with `docker `__. 72 | 73 | Once docker is installed call the following in the root directory: 74 | 75 | .. code-block:: console 76 | 77 | sudo docker build -t pybci . 78 | sudo docker run -it -p 4000:8080 pybci 79 | 80 | Then either run the :py:data:`pybci` CLI command or run :py:data:`pytest Tests`. 81 | 82 | 83 | 84 | Running Pytest Locally 85 | ====================== 86 | After installing pybci and downloading and extracting the pybci git repository, navigate to the extracted location and run :py:data:`pip install -r requirements-devel.txt` to install pytest, then call :py:data:`pytest -vv -s Tests\\` to run all the automated tests and ensure all 10 tests pass (should take approximately 15 mins to complete); this will ensure pybci functionality is as desired. 87 | 88 | .. _simpleimplementation: 89 | 90 | Simple Implementation 91 | ===================== 92 | PyBCI requires an LSL marker stream for defining when time series data should be attributed to an action/marker/epoch and an LSL data stream to create time-series data. 93 | 94 | If the user has no available LSL hardware to hand they can set :py:data:`createPseudoDevice=True` when instantiating the PyBCI object to enable a pseudo LSL data stream to generate time-series data and LSL marker stream for epoching the data. More information on PyBCI's Pseudo Device class can be found here: :ref:`what-pseudo-device`. 95 | 96 | The `example scripts `_ illustrate various applied ML libraries (SKLearn, Tensorflow, PyTorch) or provide examples of how to integrate LSL hardware. 97 | 98 | The code snippet below can be used to run a simple classification task using the Pseudo Device; alternatively, call pybci in the command line to get a list of CLI commands and tests: 99 | 100 | ..
code-block:: python 101 | 102 | from pybci import PyBCI 103 | import time 104 | 105 | if __name__ == '__main__': # Note: this line is needed when using the pseudo device, as by default it runs as a multiprocessed operation 106 | bci = PyBCI(minimumEpochsRequired = 5, createPseudoDevice=True) 107 | while not bci.connected: # check to see if lsl marker and datastream are available 108 | bci.Connect() 109 | time.sleep(1) 110 | bci.TrainMode() # now both marker and datastreams available start training on received epochs 111 | accuracy = 0 112 | try: 113 | while(True): 114 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many epochs have been received; markers sent too close together will be ignored till done processing 115 | time.sleep(0.5) # wait for marker updates 116 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 117 | if len(currentMarkers) > 1: # check there is more than one marker type received 118 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 119 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 120 | accuracy = classInfo["accuracy"] 121 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 122 | bci.TestMode() 123 | break 124 | while True: 125 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 126 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 127 | print("Current marker estimation: " + str(guess), end=" \r") 128 | time.sleep(0.2) 129 | except KeyboardInterrupt: # allow user to break while loop 130 | print("\nLoop interrupted by user.") 131 | 132 | -------------------------------------------------------------------------------- /docs/source/BackgroundInformation/Pseudo_Device.rst: -------------------------------------------------------------------------------- 1 | Pseudo Device 2 | ############# 3 | 4 | .. _what-pseudo-device: 5 | 6 | What is the Pseudo Device? 7 | ========================== 8 | For ease of use the variable bool :py:data:`createPseudoDevice` can be set to True when instantiating :class:`PyBCI()` so the default PseudoDevice is run in another process, enabling examples to be run without the need for LSL enabled hardware. 9 | 10 | The :py:data:`PseudoDeviceController` class can be used when the user has no available LSL marker or data streams, allowing for quick and simple execution of the examples. The Pseudo Device enables testing pipelines without the need to configure and run LSL enabled hardware. 11 | 12 | The :class:`PseudoDeviceController` class holds marker information and generates signal data based on the given configuration set in :ref:`configuring-pseudo-device`. 13 | 14 | The :py:data:`PseudoDeviceController` can have the string "process" or "thread" set to decide whether the pseudo device should be a multiprocessed or threaded operation respectively; by default it is set to "process". 15 | 16 | Any generic LSLViewer can be used to view the generated data, `example viewers found on this link. `_ 17 | 18 | .. _configuring-pseudo-device: 19 | 20 | Configuring the Pseudo Device Controller 21 | ======================================== 22 | 23 | By default the :class:`PseudoDeviceController` has 4 marker types: "baseline", "Marker1", "Marker2" and "Marker3", with peak frequencies of 3, 8, 10 and 12 Hz respectively.
24 | Each signal is modified for 1 second after the marker has occurred, and markers are spaced 5 seconds apart. 25 | 26 | Upon creating the PyBCI object, a dict of the following ``kwargs`` can be passed to dictate the behaviour of the pseudo device: 27 | 28 | .. code-block:: 29 | 30 | is_multiprocessing – bool: Flag indicating if the class instance is running in a multiprocessing environment. Default is True. 31 | markerConfigStrings – list(str): List of marker strings used for generating marker data. Default is [“Marker1”, “Marker2”, “Marker3”]. 32 | pseudoMarkerDataConfigs – list: List of PseudoDataConfig objects for configuring the pseudo EMG signals. If None, default configurations will be used. 33 | createMarkers – bool: Sets whether the given stream will also fire markers. Default is True. 34 | pseudoMarkerConfig – PseudoMarkerConfig: Configuration settings for pseudo markers. Default is PseudoMarkerConfig. 35 | dataStreamName – string: Name to be assigned to the data stream. Default is “PyBCIPseudoDataStream”. 36 | dataStreamType – string: Type of the data stream (e.g., “EMG”). Default is “EMG”. 37 | sampleRate – int: The sample rate in Hz for the data stream. Default is 250. 38 | channelCount – int: The number of channels for the data stream. Default is 8. 39 | logger – Logger: Logger object for logging activities. Default is Logger(Logger.INFO). 40 | log_queue – multiprocessing.Queue: Queue object for logging activities in a multiprocessing environment. Default is None. 41 | 42 | 43 | The following shows the contents of the PseudoDataConfig and PseudoMarkerConfig classes: 44 | 45 | .. code-block:: python 46 | 47 | class PseudoDataConfig: 48 | duration = 1.0 49 | noise_level = 1 50 | amplitude = 2 51 | frequency = 3 52 | 53 | class PseudoMarkerConfig: 54 | markerName = "PyBCIPseudoMarkers" 55 | markerType = "Markers" 56 | baselineMarkerString = "baseline" 57 | repeat = True 58 | autoplay = True 59 | num_baseline_markers = 10 60 | number_marker_iterations = 10 61 | seconds_between_markers = 5 62 | seconds_between_baseline_marker = 10 63 | baselineConfig = PseudoDataConfig() 64 | 65 | Two LSL streams are then created: one marker stream for informing pybci that an event has occurred, and a data stream with the correspondingly altered data to train the applied model with. 66 | -------------------------------------------------------------------------------- /docs/source/BackgroundInformation/What_is_PyBCI.rst: -------------------------------------------------------------------------------- 1 | What is PyBCI? 2 | ############## 3 | 4 | Statement of need 5 | ================= 6 | PyBCI addresses the growing need for a real-time Brain-Computer Interface (BCI) software capable of handling diverse physiological sensor data streams. By leveraging robust machine learning libraries such as PyTorch, SKLearn, and TensorFlow, alongside the Lab Streaming Layer protocol, PyBCI facilitates the integration of real-time data analysis and model training. This opens up avenues for researchers and practitioners to not only receive and analyze physiological sensor data but also develop, test, and deploy machine learning models seamlessly, fostering innovation in the rapidly evolving field of BCIs. 7 | 8 | General Overview 9 | ================ 10 | PyBCI is a Python based brain computer interface software package designed to receive one or more Lab Streaming Layer enabled physiological sensor data streams.
An understanding of time-series data analysis, the lab streaming layer protocol, and machine learning techniques is a must to integrate innovative ideas with this interface. An LSL marker stream is required to train the model, where a received marker epochs the data received on the accepted datastreams based on a configurable time window around certain markers - custom marker strings can optionally be split and overlapped to count for more than one marker, for example: 11 | 12 | A baseline marker may have one marker sent for a 60 second window, whereas target actions may only be ~0.5 s long; to give a standardised window length when testing the model it is desirable to split the 60 s window after the received baseline marker into ~0.5 s windows. By overlapping windows we try to account for potential missed signal patterns/aliasing; as a rule of thumb, an overlap of 50% or more is advised when testing a model, see `Shannon nyquist criterion `_. `See here for more information on epoch timing `_. 13 | 14 | Once the data has been epoched it is sent for feature extraction; there is a general feature extraction class which can be configured for general time and/or frequency analysis based features, ideal for data stream types like "EEG" and "EMG". Since data analysis, preprocessing and feature extraction techniques can vary greatly between devices, a custom feature extraction class can be created for each data stream type. `See here for more information on feature extraction `_. 15 | 16 | Finally a passable, customisable pytorch, sklearn or tensorflow classifier can be given to the bci class; once a defined number of epochs have been obtained for each received epoch/marker type the classifier can begin to fit the model. It's advised to use :py:meth:`ReceivedMarkerCount()` to get the number of received training epochs; once the minimum number of epochs received of each type is larger than or equal to :py:attr:`minimumEpochsRequired` (default 10 of each epoch) the model will begin to fit. Once fit, the classifier info can be queried with :py:meth:`CurrentClassifierInfo()`; this returns the model used and accuracy. If enough epochs are received or a high enough accuracy is obtained, :py:meth:`TestMode()` can be called. Once in test mode you can query what pybci estimates the current bci epoch is (typically a "baseline" marker is given in the training period for no state). `Review the examples for sklearn and model implementations `_. 17 | 18 |
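Since the window arithmetic above can be fiddly, the sub-window count for a split epoch can be estimated as follows (a rough sketch for illustration only, assuming windows are tiled from the epoch start as in the Epoch Timing examples; this helper is not part of the PyBCI API):

.. code-block:: python

    import math

    def estimate_window_count(epoch_length, window_length, window_overlap):
        # Consecutive windows start window_length * (1 - window_overlap) seconds apart.
        step = window_length * (1 - window_overlap)
        return math.floor((epoch_length - window_length) / step) + 1

    print(estimate_window_count(10, 1, 0.5))   # 19, the (2n-1) case from Epoch Timing
    print(estimate_window_count(60, 0.5, 0.5)) # 239 windows from a 60 s baseline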
19 | 20 | All the `examples `__ found on the github that are not in a dedicated folder have a pseudo LSL data generator enabled by default, by setting :py:data:`createPseudoDevice=True`, so the examples can run without the need for LSL capable hardware. 21 | 22 | -------------------------------------------------------------------------------- /docs/source/Images/pyBCI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/docs/source/Images/pyBCI.png -------------------------------------------------------------------------------- /docs/source/Images/pyBCITitle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/docs/source/Images/pyBCITitle.png -------------------------------------------------------------------------------- /docs/source/Images/splitEpochs/example1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/docs/source/Images/splitEpochs/example1.png -------------------------------------------------------------------------------- /docs/source/Images/splitEpochs/example1split0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/docs/source/Images/splitEpochs/example1split0.png -------------------------------------------------------------------------------- /docs/source/Images/splitEpochs/example1split50.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/docs/source/Images/splitEpochs/example1split50.png -------------------------------------------------------------------------------- /docs/source/api/Configurations.rst: -------------------------------------------------------------------------------- 1 | pybci.Configuration.EpochSettings import GlobalEpochSettings 2 | ============================================================= 3 | .. class:: GlobalEpochSettings() 4 | 5 | GlobalEpochSettings class holds global time related variables for slicing epochs and capturing samples so many seconds before and after markers; this also defines the rolling window in test mode. 6 | 7 | :splitCheck: bool: Checks whether or not to subdivide epochs. 8 | :tmin: float: Time in seconds to capture samples before trigger. 9 | :tmax: float: Time in seconds to capture samples after trigger. 10 | :windowLength: float: If splitCheck true - time in seconds to split epoch. 11 | :windowOverlap: float: If splitCheck true - overlap fraction > 0 and < 1. For example, an epoch with tmin of 0 and tmax of 1, a window length of 0.5 and an overlap of 0.5 gives windows at 0 to 0.5, 0.25 to 0.75, and 0.5 to 1. 12 | 13 | pybci.Configuration.EpochSettings import IndividualEpochSetting 14 | ================================================================ 15 | .. class:: IndividualEpochSetting() 16 | 17 | IndividualEpochSetting class holds time related variables for slicing epoch markers with differing time windows to the global window settings; these epochs are sliced and overlapped to create windows the shape of GlobalEpochSettings.windowLength. 18 | 19 | :splitCheck: bool: Checks whether or not to subdivide epochs. 20 | :tmin: float: Time in seconds to capture samples before trigger. 21 | :tmax: float: Time in seconds to capture samples after trigger.
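A minimal sketch combining these settings with :class:`PyBCI` (mirroring the Epoch Timing example; the "baseline" marker name is a placeholder for whatever the connected marker stream sends):

.. code-block:: python

    from pybci import PyBCI
    from pybci.Configuration.EpochSettings import GlobalEpochSettings, IndividualEpochSetting

    gs = GlobalEpochSettings()
    gs.windowLength = 1     # slice all training windows to 1 s
    gs.windowOverlap = 0.5  # overlap consecutive windows by 50%

    markerSettings = {}
    markerSettings["baseline"] = IndividualEpochSetting()
    markerSettings["baseline"].tmax = 10  # 10 s epoch, split into 1 s windows

    bci = PyBCI(customEpochSettings=markerSettings, globalEpochSettings=gs)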
22 | 23 | pybci.Configuration.FeatureSettings import GeneralFeatureChoices 24 | ================================================================= 25 | .. class:: GeneralFeatureChoices() 26 | 27 | GeneralFeatureChoices class holds booleans for quickly setting the generic feature extractor class. 28 | 29 | :psdBand: bool: default = False, Checks whether or not psdBand features are desired. 30 | :appr_entropy: bool: default = False, Checks whether or not appr_entropy features are desired. 31 | :perm_entropy: bool: default = False, Checks whether or not perm_entropy features are desired. 32 | :spec_entropy: bool: default = False, Checks whether or not spec_entropy features are desired. 33 | :svd_entropy: bool: default = False, Checks whether or not svd_entropy features are desired. 34 | :samp_entropy: bool: default = False, Checks whether or not samp_entropy features are desired. 35 | :rms: bool: default = True, Checks whether or not rms features are desired. 36 | :meanPSD: bool: default = True, Checks whether or not meanPSD features are desired. 37 | :medianPSD: bool: default = False, Checks whether or not medianPSD features are desired. 38 | :variance: bool: default = False, Checks whether or not variance features are desired. 39 | :meanAbs: bool: default = False, Checks whether or not meanAbs features are desired. 40 | :waveformLength: bool: default = False, Checks whether or not waveformLength features are desired. 41 | :zeroCross: bool: default = False, Checks whether or not zeroCross features are desired. 42 | :slopeSignChange: bool: default = False, Checks whether or not slopeSignChange features are desired. 43 | -------------------------------------------------------------------------------- /docs/source/api/LSLScanner.rst: -------------------------------------------------------------------------------- 1 | pybci.Utils.LSLScanner import LSLScanner 2 | ========================================= 3 | .. class:: LSLScanner(parent, dataStreamsNames=None, markerStreamName=None, streamTypes=None, markerTypes=None, printDebug=True) 4 | 5 | The LSLScanner class scans and selects desired data and marker streams from available LSL streams. 6 | 7 | :parent: class: Parent object. 8 | :dataStreamsNames: list(str): Allows user to set custom acceptable EEG stream definitions, if None defaults to streamTypes scan. 9 | :markerStreamName: string: Allows user to set custom acceptable Marker stream definitions, if None defaults to markerTypes scan. 10 | :streamTypes: list(str): Allows user to set custom acceptable EEG type definitions, ignored if dataStreamsNames not None. 11 | :markerTypes: list(str): Allows user to set custom acceptable Marker type definitions, ignored if markerStreamName not None. 12 | :printDebug: bool: If true, prints LSLScanner debug information. 13 | 14 | .. py:method:: ScanStreams() 15 | 16 | Scans LSL for both data and marker channels. 17 | 18 | .. py:method:: ScanDataStreams() 19 | 20 | Scans available LSL streams and appends inlet to self.dataStreams 21 | 22 | .. py:method:: ScanMarkerStreams() 23 | 24 | Scans available LSL streams and appends inlet to self.markerStreams 25 | 26 | 27 | .. py:method:: CheckAvailableLSL() 28 | 29 | Checks stream inlets available; prints if printDebug is true. 30 | 31 | :returns: `True` if 1 marker stream present and available datastreams are present. False if no datastreams are present and/or more or less than one marker stream is present.
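A minimal usage sketch (illustrative only; the documentation above does not specify what ``parent`` must be, so passing a :class:`PyBCI` instance here is an assumption, as are the pseudo device stream names):

.. code-block:: python

    from pybci import PyBCI
    from pybci.Utils.LSLScanner import LSLScanner

    bci = PyBCI(createPseudoDevice=True)  # provides the LSL streams scanned below
    # parent=bci is an assumption; PyBCI normally constructs and drives its own scanner.
    scanner = LSLScanner(bci, dataStreamsNames=["PyBCIPseudoDataStream"],
                         markerStreamName="PyBCIPseudoMarkers")
    scanner.ScanStreams()            # populates scanner.dataStreams / scanner.markerStreams
    if scanner.CheckAvailableLSL():  # True when one marker stream and data streams are found
        print("Marker and data streams available")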
32 | -------------------------------------------------------------------------------- /docs/source/api/PseudoDeviceController.rst: -------------------------------------------------------------------------------- 1 | pybci.Utils.PseudoDevice import PseudoDeviceController 2 | ======================================================= 3 | 4 | .. class:: PseudoDeviceController(is_multiprocessing=True, markerConfigStrings=["Marker1", "Marker2", "Marker3"], pseudoMarkerDataConfigs=None, createMarkers=True, pseudoMarkerConfig=PseudoMarkerConfig, dataStreamName="PyBCIPseudoDataStream", dataStreamType="EMG", sampleRate=250, channelCount=8, logger=Logger(Logger.INFO), log_queue=None) 5 | 6 | The PseudoDeviceController class is designed for generating pseudo EMG signals and markers, simulating a Lab Streaming Layer (LSL) device. It supports both multiprocessing and threading environments, depending on the `is_multiprocessing` parameter. 7 | 8 | :param is_multiprocessing: bool: Indicates if the class instance operates in a multiprocessing environment. Default is `True`. 9 | :param markerConfigStrings: list(str): Marker strings for generating marker data. Default is ["Marker1", "Marker2", "Marker3"]. 10 | :param pseudoMarkerDataConfigs: list: Configurations for pseudo EMG signals. Uses default configurations if `None`. 11 | :param createMarkers: bool: Flag to determine if markers should be created. Default is `True`. 12 | :param pseudoMarkerConfig: PseudoMarkerConfig: Settings for pseudo markers. 13 | :param dataStreamName: string: Name for the data stream. 14 | :param dataStreamType: string: Data stream type (e.g., "EMG"). 15 | :param sampleRate: int: Sample rate in Hz. 16 | :param channelCount: int: Number of channels. 17 | :param logger: Logger: Logger object for logging. 18 | :param log_queue: multiprocessing.Queue or queue.Queue: Queue object for logging. 19 | 20 | .. note:: 21 | The sample rate is not exact and may vary with CPU strain. 22 | 23 | .. py:method:: BeginStreaming() 24 | 25 | Initiates streaming of pseudo EMG data and markers. This method should be called to start the device's operation. 26 | 27 | .. py:method:: StopStreaming() 28 | 29 | Stops the data and marker streaming, signaling the termination. 30 | 31 | .. py:method:: log_message(level='INFO', message="") 32 | 33 | Logs a message to the `log_queue` or directly, based on the operation mode. 34 | 35 | :param level: string: Log message level (e.g., "INFO", "ERROR"). 36 | :param message: string: Message to log. 37 | 38 | .. note:: 39 | Ensure a graceful shutdown by calling `StopStreaming()`. 40 | 41 | .. note:: 42 | The PseudoDeviceController is suitable for simulations and testing purposes. It may require specific setup for multiprocessing or threading environments. 43 | -------------------------------------------------------------------------------- /docs/source/api/PyBCI.rst: -------------------------------------------------------------------------------- 1 | PyBCI 2 | ===== 3 | 4 | .. class:: PyBCI(dataStreams=None, markerStream=None, streamTypes=None, markerTypes=None, loggingLevel=Logger.INFO, globalEpochSettings=GlobalEpochSettings(), customEpochSettings={}, streamChsDropDict={}, streamCustomFeatureExtract={}, minimumEpochsRequired=10, createPseudoDevice=False, pseudoDeviceArgs=None, clf=None, model=None, torchModel=None) 5 | 6 | The PyBCI object is the main controller for interfacing with all relevant threads. When initialised, it sets up the main operation of the BCI and can be queried for relevant information.
7 | 8 | :param dataStreams: list(str) or None: Allows the user to set custom acceptable EEG stream definitions. Defaults to `streamTypes` scan if `None`. 9 | :param markerStream: str or None: Allows the user to set custom acceptable Marker stream definition. Defaults to `markerTypes` scan if `None`. 10 | :param streamTypes: list(str) or None: Allows the user to set custom acceptable EEG type definitions, ignored if `dataStreams` is not `None`. 11 | :param markerTypes: list(str) or None: Allows the user to set custom acceptable Marker type definitions, ignored if `markerStream` is not `None`. 12 | :param loggingLevel: string: Sets PyBCI print level. Options are 'INFO', 'WARNING', 'TIMING', and 'NONE'. 13 | :param globalEpochSettings: GlobalEpochSettings: Sets global timing settings for epochs. 14 | :param customEpochSettings: dict: Sets individual timing settings for epochs. 15 | :param streamChsDropDict: dict: Specifies which channels to drop for each data stream. 16 | :param streamCustomFeatureExtract: dict: Allows a custom feature extractor class for each data stream. 17 | :param minimumEpochsRequired: int: Minimum number of required epochs before model fitting begins. 18 | :param createPseudoDevice: bool: If True, auto-generates an LSL marker stream and LSL data stream. 19 | :param pseudoDeviceArgs: dict: Dictionary of arguments to initialize pseudo device. 20 | :param clf: sklearn.base.ClassifierMixin or None: Allows custom Sklearn model to be passed. 21 | :param model: tf.keras.model or None: Allows custom TensorFlow model to be passed. 22 | :param torchModel: custom def or None: Custom torch function should be passed with 4 inputs. 23 | 24 | .. note:: 25 | For more information on epoch settings, see `GlobalEpochSettings()` and `IndividualEpochSetting()`. 26 | 27 | .. py:method:: __enter__() 28 | 29 | Connects to the BCI. Same as __init__. 30 | 31 | .. py:method:: __exit__(exc_type, exc_val, exc_tb) 32 | 33 | Stops all threads of the BCI. 34 | 35 | .. py:method:: Connect() 36 | 37 | Checks for valid data and marker streams, sets `self.connected`. Returns boolean indicating connection status. 38 | 39 | .. py:method:: TrainMode() 40 | 41 | Sets mode to Train. Tries to connect if not already connected. 42 | 43 | .. py:method:: TestMode() 44 | 45 | Sets mode to Test. Tries to connect if not already connected. 46 | 47 | .. py:method:: CurrentClassifierInfo() 48 | 49 | :returns: Dictionary containing "clf", "model", "torchModel" and "accuracy". If not connected, returns `{"Not Connected": None}`. 50 | 51 | .. py:method:: CurrentClassifierMarkerGuess() 52 | 53 | :returns: Integer or None. Returns integer corresponding to value of key from `ReceivedMarkerCount()` dictionary. Returns None if in Train mode. 54 | 55 | .. py:method:: CurrentFeaturesTargets() 56 | 57 | :returns: Dictionary containing "features" and "targets". If not connected, returns `{"Not Connected": None}`. 58 | 59 | .. py:method:: ReceivedMarkerCount() 60 | 61 | :returns: Dictionary where each key is a received marker string and the value is a list. The list contains the marker ID and received count for that marker type.
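A minimal usage sketch (illustrative only, assuming the pseudo device supplies the streams and that ``__enter__`` returns the instance; marker names depend on the connected marker stream):

.. code-block:: python

    import time
    from pybci import PyBCI

    # __enter__/__exit__ allow PyBCI to be used as a context manager,
    # stopping all threads on exit.
    with PyBCI(createPseudoDevice=True) as bci:
        while not bci.connected:
            bci.Connect()
            time.sleep(1)
        bci.TrainMode()
        time.sleep(5)
        # Each key is a received marker string; each value holds
        # [marker ID, received count] for that marker type.
        for name, (marker_id, count) in bci.ReceivedMarkerCount().items():
            print(name, marker_id, count)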
62 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -- Project information ----------------------------------------------------- 2 | 3 | project = 'PyBCI' 4 | copyright = '2025, Liam Booth' 5 | author = 'Liam Booth' 6 | 7 | # The full version, including alpha/beta/rc tags 8 | release = '1.5.1' 9 | 10 | # -- General configuration --------------------------------------------------- 11 | extensions = [ 12 | 'sphinx.ext.intersphinx', 13 | 'sphinx.ext.autodoc', 14 | 'sphinx.ext.extlinks', 15 | ] 16 | html_theme = 'sphinx_rtd_theme' 17 | templates_path = ['_templates'] 18 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 19 | master_doc = 'index' # for Sphinx < 2.0 20 | latex_logo = 'Images/pyBCI.png' 21 | # -- Options for HTML output ------------------------------------------------- 22 | 23 | html_theme_options = { 24 | # 'logo': 'logo.png', 25 | 'github_user': 'LMBooth', 26 | 'github_repo': 'PyBCI', 27 | 'github_button': 'true', 28 | } 29 | html_logo = 'Images/pyBCI.png' 30 | # Add any paths that contain custom static files (such as style sheets) here, 31 | # relative to this directory. They are copied after the builtin static files, 32 | # so a file named "default.css" will overwrite the builtin "default.css". 33 | html_static_path = ['_static'] 34 | 35 | # Set the location of your Python module(s) 36 | # Replace 'your_package_name' with the actual package/module name 37 | autodoc_mock_imports = ['pybci'] 38 | 39 | # Include all members (methods, attributes, etc.) in the API documentation 40 | autodoc_default_options = { 41 | 'members': None, 42 | 'undoc-members': True, 43 | 'private-members': True, 44 | 'show-inheritance': True, 45 | } 46 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to the PyBCI documentation! 2 | =================================== 3 | 4 | **PyBCI** is a Python package to create a Brain Computer Interface (BCI) with data synchronisation and pipelining handled by the `Lab Streaming Layer `_, machine learning with `Pytorch `_, `scikit-learn `_ or `TensorFlow `_, leveraging packages like `Antropy `_, `SciPy `_ and `NumPy `_ for generic time and/or frequency based feature extraction, or optionally the user's own custom feature extraction class can be used. 5 | 6 | The goal of PyBCI is to enable quick iteration when creating pipelines for testing human machine and brain computer interfaces, namely testing applied data processing and feature extraction techniques on custom machine learning models. Training the BCI requires LSL enabled devices and an LSL marker stream for training stimuli. All the `examples `__ found on the github that are not in a dedicated folder have a pseudo LSL data generator enabled by default, by setting :py:data:`createPseudoDevice=True`, so the examples can run without the need for LSL capable hardware. 7 | 8 | `Github repo here! `_ 9 | 10 | If samples have been collected previously and a model has been made, the user can set :py:data:`clf`, :py:data:`model`, or :py:data:`torchModel` to their sklearn, tensorflow or pytorch classifier and immediately call :py:data:`bci.TestMode()`. 11 | 12 | Check out the :doc:`BackgroundInformation/Getting_Started` section for :ref:`installation` of the project. 13 | 14 | .. note:: 15 | 16 | This project is under active development.
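A minimal sketch of skipping training with a previously fitted classifier (illustrative only; the saved features and targets below are random stand-ins, not PyBCI's actual feature layout):

.. code-block:: python

    import time
    import numpy as np
    from sklearn.svm import SVC
    from pybci import PyBCI

    # Stand-ins for features/targets saved from an earlier training session.
    X_saved = np.random.rand(40, 16)
    y_saved = np.repeat([0, 1], 20)
    clf = SVC().fit(X_saved, y_saved)

    bci = PyBCI(clf=clf, createPseudoDevice=True)
    while not bci.connected:
        bci.Connect()
        time.sleep(1)
    bci.TestMode()  # classify immediately with the supplied model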
17 | 18 | Contents 19 | -------- 20 | 21 | .. toctree:: 22 | :maxdepth: 1 23 | :caption: User's Guide 24 | 25 | BackgroundInformation/Getting_Started 26 | BackgroundInformation/What_is_PyBCI 27 | BackgroundInformation/Theory_Operation 28 | BackgroundInformation/Epoch_Timing 29 | BackgroundInformation/Feature_Selection 30 | BackgroundInformation/Pseudo_Device 31 | BackgroundInformation/Contributing 32 | BackgroundInformation/Examples 33 | 34 | .. toctree:: 35 | :maxdepth: 1 36 | :caption: API 37 | 38 | api/PyBCI 39 | api/LSLScanner 40 | api/PseudoDeviceController 41 | api/Configurations 42 | -------------------------------------------------------------------------------- /pybci/CliTests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/pybci/CliTests/__init__.py -------------------------------------------------------------------------------- /pybci/CliTests/testSimple.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from ..pybci import PyBCI 3 | import time 4 | import threading 5 | 6 | stop_signal = threading.Event() # Global event to control the main loop 7 | 8 | def command_listener(): 9 | while not stop_signal.is_set(): 10 | command = input("PyBCI: [CLI] - Enter 'stop' to terminate\n") 11 | if command == 'stop': 12 | stop_signal.set() 13 | break 14 | 15 | class CLI_testSimpleWrapper: 16 | def __init__(self, createPseudoDevice, min_epochs_train, min_epochs_test, timeout): 17 | self.createPseudoDevice = createPseudoDevice 18 | self.timeout = timeout 19 | self.min_epochs_train = min_epochs_train 20 | self.min_epochs_test = min_epochs_test 21 | self.accuracy = 0 22 | self.currentMarkers = {} 23 | if self.min_epochs_test <= self.min_epochs_train: 24 | self.min_epochs_test = self.min_epochs_train+1 25 | #current_os = get_os() 26 | #if current_os == "Windows": 27 | self.bci = PyBCI(minimumEpochsRequired = 3, createPseudoDevice=True) 28 | #else: 29 | # pdc = PseudoDeviceController(execution_mode="process") 30 | # pdc.BeginStreaming() 31 | # time.sleep(10) 32 | # self.bci = PyBCI(minimumEpochsRequired = 3, createPseudoDevice=True, pseudoDeviceController=pdc) 33 | 34 | #self.bci = PyBCI(minimumEpochsRequired = self.min_epochs_train, createPseudoDevice=self.createPseudoDevice) 35 | main_thread = threading.Thread(target=self.loop) 36 | main_thread.start() 37 | if self.timeout: 38 | print("PyBCI: [CLI] - starting timeout thread") 39 | self.timeout_thread = threading.Thread(target=self.stop_after_timeout) 40 | self.timeout_thread.start() 41 | main_thread.join() 42 | if timeout is not None: 43 | self.timeout_thread.join() 44 | 45 | def loop(self): 46 | while not self.bci.connected: # check to see if lsl marker and datastream are available 47 | self.bci.Connect() 48 | time.sleep(1) 49 | self.bci.TrainMode() # now both marker and datastreams available start training on received epochs 50 | self.accuracy = 0 51 | test = False 52 | try: 53 | while not stop_signal.is_set(): # Add the check here 54 | if test is False: 55 | self.currentMarkers = self.bci.ReceivedMarkerCount() # check to see how many epochs have been received; markers sent too close together will be ignored till done processing 56 | time.sleep(0.5) # wait for marker updates 57 | print("Markers received: " + str(self.currentMarkers) +" Accuracy: " + str(round(self.accuracy,2))) 58 | if len(self.currentMarkers) > 1: # check there is more than one marker type
received 59 | if min([self.currentMarkers[key][1] for key in self.currentMarkers]) > self.bci.minimumEpochsRequired: 60 | classInfo = self.bci.CurrentClassifierInfo() # hangs if called too early 61 | self.accuracy = classInfo["accuracy"] 62 | if min([self.currentMarkers[key][1] for key in self.currentMarkers]) > self.min_epochs_test: 63 | self.bci.TestMode() 64 | test = True 65 | else: 66 | markerGuess = self.bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 67 | guess = [key for key, value in self.currentMarkers.items() if value[0] == markerGuess] 68 | print("Current marker estimation: " + str(guess)) 69 | time.sleep(0.2) 70 | self.bci.StopThreads() 71 | except KeyboardInterrupt: # allow user to break while loop 72 | print("\nLoop interrupted by user.") 73 | 74 | def stop_after_timeout(self): 75 | time.sleep(self.timeout) 76 | stop_signal.set() 77 | print("\nTimeout reached. Stopping threads.") 78 | 79 | # Accessor methods for CLI_testSimpleWrapper 80 | def get_accuracy(self): 81 | return self.accuracy 82 | 83 | def get_current_markers(self): 84 | return self.currentMarkers 85 | 86 | def main(createPseudoDevice=True, min_epochs_train=4, min_epochs_test=10, timeout=None): 87 | command_thread = threading.Thread(target=command_listener) 88 | command_thread.daemon = True 89 | command_thread.start() 90 | 91 | my_bci_wrapper = CLI_testSimpleWrapper(createPseudoDevice, min_epochs_train, min_epochs_test,timeout) 92 | command_thread.join() 93 | return my_bci_wrapper # Return this instance 94 | 95 | if __name__ == '__main__': 96 | parser = argparse.ArgumentParser(description="Runs simple setup where sklearn support-vector-machine is used for model and pseudodevice generates 8 channels of 3 marker types and a baseline. Similar to the testSimple.py in the examples folder.") 97 | parser.add_argument("--createPseudoDevice", default=True, type=bool, help="Set to True or False to enable or disable pseudo device creation. pseudodevice generates 8 channels of 3 marker types and baseline.") 98 | parser.add_argument("--min_epochs_train", default=4, type=int, help='Minimum epochs to collect before model training commences, must be less than min_epochs_test.
Otherwise min_epochs_test defaults to min_epochs_train+1.') 99 | parser.add_argument("--min_epochs_test", default=14, type=int, help='Minimum epochs to collect before model testing commences; if less than or equal to min_epochs_train, defaults to min_epochs_train+1.') 100 | parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 101 | 102 | args = parser.parse_args() 103 | main(**vars(args)) 104 | -------------------------------------------------------------------------------- /pybci/CliTests/testSklearn.py: -------------------------------------------------------------------------------- 1 | from ..pybci import PyBCI 2 | import time 3 | import argparse 4 | from sklearn.neural_network import MLPClassifier 5 | import threading 6 | 7 | stop_signal = threading.Event() # Global event to control the main loop 8 | 9 | def command_listener(): 10 | while not stop_signal.is_set(): 11 | command = input("PyBCI: [CLI] - Enter 'stop' to terminate\n") 12 | if command == 'stop': 13 | stop_signal.set() 14 | break 15 | 16 | class CLI_testSklearnWrapper: 17 | def __init__(self, createPseudoDevice, min_epochs_train, min_epochs_test, timeout): 18 | self.createPseudoDevice = createPseudoDevice 19 | self.timeout = timeout 20 | self.min_epochs_train = min_epochs_train 21 | self.min_epochs_test = min_epochs_test 22 | self.accuracy = 0 23 | self.currentMarkers = {} 24 | if self.min_epochs_test <= self.min_epochs_train: 25 | self.min_epochs_test = self.min_epochs_train+1 26 | clf = MLPClassifier(max_iter = 1000, solver ="lbfgs")#solver=clf, alpha=alpha,hidden_layer_sizes=hid) 27 | #current_os = get_os() 28 | #if current_os == "Windows": 29 | self.bci = PyBCI(minimumEpochsRequired = 3, createPseudoDevice=True, clf = clf) 30 | #else: 31 | # pdc = PseudoDeviceController(execution_mode="process") 32 | # pdc.BeginStreaming() 33 | # time.sleep(10) 34 | # self.bci = PyBCI(minimumEpochsRequired = 3, createPseudoDevice=True, pseudoDeviceController=pdc, clf = clf) 35 | 36 | #self.bci = PyBCI(minimumEpochsRequired = min_epochs_train, createPseudoDevice=createPseudoDevice, clf = clf) 37 | main_thread = threading.Thread(target=self.loop) 38 | main_thread.start() 39 | if self.timeout: 40 | print("PyBCI: [CLI] - starting timeout thread") 41 | self.timeout_thread = threading.Thread(target=self.stop_after_timeout) 42 | self.timeout_thread.start() 43 | main_thread.join() 44 | if timeout is not None: 45 | self.timeout_thread.join() 46 | 47 | def loop(self): 48 | while not self.bci.connected: # check to see if lsl marker and datastream are available 49 | self.bci.Connect() 50 | time.sleep(1) 51 | self.bci.TrainMode() # now both marker and datastreams available start training on received epochs 52 | self.accuracy = 0 53 | test = False 54 | try: 55 | while not stop_signal.is_set(): # Add the check here 56 | if test is False: 57 | self.currentMarkers = self.bci.ReceivedMarkerCount() # check to see how many epochs have been received; markers sent too close together will be ignored till done processing 58 | time.sleep(0.5) # wait for marker updates 59 | print("Markers received: " + str(self.currentMarkers) +" Accuracy: " + str(round(self.accuracy,2))) 60 | if len(self.currentMarkers) > 1: # check there is more than one marker type received 61 | if min([self.currentMarkers[key][1] for key in self.currentMarkers]) > self.bci.minimumEpochsRequired: 62 | classInfo = self.bci.CurrentClassifierInfo() # hangs if called too early 63 | self.accuracy = classInfo["accuracy"] 64 | if
min([self.currentMarkers[key][1] for key in self.currentMarkers]) > self.min_epochs_test: 65 | self.bci.TestMode() 66 | test = True 67 | else: 68 | markerGuess = self.bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 69 | guess = [key for key, value in self.currentMarkers.items() if value[0] == markerGuess] 70 | print("Current marker estimation: " + str(guess)) 71 | time.sleep(0.2) 72 | self.bci.StopThreads() 73 | except KeyboardInterrupt: # allow user to break while loop 74 | print("\nLoop interrupted by user.") 75 | 76 | def stop_after_timeout(self): 77 | time.sleep(self.timeout) 78 | stop_signal.set() 79 | print("\nTimeout reached. Stopping threads.") 80 | 81 | # Accessor methods for CLI_testSklearnWrapper 82 | def get_accuracy(self): 83 | return self.accuracy 84 | 85 | def get_current_markers(self): 86 | return self.currentMarkers 87 | 88 | 89 | def main(createPseudoDevice=True, min_epochs_train=4, min_epochs_test=10, timeout=None): 90 | command_thread = threading.Thread(target=command_listener) 91 | command_thread.daemon = True 92 | command_thread.start() 93 | 94 | my_bci_wrapper = CLI_testSklearnWrapper(createPseudoDevice, min_epochs_train, min_epochs_test,timeout) 95 | command_thread.join() 96 | return my_bci_wrapper # Return this instance 97 | 98 | if __name__ == '__main__': 99 | parser = argparse.ArgumentParser(description="Sklearn multi-layer perceptron is used for model and pseudodevice generates 8 channels of 3 marker types and a baseline. Similar to the testSimple.py in the examples folder.") 100 | parser.add_argument("--createPseudoDevice", default=True, type=bool, help="Set to True or False to enable or disable pseudo device creation. pseudodevice generates 8 channels of 3 marker types and baseline.") 101 | parser.add_argument("--min_epochs_train", default=4, type=int, help='Minimum epochs to collect before model training commences, must be less than min_epochs_test.
Otherwise min_epochs_test defaults to min_epochs_train+1.') 102 | parser.add_argument("--min_epochs_test", default=14, type=int, help='Minimum epochs to collect before model testing commences; if less than or equal to min_epochs_train, defaults to min_epochs_train+1.') 103 | parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 104 | 105 | args = parser.parse_args() 106 | main(**vars(args)) -------------------------------------------------------------------------------- /pybci/CliTests/testTensorflow.py: -------------------------------------------------------------------------------- 1 | import time 2 | import threading 3 | import argparse 4 | from ..pybci import PyBCI 5 | import tensorflow as tf # bring in tf for custom model creation 6 | import os 7 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 8 | 9 | 10 | stop_signal = threading.Event() # Global event to control the main loop 11 | 12 | def command_listener(): 13 | while not stop_signal.is_set(): 14 | command = input("PyBCI: [CLI] - Enter 'stop' to terminate\n") 15 | if command == 'stop': 16 | stop_signal.set() 17 | break 18 | 19 | 20 | 21 | class CLI_testPytorchWrapper: 22 | def __init__(self, createPseudoDevice, min_epochs_train, min_epochs_test,num_chs, num_feats, num_classes, timeout): 23 | if createPseudoDevice: 24 | self.num_chs = 8 # 8 channels are created in the PseudoLSLGenerator 25 | self.num_feats = 2 # default is mean freq and rms to keep it simple 26 | self.num_classes = 4 # number of different triggers (can include baseline) sent, defines if we use softmax or binary 27 | 28 | self.createPseudoDevice = createPseudoDevice 29 | self.timeout = timeout 30 | self.min_epochs_train = min_epochs_train 31 | self.min_epochs_test = min_epochs_test 32 | self.accuracy = 0 33 | self.currentMarkers = {} 34 | if self.min_epochs_test <= self.min_epochs_train: 35 | self.min_epochs_test = self.min_epochs_train+1 36 | # Define the GRU model 37 | model = tf.keras.Sequential() 38 | model.add(tf.keras.layers.Reshape((num_chs*num_feats, 1), input_shape=(num_chs*num_feats,))) 39 | model.add(tf.keras.layers.GRU(units=256))#, input_shape=num_chs*num_feats)) # maybe should show this example as 2d with toggleable timesteps disabled 40 | model.add(tf.keras.layers.Dense(units=512, activation='relu')) 41 | model.add(tf.keras.layers.Flatten())# )tf.keras.layers.Dense(units=128, activation='relu')) 42 | model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax')) # softmax as more than binary classification (sparse_categorical_crossentropy) 43 | #model.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) # sigmoid as binary classification (binary_crossentropy) 44 | model.summary() 45 | model.compile(loss='sparse_categorical_crossentropy',# using sparse_categorical as we expect multi-class (>2) output, sparse because we encode target values with integers 46 | optimizer='adam', 47 | metrics=['accuracy']) 48 | #current_os = get_os() 49 | #if current_os == "Windows": 50 | self.bci = PyBCI(minimumEpochsRequired = 3, createPseudoDevice=True, model = model) 51 | #else: 52 | # pdc = PseudoDeviceController(execution_mode="process") 53 | # pdc.BeginStreaming() 54 | # time.sleep(10) 55 | # self.bci = PyBCI(minimumEpochsRequired = 3, createPseudoDevice=True, pseudoDeviceController=pdc, model = model) 56 | 57 | #self.bci = PyBCI(minimumEpochsRequired = min_epochs_train, createPseudoDevice=createPseudoDevice, model = model) 58 | main_thread = threading.Thread(target=self.loop) 59 | main_thread.start() 60
| if self.timeout: 61 | print("PyBCI: [CLI] - starting timeout thread") 62 | self.timeout_thread = threading.Thread(target=self.stop_after_timeout) 63 | self.timeout_thread.start() 64 | main_thread.join() 65 | if timeout is not None: 66 | self.timeout_thread.join() 67 | 68 | 69 | def loop(self): 70 | while not self.bci.connected: # check to see if lsl marker and datastream are available 71 | self.bci.Connect() 72 | time.sleep(1) 73 | self.bci.TrainMode() # now both marker and datastreams available start training on received epochs 74 | self.accuracy = 0 75 | test = False 76 | try: 77 | while not stop_signal.is_set(): # Add the check here 78 | if test is False: 79 | self.currentMarkers = self.bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 80 | time.sleep(0.5) # wait for marker updates 81 | print("Markers received: " + str(self.currentMarkers) +" Accuracy: " + str(round(self.accuracy,2))) 82 | if len(self.currentMarkers) > 1: # check there is more than one marker type received 83 | if min([self.currentMarkers[key][1] for key in self.currentMarkers]) > self.bci.minimumEpochsRequired: 84 | classInfo = self.bci.CurrentClassifierInfo() # hangs if called too early 85 | self.accuracy = classInfo["accuracy"] 86 | if min([self.currentMarkers[key][1] for key in self.currentMarkers]) > self.min_epochs_test: 87 | self.bci.TestMode() 88 | test = True 89 | else: 90 | markerGuess = self.bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 91 | guess = [key for key, value in self.currentMarkers.items() if value[0] == markerGuess] 92 | print("Current marker estimation: " + str(guess)) 93 | time.sleep(0.2) 94 | self.bci.StopThreads() 95 | except KeyboardInterrupt: # allow user to break while loop 96 | print("\nLoop interrupted by user.") 97 | 98 | def stop_after_timeout(self): 99 | time.sleep(self.timeout) 100 | stop_signal.set() 101 | print("\nTimeout reached. Stopping threads.") 102 | 103 | # Accessor methods used by the CLI tests 104 | def get_accuracy(self): 105 | return self.accuracy 106 | 107 | def get_current_markers(self): 108 | return self.currentMarkers 109 | 110 | def main(createPseudoDevice=True, min_epochs_train=4, min_epochs_test=10, num_chs = 8, num_feats = 2, num_classes = 4, timeout=None): 111 | global num_chs_g, num_feats_g, num_classes_g 112 | num_chs_g = num_chs 113 | num_feats_g = num_feats 114 | num_classes_g = num_classes 115 | command_thread = threading.Thread(target=command_listener) 116 | command_thread.daemon = True 117 | command_thread.start() 118 | 119 | my_bci_wrapper = CLI_testTensorflowWrapper(createPseudoDevice, min_epochs_train, min_epochs_test, num_chs, num_feats, num_classes, timeout) 120 | command_thread.join() 121 | return my_bci_wrapper # Return this instance 122 | 123 | if __name__ == '__main__': 124 | parser = argparse.ArgumentParser(description="A Tensorflow GRU is used for the model and the pseudodevice generates 8 channels of 3 marker types and baseline. Similar to testTensorflow.py in the examples folder.") 125 | parser.add_argument("--createPseudoDevice", default=True, type=bool, help="Set to True or False to enable or disable pseudo device creation. The pseudodevice generates 8 channels of 3 marker types and baseline.") 126 | parser.add_argument("--min_epochs_train", default=4, type=int, help='Minimum epochs to collect before model training commences; must be less than min_epochs_test.
If min_epochs_test is less than or equal to this, min_epochs_test defaults to min_epochs_train+1.') 127 | parser.add_argument("--min_epochs_test", default=14, type=int, help='Minimum epochs to collect before model testing commences; if less than or equal to min_epochs_train, defaults to min_epochs_train+1.') 128 | parser.add_argument("--num_chs", default=8, type=int, help='Number of channels in the data stream used to configure the tensorflow model; if createPseudoDevice==True defaults to 8.') 129 | parser.add_argument("--num_classes", default=4, type=int, help='Number of classes in the marker stream used to configure the tensorflow model; if createPseudoDevice==True defaults to 4.') 130 | parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 131 | 132 | args = parser.parse_args() 133 | main(**vars(args)) 134 | -------------------------------------------------------------------------------- /pybci/Configuration/EpochSettings.py: -------------------------------------------------------------------------------- 1 | class GlobalEpochSettings: 2 | splitCheck = True # whether or not to subdivide epochs 3 | tmin = 0 # time in seconds to capture samples before trigger 4 | tmax = 1 # time in seconds to capture samples after trigger 5 | windowLength = 0.5 # if splitCheck is True - time in seconds of each split epoch window 6 | windowOverlap = 0.5 # if splitCheck is True - percentage value > 0 and < 1; e.g. an epoch with tmin of 0 and tmax of 1 and a window 7 | # length of 0.5 gives one window from t=0 to t=0.5, another from t=0.25 to t=0.75, and one from t=0.5 to t=1 8 | 9 | # customWindowSettings should be a dict mapping marker name to an IndividualEpochSetting 10 | class IndividualEpochSetting: 11 | splitCheck = True # whether or not to subdivide epochs 12 | tmin = 0 # time in seconds to capture samples before trigger 13 | tmax = 1 # time in seconds to capture samples after trigger -------------------------------------------------------------------------------- /pybci/Configuration/FeatureSettings.py: -------------------------------------------------------------------------------- 1 | class GeneralFeatureChoices: 2 | psdBand = False 3 | #appr_entropy = False 4 | perm_entropy = False 5 | spec_entropy = False 6 | svd_entropy = False 7 | rms = True 8 | meanPSD = True 9 | medianPSD = False 10 | variance = False 11 | meanAbs = False 12 | waveformLength = False 13 | zeroCross = False 14 | slopeSignChange = False -------------------------------------------------------------------------------- /pybci/Configuration/PseudoDeviceSettings.py: -------------------------------------------------------------------------------- 1 | 2 | class PseudoDataConfig: 3 | duration = 1.0 4 | noise_level = 1 5 | amplitude = 2 6 | frequency = 3 7 | 8 | class PseudoMarkerConfig: 9 | markerName = "PyBCIPseudoMarkers" 10 | markerType = "Markers" 11 | baselineMarkerString = "baseline" 12 | repeat = True 13 | autoplay = True 14 | num_baseline_markers = 10 15 | number_marker_iterations = 10 16 | seconds_between_markers = 5 17 | seconds_between_baseline_marker = 10 18 | baselineConfig = PseudoDataConfig() -------------------------------------------------------------------------------- /pybci/Configuration/__init__.py: -------------------------------------------------------------------------------- 1 | #from .EpochSettings import EpochSettings 2 | #from .GeneralFeatureChoices import GeneralFeatureChoices 3 | 4 | -------------------------------------------------------------------------------- /pybci/Examples/ArduinoHandGrasp/ArduinoToLSL.py:
-------------------------------------------------------------------------------- 1 | import serial 2 | from pylsl import StreamInfo, StreamOutlet, StreamInlet, resolve_stream 3 | # Setup serial connection 4 | ser = serial.Serial('COM9', 9600) # change '/dev/ttyACM0' to your serial port name 5 | # Setup LSL 6 | info = StreamInfo('ArduinoHandData', 'EMG', 1, 100, 'float32', 'myuid34234') 7 | outlet = StreamOutlet(info) 8 | 9 | # Look for the marker stream 10 | print("looking for a marker stream...") 11 | streams = resolve_stream('name', 'MarkerHandGrasps') 12 | 13 | inlet = StreamInlet(streams[0]) 14 | 15 | ser.write("1\r".encode()) 16 | print("Beginning transmission...") 17 | while True: 18 | try: 19 | # Read data from the Arduino and send it to LSL 20 | if ser.in_waiting: 21 | data = ser.readline().strip() # read a '\n' terminated line, strip newline characters 22 | #print(data) 23 | if data: 24 | try: 25 | data = float(data) # convert data to float 26 | outlet.push_sample([data]) # send data to LSL 27 | except ValueError: 28 | pass # ignore this reading 29 | # Read data from the LSL marker stream and send it to the Arduino 30 | marker, timestamp = inlet.pull_sample(0.0) 31 | if marker: 32 | grasp_pattern = str(marker[0]) 33 | ser.write(grasp_pattern.encode()) # send grasp pattern to the Arduino 34 | 35 | except KeyboardInterrupt: 36 | break -------------------------------------------------------------------------------- /pybci/Examples/ArduinoHandGrasp/MarkerMaker.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | import pylsl 3 | 4 | # customisable variables! 5 | stimuli = ["open", "rock", "thumb"]#, "pinky"]#, "peace", "pinky"] 6 | stimuliTime = [3000, 3000, 3000]#, 3000]#, 3000, 3000] 7 | stimuliCount = [12, 12, 12]#, 8, 8] 8 | 9 | 10 | 11 | markerStreamName = "TestMarkers" # should be targetted with pybci 12 | streamType = 'Markers' 13 | 14 | class App: 15 | def __init__(self, root): 16 | markerInfo = pylsl.StreamInfo(markerStreamName, streamType, 1, 0, 'string', 'Dev') # creates lsl marker info 17 | self.markerOutlet = pylsl.StreamOutlet(markerInfo) # creates lsl marker outlet from marker info 18 | self.originalstimuliCount = stimuliCount 19 | self.root = root 20 | self.root.state("zoomed") # Maximize the window 21 | self.root.grid_rowconfigure(0, weight=1) # Configure row 0 to expand vertically 22 | self.root.grid_columnconfigure(0, weight=1) # Configure column 0 to expand horizontally 23 | self.root.grid_columnconfigure(1, weight=1) # Configure column 1 to expand horizontally 24 | 25 | self.label = tk.Label(root, text="", font=("Helvetica", 24)) 26 | self.label.grid(row=0, column=0, columnspan=2, padx=20, pady=20, sticky="nsew") 27 | 28 | self.button = tk.Button(root, text="Start", command=self.toggle_iteration, font=("Helvetica", 18)) 29 | self.button.grid(row=1, column=0, padx=20, pady=20, sticky="nsew") 30 | self.custom_button = tk.Button(root, text="Restart Mark Counts", command=self.reset_function, font=("Helvetica", 18)) 31 | self.custom_button.grid(row=1, column=1, padx=20, pady=20, sticky="nsew") 32 | self.close_button = tk.Button(root, text="Close", command=self.root.destroy, font=("Helvetica", 18)) 33 | self.close_button.grid(row=2, column=0, columnspan=2, padx=20, pady=20, sticky="nsew") 34 | 35 | self.index = 0 36 | self.iterating = False # Variable to track the iteration state 37 | self.after_id = None # Variable to store the after() call ID 38 | 39 | def toggle_iteration(self): 40 | if not self.iterating: 
41 | self.iterating = True 42 | self.button.configure(text="Stop") 43 | self.next_stimulus() 44 | else: 45 | self.iterating = False 46 | self.button.configure(text="Start") 47 | if self.after_id is not None: 48 | self.root.after_cancel(self.after_id) 49 | self.after_id = None 50 | 51 | def next_stimulus(self): 52 | if self.iterating: 53 | if len(stimuli) == 0: 54 | self.label['text'] = "Finished" 55 | else: 56 | self.label['text'] = stimuli[self.index] 57 | self.markerOutlet.push_sample([stimuli[self.index]]) 58 | print("sent marker") 59 | self.after_id = self.root.after(stimuliTime[self.index], self.next_stimulus) 60 | stimuliCount[self.index] -= 1 61 | if stimuliCount[self.index] == 0: 62 | self.remove_stimulus(self.index) 63 | else: 64 | self.index = (self.index + 1) % len(stimuli) # Increment index and wrap around when it reaches the end 65 | 66 | def remove_stimulus(self, index): 67 | del stimuli[index] 68 | del stimuliTime[index] 69 | del stimuliCount[index] 70 | if len(stimuli) == 0: 71 | self.iterating = False 72 | self.button.configure(text="Start") 73 | if self.after_id is not None: 74 | self.root.after_cancel(self.after_id) 75 | self.after_id = None 76 | 77 | def reset_function(self): 78 | # Reset the stimulus lists to their initial values and restart iteration 79 | print("Restarting marker counts") 80 | global stimuli 81 | stimuli = ["open", "rock", "thumb"]#, "pinky"]#, "peace", "pinky"] 82 | global stimuliTime 83 | stimuliTime = [3000, 3000, 3000]#, 3000]#, 3000, 3000] 84 | global stimuliCount 85 | stimuliCount = [12, 12, 12]#, 8, 8] 86 | self.index = 0 87 | if self.after_id is not None: 88 | self.root.after_cancel(self.after_id) 89 | self.after_id = None 90 | self.iterating = True 91 | self.button.configure(text="Stop") 92 | self.next_stimulus() 93 | 94 | 95 | root = tk.Tk() 96 | app = App(root) 97 | root.mainloop() 98 | -------------------------------------------------------------------------------- /pybci/Examples/ArduinoHandGrasp/README.md: -------------------------------------------------------------------------------- 1 | # Arduino hand-grasp example 2 | 3 | | File | Description | 4 | |------------------|-------------| 5 | | ArduinoToLSL.py | Grabs data from the Arduino via the serial port and puts it onto an LSL data stream; also receives the LSL marker stream from testArduinoHand.py to send hand servo motor position commands to the Arduino. | 6 | | MarkerMaker.py | Used to deliver the training stimulus via a tkinter GUI, sending LSL markers to train PyBCI. Receives markers via LSL from testArduinoHand.py when in test mode. | 7 | | ServoControl.ino | Arduino sketch used to capture data from the Myoware sensor and send it via the serial port. Also controls the servo motors and finger positions from serial commands. | 8 | | testArduinoHand.py | PyBCI script which receives the LSL data stream from ArduinoToLSL.py and uses MarkerMaker.py to deliver training stimulus markers. When in test mode it outputs an LSL marker stream to send commands to the Arduino via ArduinoToLSL.py. | 9 | | testArduinoPytorch.py | Similar to testArduinoHand.py but using PyTorch. | 10 | 11 | This folder contains an example of how to push ADC recordings from an Arduino to LSL, use the PyBCI package to classify the incoming data, and then push command data back to the Arduino in test mode to control servo positions. More channels, and devices with more stable sample rates, could potentially classify more classes more accurately.
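As a rough sketch of the command path (names and stream IDs below mirror testArduinoHand.py and ServoControl.ino in this folder): the classifier's marker guess is pushed as an LSL marker string, ArduinoToLSL.py forwards it over serial, and the Arduino sketch maps the characters '0'-'5' to pre-set finger positions:

```python
# Illustrative sketch only - see testArduinoHand.py for the full loop.
from pylsl import StreamInfo, StreamOutlet

outlet = StreamOutlet(StreamInfo('MarkerHandGrasps', 'Markers', 1, 0, 'string', 'myuniquemarkerid2023'))
# Hand-position commands understood by ServoControl.ino (forwarded by ArduinoToLSL.py):
guess_to_command = {"open": "0\r", "fist": "1\r", "rock": "2\r",
                    "peace": "3\r", "pinky": "4\r", "thumb": "5\r"}
outlet.push_sample([guess_to_command["rock"]])  # e.g. ask the hand to make a 'rock' grasp
```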
12 | 13 | This is an extremely simple example setup, using a single [Myoware Sensor](https://myoware.com/products/muscle-sensor/) as an input to an Arduino ADC; the 3D-printed finger positions are then controlled with pulleys on servo motors. 14 | 15 | An example video can be found here: 16 | 17 | [![PyBCI Arduino Hand Demo](http://i3.ytimg.com/vi/InEbiykeinQ/hqdefault.jpg)](https://www.youtube.com/watch?v=InEbiykeinQ) 18 | -------------------------------------------------------------------------------- /pybci/Examples/ArduinoHandGrasp/ServoControl.ino: -------------------------------------------------------------------------------- 1 | #include <Servo.h> 2 | Servo handservos[5]; // create an array of servo objects 3 | #define ANALOG_PIN A0 4 | void SetHand(Servo servos[], int pos[]){ 5 | for (int i = 0; i < 5; i++) { 6 | servos[i].write(pos[i]); 7 | } 8 | } 9 | int handPos[6][5] ={ 10 | {0, 0, 0, 0, 0},// open 11 | {180,180,180,180,180}, // closed 12 | {0,180,180,0,180}, // rock 13 | {180,180,0,0,180}, // peace 14 | {0,180,180,180,180}, // pinky 15 | {180,180,180,180,0} // thumb 16 | }; 17 | 18 | void setup() { 19 | Serial.begin(9600); // start serial communication at 9600bps 20 | handservos[0].attach(3); // little finger 21 | handservos[1].attach(5); 22 | handservos[2].attach(6); // middle finger 23 | handservos[3].attach(9); 24 | handservos[4].attach(10); // thumb 25 | SetHand(handservos, handPos[0]); 26 | } 27 | 28 | void loop() { 29 | if (Serial.available() > 0) { // if there's data available to read 30 | char handIndex = Serial.read(); // read the incoming byte as a char 31 | //Serial.print(handIndex); 32 | if (handIndex >= '0' && handIndex <= '5') { 33 | int incomingInt = handIndex - '0'; 34 | SetHand(handservos, handPos[incomingInt]); 35 | } 36 | } 37 | int analogValue = analogRead(ANALOG_PIN); // read the input on analog pin 38 | Serial.println(analogValue); 39 | } 40 | -------------------------------------------------------------------------------- /pybci/Examples/ArduinoHandGrasp/testArduinoHand.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | from pybci.Configuration.EpochSettings import GlobalEpochSettings 4 | from pybci.Utils.Logger import Logger 5 | import numpy as np 6 | from scipy.fft import fft 7 | # We control the arduino via an lsl python script which is responsible for the COM connection 8 | from pylsl import StreamInfo, StreamOutlet 9 | # Set up the LSL stream info 10 | info = StreamInfo('MarkerHandGrasps', 'Markers', 1, 0, 'string', 'myuniquemarkerid2023') 11 | # Create the outlet 12 | outlet = StreamOutlet(info) 13 | gs = GlobalEpochSettings() 14 | gs.tmax = 2.5 # grab 2.5 seconds after marker 15 | gs.tmin = -0.5 # grab 0.5 seconds before marker 16 | gs.splitCheck = True # splits samples between tmin and tmax 17 | gs.windowLength = 0.5 # time in seconds of each split window 18 | gs.windowOverlap = 0.5 # windows overlap by 50% 19 | #from sklearn.neural_network import MLPClassifier 20 | #clf = MLPClassifier(max_iter = 1000, solver ="lbfgs")#solver=clf, alpha=alpha,hidden_layer_sizes=hid) 21 | class EMGClassifier(): 22 | def ProcessFeatures(self, epochData, sr, epochNum): # Every custom class requires a function with this name and structure to extract the feature data; epochData is always [samples, channels] 23 | print(epochData.shape) 24 | 25 | rmsCh1 = np.sqrt(np.mean(np.array(epochData[:,0])**2)) 26 | rangeCh1 = max(epochData[:,0])-min(epochData[:,0]) 27 | varCh1 = np.var(epochData[:,0]) 28 | meanAbsCh1 =
np.mean(np.abs(epochData[:,0])) 29 | zeroCrossCh1 = ((epochData[:,0][:-1] * epochData[:,0][1:]) < 0).sum() 30 | fft_result = fft(epochData[:,0]) 31 | frequencies = np.fft.fftfreq(len(epochData[:,0]), 1/ 192) # approximate sample rate 32 | 33 | delta_mask = (frequencies >= 0.5) & (frequencies <= 2) 34 | delta_power = np.mean(np.abs(fft_result[delta_mask])**2) 35 | delta2_mask = (frequencies >= 2) & (frequencies <= 4) 36 | delta2_power = np.mean(np.abs(fft_result[delta2_mask])**2) 37 | 38 | theta_mask = (frequencies >= 4) & (frequencies <= 7) 39 | theta_power = np.mean(np.abs(fft_result[theta_mask])**2) 40 | 41 | alpha_mask = (frequencies >= 7) & (frequencies <= 10) 42 | alpha_power = np.mean(np.abs(fft_result[alpha_mask])**2) 43 | 44 | beta_mask = (frequencies >= 10) & (frequencies <= 15) 45 | beta_power = np.mean(np.abs(fft_result[beta_mask])**2) 46 | beta2_mask = (frequencies >= 15) & (frequencies <= 20) 47 | beta2_power = np.mean(np.abs(fft_result[beta2_mask])**2) 48 | 49 | gamma_mask = (frequencies >= 20) & (frequencies <= 25) 50 | gamma_power = np.mean(np.abs(fft_result[gamma_mask])**2) 51 | 52 | a = np.array([rmsCh1, varCh1,rangeCh1, meanAbsCh1, zeroCrossCh1, max(epochData[:,0]), min(epochData[:,0]), 53 | alpha_power, delta_power,delta2_power, theta_power, beta_power,beta2_power, gamma_power]).T 54 | 55 | return np.nan_to_num(a) 56 | #[rmsCh1, rmsCh2,varCh1,varCh2,rangeCh1, rangeCh2, meanAbsCh1, meanAbsCh2, zeroCrossCh1,zeroCrossCh2]) 57 | 58 | streamCustomFeatureExtract = {"ArduinoHandData":EMGClassifier()} 59 | dataStreams = ["ArduinoHandData"] 60 | bci = PyBCI(minimumEpochsRequired = 6, loggingLevel= Logger.INFO, globalEpochSettings=gs,dataStreams=dataStreams, markerStream="TestMarkers", streamCustomFeatureExtract=streamCustomFeatureExtract)#, loggingLevel = Logger.NONE) 61 | while not bci.connected: # check to see if lsl marker and datastream are available 62 | bci.Connect() 63 | time.sleep(1) 64 | 65 | #ser.write(b'0\r') # Convert the string to bytes and send 66 | 67 | bci.TrainMode() # now both marker and datastreams available start training on received epochs 68 | accuracy = 0 69 | try: 70 | while(True): 71 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 72 | time.sleep(0.2) # wait for marker updates 73 | print("Markers received: " + str(currentMarkers) +" Class accuracy: " + str(accuracy), end="\r") 74 | if len(currentMarkers) > 1: # check there is more then one marker type received 75 | #print(bci.CurrentFeaturesTargets()) 76 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 77 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 78 | accuracy = classInfo["accuracy"] 79 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+30: 80 | #time.sleep(2) 81 | bci.TestMode() 82 | break 83 | while True: 84 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 85 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 86 | print("Current marker estimation: " + str(guess), end="\r") 87 | time.sleep(0.2) 88 | if len(guess)>0: 89 | outlet.push_sample([str(markerGuess)]) 90 | if guess[0] == "open": 91 | #print("sending 0") 92 | outlet.push_sample(["0\r"]) 93 | #ser.write(b"0\r") # Convert the string to bytes and send 94 | elif guess[0] == "fist": 95 | outlet.push_sample(["1\r"]) 96 | #ser.write(b"1\r") # Convert the string to 
bytes and send 97 | elif guess[0] == "rock": 98 | outlet.push_sample(["2\r"]) 99 | #ser.write(b"2\r") # Convert the string to bytes and send 100 | elif guess[0] == "peace": 101 | outlet.push_sample(["3\r"]) 102 | #ser.write(b"3\r") # Convert the string to bytes and send 103 | elif guess[0] == "pinky": 104 | outlet.push_sample(["4\r"]) 105 | #ser.write(b"4\r") # Convert the string to bytes and send 106 | elif guess[0] == "thumb": 107 | outlet.push_sample(["5\r"]) 108 | #ser.write(b"5\r") # Convert the string to bytes and send 109 | 110 | #["open", "fist", "rock"]#, "peace", "pinky"] 111 | except KeyboardInterrupt: # allow user to break while loop 112 | pass -------------------------------------------------------------------------------- /pybci/Examples/ArduinoHandGrasp/testArduinoPytorch.py: -------------------------------------------------------------------------------- 1 | import time 2 | import torch 3 | from torch.utils.data import DataLoader, TensorDataset 4 | from torch import nn 5 | from pybci import PyBCI 6 | from pybci.Configuration.EpochSettings import GlobalEpochSettings 7 | from pybci.Utils.Logger import Logger 8 | import numpy as np 9 | # We control the arduino via an lsl python script which is responsible for the COM connection 10 | from pylsl import StreamInfo, StreamOutlet 11 | # Set up the LSL stream info 12 | info = StreamInfo('MarkerHandGrasps', 'Markers', 1, 0, 'string', 'myuniquemarkerid2023') 13 | # Create the outlet 14 | outlet = StreamOutlet(info) 15 | gs = GlobalEpochSettings() 16 | gs.tmax = 2.5 # grab 2.5 seconds after marker 17 | gs.tmin = -0.5 # grab 0.5 seconds before marker 18 | gs.splitCheck = True # splits samples between tmin and tmax 19 | gs.windowLength = 1 # time in seconds of each split window 20 | gs.windowOverlap = 0.5 # windows overlap by 50% 21 | 22 | num_chs = 1 # ArduinoToLSL.py pushes a single EMG channel; raw time series over many channels is computationally expensive!
23 | num_samps = 192 # expected samples per window; the Arduino stream arrives at roughly 192 Hz in practice 24 | num_classes = 3 # number of different triggers (can include baseline) sent; defines whether we use a softmax or binary output 25 | 26 | class LSTMNet(nn.Module): 27 | def __init__(self, input_dim, hidden_dim, num_layers, num_classes): 28 | super(LSTMNet, self).__init__() 29 | self.hidden_dim = hidden_dim 30 | self.num_layers = num_layers 31 | # LSTM layer 32 | self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True) 33 | # Fully connected layer 34 | self.fc = nn.Linear(hidden_dim, num_classes) 35 | 36 | def forward(self, x): 37 | # Initialize hidden and cell states 38 | h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(x.device) 39 | c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(x.device) 40 | # LSTM forward pass 41 | out, _ = self.lstm(x, (h0, c0)) 42 | # Only take the output from the final timestep 43 | out = out[:, -1, :] 44 | # Pass through the fully connected layer 45 | out = self.fc(out) 46 | return out 47 | 48 | def PyTorchModel(x_train, x_test, y_train, y_test ): 49 | # Define the hyperparameters 50 | input_dim = num_chs 51 | hidden_dim = 128 52 | num_layers = 2 53 | model = LSTMNet(input_dim, hidden_dim, num_layers, num_classes) 54 | model.train() 55 | criterion = nn.CrossEntropyLoss() 56 | optimizer = torch.optim.Adam(model.parameters(), lr=0.01) 57 | epochs = 10 58 | # Reshape the input data to be [batch, sequence, feature] 59 | x_train = x_train.reshape(-1, num_samps, input_dim) 60 | x_test = x_test.reshape(-1, num_samps, input_dim) 61 | print(x_train.shape) 62 | print(y_train.shape) 63 | train_data = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train).long()) 64 | train_loader = DataLoader(dataset=train_data, batch_size=8, shuffle=True) 65 | for epoch in range(epochs): 66 | for inputs, labels in train_loader: 67 | optimizer.zero_grad() 68 | outputs = model(inputs) 69 | loss = criterion(outputs, labels) 70 | loss.backward() 71 | optimizer.step() 72 | model.eval() 73 | accuracy = 0 74 | with torch.no_grad(): 75 | test_outputs = model(torch.Tensor(x_test)) 76 | _, predicted = torch.max(test_outputs.data, 1) 77 | correct = (predicted == torch.Tensor(y_test).long()).sum().item() 78 | accuracy = correct / len(y_test) 79 | return accuracy, model 80 | 81 | class RawDecode(): 82 | desired_length = num_samps 83 | def ProcessFeatures(self, d, sr, target): 84 | #print(d.shape) 85 | print("rawdecode pre shape: ",d.shape) 86 | #if self.desired_length == 0: # needed as windows may be differing sizes due to timestamp variance on LSL 87 | # self.desired_length = d.shape[1] 88 | if d.shape[0] != self.desired_length: 89 | #for ch in range(d.shape[0]): 90 | d = np.resize(d, (self.desired_length, 1)) 91 | #print("rawdecode shape: ", d.shape) 92 | return d # note: the for-loop StandardScaler normalises on [channel, feature], whereas pull_chunk is [sample, channel]; 93 | # for time series data we want to normalise each channel relative to itself 94 | 95 | streamCustomFeatureExtract = {"ArduinoHandData":RawDecode()} 96 | dataStreams = ["ArduinoHandData"] 97 | bci = PyBCI(minimumEpochsRequired = 6, loggingLevel= Logger.INFO,torchModel=PyTorchModel, globalEpochSettings=gs,dataStreams=dataStreams, markerStream="TestMarkers", streamCustomFeatureExtract=streamCustomFeatureExtract)#, loggingLevel = Logger.NONE) 98 | while not bci.connected: # check to see if lsl marker and datastream are available 99 | bci.Connect() 100 | time.sleep(1) 101 | 102 |
103 | #ser.write(b'0\r') # Convert the string to bytes and send 104 | 105 | bci.TrainMode() # now both marker and datastreams available start training on received epochs 106 | accuracy = 0 107 | try: 108 | while(True): 109 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 110 | time.sleep(0.1) # wait for marker updates 111 | print("Markers received: " + str(currentMarkers) +" Class accuracy: " + str(accuracy), end="\r") 112 | if len(currentMarkers) > 1: # check there is more than one marker type received 113 | #print(bci.CurrentFeaturesTargets()) 114 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 115 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 116 | accuracy = classInfo["accuracy"] 117 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+20: 118 | #time.sleep(2) 119 | bci.TestMode() 120 | break 121 | while True: 122 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 123 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 124 | print("Current marker estimation: " + str(guess), end="\r") 125 | time.sleep(0.5) 126 | if len(guess)>0: 127 | #outlet.push_sample([str(markerGuess)]) 128 | if guess[0] == "open": 129 | #print("sending 0") 130 | outlet.push_sample(["0\r"]) 131 | #ser.write(b"0\r") # Convert the string to bytes and send 132 | elif guess[0] == "fist": 133 | outlet.push_sample(["1\r"]) 134 | #ser.write(b"1\r") # Convert the string to bytes and send 135 | elif guess[0] == "rock": 136 | outlet.push_sample(["2\r"]) 137 | #ser.write(b"2\r") # Convert the string to bytes and send 138 | elif guess[0] == "peace": 139 | outlet.push_sample(["3\r"]) 140 | #ser.write(b"3\r") # Convert the string to bytes and send 141 | elif guess[0] == "pinky": 142 | outlet.push_sample(["4\r"]) 143 | #ser.write(b"4\r") # Convert the string to bytes and send 144 | elif guess[0] == "thumb": 145 | outlet.push_sample(["5\r"]) 146 | #ser.write(b"5\r") # Convert the string to bytes and send 147 | 148 | #["open", "fist", "rock"]#, "peace", "pinky"] 149 | except KeyboardInterrupt: # allow user to break while loop 150 | pass -------------------------------------------------------------------------------- /pybci/Examples/MultimodalPupilLabsEEG/testMultimodal.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | import numpy as np 4 | from scipy.fft import fft 5 | 6 | class PupilGazeDecode(): 7 | # pupil-labs channels: 8 | #['confidence', 1 9 | # 'norm_pos_x', 'norm_pos_y', 'gaze_point_3d_x', 'gaze_point_3d_y', 'gaze_point_3d_z', + 5 10 | # 'eye_center0_3d_x', 'eye_center0_3d_y', 'eye_center0_3d_z', 'eye_center1_3d_x', 'eye_center1_3d_y', + 5 11 | # 'eye_center1_3d_z', # 'gaze_normal0_x', 'gaze_normal0_y', 'gaze_normal0_z', 'gaze_normal1_x', + 5 12 | # 'gaze_normal1_y', 'gaze_normal1_z', 'diameter0_2d', 'diameter1_2d', 'diameter0_3d', 'diameter1_3d'] + 6 = 22 channels 13 | def __init__(self): 14 | super().__init__() 15 | def ProcessFeatures(self, epochData, sr, epochNum): # This is the required function name and signature that is passed to all custom feature extractor classes 16 | epochData = np.nan_to_num(epochData) # sklearn doesn't like NaNs 17 | #print(epochData.shape) 18 | if len(epochData[0]) == 0: 19 | return [0,0,0,0] # must match the four features returned below 20 | else: 21 | confidence = np.mean(epochData[0]) # remaining channel 0 is the gaze confidence, get mean
22 | rightmean = np.mean(epochData[1]) # remaining channel 1 is the right pupil diameter (diameter0_2d), get mean 23 | leftmean = np.mean(epochData[2]) # remaining channel 2 is the left pupil diameter (diameter1_2d), get mean 24 | bothmean = np.mean([(epochData[1][i] + epochData[2][i]) / 2 for i in range(len(epochData[1]))]) # mean of both eyes 25 | return np.nan_to_num([confidence, rightmean,leftmean,bothmean]) # expects 2d 26 | 27 | 28 | class EOGClassifier(): 29 | # used Fp1 and Fp2 from io:bio EEG device 30 | def ProcessFeatures(self, epochData, sr, epochNum): # Every custom class requires a function with this name and structure to extract the feature data; epochData is always [Samples, Channels] 31 | #print(epochData.shape) 32 | rmsCh1 = np.sqrt(np.mean(np.array(epochData[:,0])**2)) 33 | rangeCh1 = max(epochData[:,0])-min(epochData[:,0]) 34 | varCh1 = np.var(epochData[:,0]) 35 | meanAbsCh1 = np.mean(np.abs(epochData[:,0])) 36 | zeroCrossCh1 = ((epochData[:,0][:-1] * epochData[:,0][1:]) < 0).sum() 37 | fft_result = fft(epochData[:,0]) 38 | frequencies = np.fft.fftfreq(len(epochData[:,0]), 1/ sr) 39 | delta_mask = (frequencies >= 0.5) & (frequencies <= 2) 40 | delta_power = np.mean(np.abs(fft_result[delta_mask])**2) 41 | delta2_mask = (frequencies >= 2) & (frequencies <= 4) 42 | delta2_power = np.mean(np.abs(fft_result[delta2_mask])**2) 43 | theta_mask = (frequencies >= 4) & (frequencies <= 7) 44 | theta_power = np.mean(np.abs(fft_result[theta_mask])**2) 45 | alpha_mask = (frequencies >= 7) & (frequencies <= 10) 46 | alpha_power = np.mean(np.abs(fft_result[alpha_mask])**2) 47 | beta_mask = (frequencies >= 10) & (frequencies <= 15) 48 | beta_power = np.mean(np.abs(fft_result[beta_mask])**2) 49 | beta2_mask = (frequencies >= 15) & (frequencies <= 20) 50 | beta2_power = np.mean(np.abs(fft_result[beta2_mask])**2) 51 | gamma_mask = (frequencies >= 20) & (frequencies <= 25) 52 | gamma_power = np.mean(np.abs(fft_result[gamma_mask])**2) 53 | a = np.array([rmsCh1, varCh1,rangeCh1, meanAbsCh1, zeroCrossCh1, max(epochData[:,0]), min(epochData[:,0]), 54 | alpha_power, delta_power,delta2_power, theta_power, beta_power,beta2_power, gamma_power]).T 55 | return np.nan_to_num(a) 56 | 57 | hullUniEEGLSLStreamName = "EEGStream" 58 | pupilLabsLSLName = "pupil_capture" 59 | markerstream = "TestMarkers" # using the pupillabs rightleftmarkers example 60 | streamCustomFeatureExtract = {pupilLabsLSLName: PupilGazeDecode(), hullUniEEGLSLStreamName: EOGClassifier()} #GenericFeatureExtractor 61 | dataStreamNames = [pupilLabsLSLName, hullUniEEGLSLStreamName] 62 | # to reduce overall computational complexity we are going to drop irrelevant channels 63 | streamChsDropDict = {hullUniEEGLSLStreamName : [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,19,20,21,22,23], # for our device Fp1 and Fp2 are on channels 18 and 19, so zero-indexed values 17 and 18 are kept and all others dropped 64 | pupilLabsLSLName: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17, 20, 21] # from pupil labs we only want the left and right pupil diameters, drop the rest 65 | } 66 | bci = PyBCI(dataStreams = dataStreamNames, markerStream=markerstream, minimumEpochsRequired = 4, 67 | streamChsDropDict = streamChsDropDict, 68 | streamCustomFeatureExtract=streamCustomFeatureExtract ) #model = model, 69 | 70 | while not bci.connected: 71 | bci.Connect() 72 | time.sleep(1) 73 | print(bci.markerStream.info().name()) 74 | bci.TrainMode() 75 | accuracy = 0 76 | try: 77 | while(True): 78 | currentMarkers = bci.ReceivedMarkerCount() # check how many
epochs have been received; markers sent too close together are ignored until processing is done 79 | time.sleep(1) # wait for marker updates 80 | print("Markers received: " + str(currentMarkers) +" Class accuracy: " + str(accuracy))#, end="\r") 81 | if len(currentMarkers) > 1: # check there is more than one marker type received 82 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 83 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 84 | accuracy = classInfo["accuracy"] 85 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+6: 86 | bci.TestMode() 87 | break 88 | while True: 89 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 90 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 91 | print("Current marker estimation: " + str(guess), end="\r") 92 | time.sleep(0.5) 93 | except KeyboardInterrupt: # allow user to break while loop 94 | pass 95 | -------------------------------------------------------------------------------- /pybci/Examples/PupilLabsRightLeftEyeClose/README.md: -------------------------------------------------------------------------------- 1 | # Pupil Labs Right Left Eye Close Example 2 | 3 | The Pupil Labs Right Left Eye Close Example in [bciGazeExample.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/PupilLabsRightLeftEyeClose/bciGazeExample.py) illustrates how a 'simple' custom pupil-labs feature extractor class can be passed for the gaze data: the mean pupil diameter is taken for each eye individually and for both eyes together and used as feature data, with NaNs (no-confidence samples) set to a value of 0. 4 | 5 | The [RightLeftMarkers.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/PupilLabsRightLeftEyeClose/RightLeftMarkers.py) script generates LSL markers and an on-screen stimulus in Python's built-in tkinter to inform the user which eye to close when training the BCI. 6 | 7 | It's advised to have a play with the custom decoding class PupilGazeDecode() in [bciGazeExample.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/PupilLabsRightLeftEyeClose/bciGazeExample.py) and see if other filtering/feature extraction methods can be used to improve the classifier. 8 | 9 | An example video using the multimodal example (pupil labs with Fp1 and Fp2 from EEG) can be found here: 10 | 11 | [![PyBCI Multi-modal demo!](http://i3.ytimg.com/vi/SSmFU_Esayg/hqdefault.jpg)](https://www.youtube.com/watch?v=SSmFU_Esayg) 12 | -------------------------------------------------------------------------------- /pybci/Examples/PupilLabsRightLeftEyeClose/RightLeftMarkers.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | import pylsl 3 | 4 | # customisable variables!
5 | stimuli = ["Both Open", "Right Open", "Left Open"] 6 | stimuliTime = [3000, 3000, 3000] 7 | stimuliCount = [8, 8, 8] 8 | 9 | markerStreamName = "TestMarkers" # should be targetted with pybci 10 | streamType = 'Markers' 11 | 12 | class App: 13 | def __init__(self, root): 14 | markerInfo = pylsl.StreamInfo(markerStreamName, streamType, 1, 0, 'string', 'Dev') # creates lsl marker info 15 | self.markerOutlet = pylsl.StreamOutlet(markerInfo) # creates lsl marker outlet from marker info 16 | self.originalstimuliCount = stimuliCount 17 | self.root = root 18 | self.root.state("zoomed") # Maximize the window 19 | self.root.grid_rowconfigure(0, weight=1) # Configure row 0 to expand vertically 20 | self.root.grid_columnconfigure(0, weight=1) # Configure column 0 to expand horizontally 21 | self.root.grid_columnconfigure(1, weight=1) # Configure column 1 to expand horizontally 22 | 23 | self.label = tk.Label(root, text="", font=("Helvetica", 24)) 24 | self.label.grid(row=0, column=0, columnspan=2, padx=20, pady=20, sticky="nsew") 25 | 26 | self.button = tk.Button(root, text="Start", command=self.toggle_iteration, font=("Helvetica", 18)) 27 | self.button.grid(row=1, column=0,columnspan=2, padx=20, pady=20, sticky="nsew") 28 | #self.custom_button = tk.Button(root, text="Start Testing", command=self.custom_function, font=("Helvetica", 18)) 29 | #self.custom_button.grid(row=1, column=1, padx=20, pady=20, sticky="nsew") 30 | self.close_button = tk.Button(root, text="Close", command=self.root.destroy, font=("Helvetica", 18)) 31 | self.close_button.grid(row=2, column=0, columnspan=2, padx=20, pady=20, sticky="nsew") 32 | 33 | self.index = 0 34 | self.iterating = False # Variable to track the iteration state 35 | self.after_id = None # Variable to store the after() call ID 36 | 37 | def toggle_iteration(self): 38 | if not self.iterating: 39 | self.iterating = True 40 | self.button.configure(text="Stop") 41 | self.next_stimulus() 42 | else: 43 | self.iterating = False 44 | self.button.configure(text="Start") 45 | if self.after_id is not None: 46 | self.root.after_cancel(self.after_id) 47 | self.after_id = None 48 | 49 | def next_stimulus(self): 50 | if self.iterating: 51 | if len(stimuli) == 0: 52 | self.label['text'] = "Finished" 53 | else: 54 | self.label['text'] = stimuli[self.index] 55 | self.markerOutlet.push_sample([stimuli[self.index]]) 56 | print("sent marker") 57 | self.after_id = self.root.after(stimuliTime[self.index], self.next_stimulus) 58 | stimuliCount[self.index] -= 1 59 | if stimuliCount[self.index] == 0: 60 | self.remove_stimulus(self.index) 61 | else: 62 | self.index = (self.index + 1) % len(stimuli) # Increment index and wrap around when it reaches the end 63 | 64 | def remove_stimulus(self, index): 65 | del stimuli[index] 66 | del stimuliTime[index] 67 | del stimuliCount[index] 68 | if len(stimuli) == 0: 69 | self.iterating = False 70 | self.button.configure(text="Start") 71 | if self.after_id is not None: 72 | self.root.after_cancel(self.after_id) 73 | self.after_id = None 74 | 75 | #def custom_function(self): 76 | # # Define your custom function here 77 | # print("Custom function called") 78 | 79 | root = tk.Tk() 80 | app = App(root) 81 | root.mainloop() 82 | -------------------------------------------------------------------------------- /pybci/Examples/PupilLabsRightLeftEyeClose/bciGazeExample.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | import numpy as np 4 | # pupil-labs channels: 5 | 
#['confidence', 1 6 | # 'norm_pos_x', 'norm_pos_y', 'gaze_point_3d_x', 'gaze_point_3d_y', 'gaze_point_3d_z', + 5 7 | # 'eye_center0_3d_x', 'eye_center0_3d_y', 'eye_center0_3d_z', 'eye_center1_3d_x', 'eye_center1_3d_y', + 5 8 | # 'eye_center1_3d_z', # 'gaze_normal0_x', 'gaze_normal0_y', 'gaze_normal0_z', 'gaze_normal1_x', + 5 9 | # 'gaze_normal1_y', 'gaze_normal1_z', 'diameter0_2d', 'diameter1_2d', 'diameter0_3d', 'diameter1_3d'] + 6 = 22 channels 10 | class PupilGazeDecode(): 11 | def __init__(self): 12 | super().__init__() 13 | def ProcessFeatures(self, epochData, sr, epochNum): # This is the required function name and signature that is passed to all custom feature extractor classes 14 | #print(epochData.shape) 15 | epochData = np.nan_to_num(epochData) # sklearn doesn't like NaNs 16 | if epochData.shape[0] == 0: 17 | return np.array([0,0,0]) 18 | else: 19 | rightmean = np.mean(epochData[:,20]) # channel 20 is the right eye's 3d pupil diameter, get mean 20 | leftmean = np.mean(epochData[:,21]) # channel 21 is the left eye's 3d pupil diameter, get mean 21 | bothmean = np.mean([(epochData[:,20][i] + epochData[:,21][i]) / 2 for i in range(len(epochData[:,20]))]) # mean of both eyes in 3d 22 | #print(np.nan_to_num([rightmean,leftmean,bothmean])) 23 | return np.nan_to_num([rightmean,leftmean,bothmean]) # expects 1d 24 | 25 | streamCustomFeatureExtract = {"pupil_capture" : PupilGazeDecode()} 26 | dataStreamName = ["pupil_capture"] 27 | # Can drop unused channels to save some CPU time; 28 | # if you find performance issues or markers are not received, consider adding streamChsDropDict = {"pupil_capture": range(20)} to the PyBCI initialise call, 29 | # then change epochData[:,20] and [:,21] to [:,0] and [:,1] in PupilGazeDecode() 30 | bci = PyBCI(dataStreams = dataStreamName,markerStream="TestMarkers", minimumEpochsRequired = 4, streamCustomFeatureExtract=streamCustomFeatureExtract) 31 | while not bci.connected: # check to see if lsl marker and datastream are available 32 | bci.Connect() 33 | time.sleep(1) 34 | bci.TrainMode() # now both marker and datastreams available start training on received epochs 35 | accuracy = 0 36 | try: 37 | while(True): 38 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 39 | time.sleep(1) # wait for marker updates 40 | print("Markers received: " + str(currentMarkers) +" Class accuracy: " + str(accuracy), end="\r") 41 | if len(currentMarkers) > 1: # check there is more than one marker type received 42 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 43 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 44 | accuracy = classInfo["accuracy"] 45 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+15: 46 | bci.TestMode() 47 | break 48 | while True: 49 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 50 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 51 | print("Current marker estimation: " + str(guess), end="\r") 52 | time.sleep(0.5) 53 | except KeyboardInterrupt: # allow user to break while loop 54 | pass 55 | -------------------------------------------------------------------------------- /pybci/Examples/README.md: -------------------------------------------------------------------------------- 1 | # PyBCI Examples 2 | 3 | This folder holds multiple scripts illustrating the functions and configurations available within the PyBCI package.
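For a quick start, the following minimal sketch (condensed from testSimple.py further down this folder; with `createPseudoDevice=True` no external LSL hardware is required) trains on pseudo-device epochs and reports the classifier accuracy:

```python
import time
from pybci import PyBCI

if __name__ == '__main__':
    bci = PyBCI(minimumEpochsRequired=5, createPseudoDevice=True)
    while not bci.connected:  # wait until the LSL marker and data streams are found
        bci.Connect()
        time.sleep(1)
    bci.TrainMode()  # start training on received epochs
    try:
        while True:
            time.sleep(0.5)
            markers = bci.ReceivedMarkerCount()  # dict of marker name -> (marker id, epoch count)
            if len(markers) > 1 and min(m[1] for m in markers.values()) > bci.minimumEpochsRequired:
                print("Accuracy:", bci.CurrentClassifierInfo()["accuracy"])  # hangs if called too early
                bci.TestMode()  # switch to estimating markers from live data
                break
    except KeyboardInterrupt:
        bci.StopThreads()
```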
4 | 5 | NOTE: The examples have shields describing whether they work with PyBCI's pseudoDevice class and what additional external hardware is required. If using your own LSL-capable hardware and marker stream, set `createPseudoDevice=False` or optionally pass `True` or `False` as an argument to each script. 6 | 7 | PyBCI requires an LSL marker stream for defining when time-series data should be attributed to an action/marker/epoch, and an LSL data stream to provide the time-series data. 8 | 9 | If the user has no available LSL hardware to hand, they can set `createPseudoDevice=True` when instantiating the PyBCI object to enable a pseudo LSL data stream that generates time-series data and an LSL marker stream for epoching the data. More information on PyBCI's Pseudo Device class can be found in the Pseudo Device documentation. 10 | 11 | The [example scripts](https://pybci.readthedocs.io/en/latest/BackgroundInformation/Examples.html) illustrate various applied ML libraries (SKLearn, Tensorflow, PyTorch) or provide examples of how to integrate LSL hardware. 12 | 13 | The code snippet above runs a simple classification task using the Pseudo Device; alternatively, call pybci in the command line to get a list of CLI commands and tests: 14 | 15 | 16 | | Example File | Description | 17 |--------------|-------------| 18 | [ArduinoHandGrasp/](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/ArduinoHandGrasp/)
![pseudo device not available shield](https://img.shields.io/badge/Pseudo_Device-Not_Available-blue) ![arduino required shield](https://img.shields.io/badge/Arduino-Required-blue) ![Myoware required shield](https://img.shields.io/badge/Myoware_Muscle_Sensor-Required-blue) | Folder contains an LSL marker creator in [MarkerMaker.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/ArduinoHandGrasp/MarkerMaker.py), using tkinter as an on-screen text stimulus, illustrating how LSL markers can be used for training. [ServoControl.ino](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/ArduinoHandGrasp/ServoControl.ino) is designed for an Arduino Uno and controls 5 servo motors, each of which controls the position of an individual finger on a 3D-printed hand driven by serial commands. There is also a [Myoware Muscle Sensor](https://myoware.com/products/muscle-sensor/) attached to analog pin A0, read continuously over the serial connection. [ArduinoToLSL.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/ArduinoHandGrasp/ArduinoToLSL.py) sends and receives serial data to and from the Arduino, pushing the A0 data to an LSL outlet which is classified in [testArduinoHand.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/ArduinoHandGrasp/testArduinoHand.py), while simultaneously receiving a marker stream from testArduinoHand.py indicating which hand position to perform.| 19 | [MultimodalPupilLabsEEG/](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/MultimodalPupilLabsEEG)
![pseudo device not available shield](https://img.shields.io/badge/Pseudo_Device-Not_Available-blue) ![pupil required shield](https://img.shields.io/badge/Pupil_Labs_Hardware-Required-blue) ![iobio EEG device required shield](https://img.shields.io/badge/ioBio_EEG_Device-Required-blue) | Advanced example illustrating two devices: a pupil labs gaze device stream with a custom feature extractor class, and the Hull University ioBio EEG device, specifically channels Fp1 and Fp2. The PupilLabsRightLeftEyeClose folder has a YouTube video illustrating the multimodal example in action. | 20 | [PupilLabsRightLeftEyeClose/](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/PupilLabsRightLeftEyeClose/)
![pseudo device not available shield](https://img.shields.io/badge/Pseudo_Device-Not_Available-blue) ![pupil required shield](https://img.shields.io/badge/Pupil_Labs_Hardware-Required-blue) | Folder contains a basic Pupil Labs example used as the LSL input device, classifying left- and right-eye closure with a custom extractor class. RightLeftMarkers.py uses tkinter to generate visual on-screen stimuli for right, left or both eyes open and sends the same stimuli as LSL markers, ideal for testing the pupil-labs eye classifier. bciGazeExample.py illustrates how a 'simple' custom pupil-labs feature extractor class can be passed for the gaze data, where the mean pupil diameter is taken for each eye and for both eyes and used as feature data, with NaNs (no confidence) set to a value of 0. | 21 | [testEpochTimingsConfig.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/testEpochTimingsConfig.py)
![pseudo device available shield](https://img.shields.io/badge/Pseudo_Device-Available-blue) | Simple example showing custom global epoch settings changed on initialisation. Instead of epoching data from 0 to 1 second after the marker we take it from 0.5 seconds before to 0.5 seconds after the marker. | 22 | | [testPyTorch.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/testPyTorch.py)
![pseudo device available shield](https://img.shields.io/badge/Pseudo_Device-Available-blue) | Provides an example of how to use a PyTorch neural-net model as the classifier (testRaw.py also has a PyTorch example with a CNN). | 23 | [testRaw.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/testRaw.py)
![pseudo device available shield](https://img.shields.io/badge/Pseudo_Device-Available-blue) | This example shows how a raw time series across multiple channels can be used as an input, by utilising a custom feature extractor class combined with a custom CNN PyTorch model when initialising PyBCI. The raw data from the data receiver threads comes in the form [samples, channels]. The receiver threads slice data based on relative timestamps, so the number of samples in the buffer for each window varies with how frequently the device pushes LSL chunks; to mitigate this, a desired length is set and the data is trimmed to the size the created model expects. Multiple channels are also dropped (with the PseudoLSLGenerator in mind) to reduce computational complexity, as a raw time series over large windows gives the neural net a lot of parameters to train. | 24 | [testSimple.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/testSimple.py)
![pseudo device available shield](https://img.shields.io/badge/Pseudo_Device-Available-blue) | Provides the simplest setup, where no specific streams or epoch settings are given; everything defaults to the sklearn SVM classifier and the default GlobalEpochSettings. | 25 | [testSklearn.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/testSklearn.py)
![pseudo device available shield](https://img.shields.io/badge/Pseudo_Device-Available-blue) | Similar to testSimple.py, but allows a custom sklearn classifier to be used. It sets individual time windows and configures data stream channels, epoch window settings, and machine learning settings before connecting to BCI and switching between training and test modes. | 26 | | [testTensorflow.py](https://github.com/LMBooth/pybci/blob/main/pybci/Examples/testTensorflow.py)
![pseudo device available shield](https://img.shields.io/badge/Pseudo_Device-Available-blue) | Similar to testSimple.py, but allows for a custom TensorFlow model to be used. It establishes a connection to the BCI, starts training on received epochs, checks the classifier's accuracy, and then switches to test mode to predict the current marker. | 27 | 28 | -------------------------------------------------------------------------------- /pybci/Examples/separatePseudoDevice.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci.Utils.PseudoDevice import PseudoDeviceController 3 | 4 | # Initialize pd globally 5 | pd = None 6 | 7 | def run_pseudo(): 8 | global pd 9 | pd = PseudoDeviceController(execution_mode="process") 10 | pd.BeginStreaming() 11 | 12 | if __name__ == '__main__': 13 | try: 14 | run_pseudo() 15 | while True: # Loop indefinitely 16 | time.sleep(0.5) # Sleep to prevent this loop from consuming too much CPU 17 | except KeyboardInterrupt: 18 | # Safely handle pd to ensure it's not None and StopStreaming method is available 19 | if pd and hasattr(pd, 'StopStreaming'): 20 | pd.StopStreaming() 21 | print("KeyboardInterrupt has been caught. Stopping the script.") 22 | -------------------------------------------------------------------------------- /pybci/Examples/testEpochTimingsConfig.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | from pybci.Configuration.EpochSettings import GlobalEpochSettings 4 | 5 | gs = GlobalEpochSettings() 6 | gs.tmax = 1 # grab 1 second after marker 7 | gs.tmin = 0 # grab 0 seconds before marker 8 | gs.splitCheck = True # splits samples between tmin and tmax 9 | gs.windowLength = 0.5 # time in seconds of each split window 10 | gs.windowOverlap = 0.5 # windows overlap by 50% 11 | 12 | if __name__ == '__main__': # Note: this line is needed when calling pseudoDevice as by default it runs in a multiprocessed operation 13 | bci = PyBCI(minimumEpochsRequired = 4, createPseudoDevice=True, globalEpochSettings=gs, loggingLevel = "TIMING") 14 | while not bci.connected: # check to see if lsl marker and datastream are available 15 | bci.Connect() 16 | time.sleep(1) 17 | bci.TrainMode() # now both marker and datastreams available start training on received epochs 18 | accuracy = 0 19 | try: 20 | while(True): 21 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 22 | time.sleep(0.5) # wait for marker updates 23 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 24 | if len(currentMarkers) > 1: # check there is more than one marker type received 25 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 26 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 27 | accuracy = classInfo["accuracy"] 28 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 29 | bci.TestMode() 30 | break 31 | while True: 32 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 33 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 34 | print("Current marker estimation: " + str(guess), end=" \r") 35 | time.sleep(0.2) 36 | except KeyboardInterrupt: # allow user to break while loop 37 | print("\nLoop interrupted by user.") 38 | bci.StopThreads() 39 |
-------------------------------------------------------------------------------- /pybci/Examples/testPyTorch.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | import torch 4 | from torch.utils.data import DataLoader, TensorDataset 5 | from torch import nn 6 | 7 | class SimpleNN(nn.Module): 8 | def __init__(self, input_size, hidden_size, num_classes): 9 | super(SimpleNN, self).__init__() 10 | self.fc1 = nn.Linear(input_size, hidden_size) 11 | self.bn1 = nn.BatchNorm1d(hidden_size) 12 | self.relu = nn.ReLU(inplace=True) # In-place operation 13 | self.fc2 = nn.Linear(hidden_size, hidden_size) 14 | self.bn2 = nn.BatchNorm1d(hidden_size) 15 | self.fc3 = nn.Linear(hidden_size, num_classes) 16 | 17 | def forward(self, x): 18 | out = self.fc1(x) 19 | if out.shape[0] > 1: # Skip BatchNorm if batch size is 1 20 | out = self.bn1(out) 21 | out = self.relu(out) 22 | out = self.fc2(out) 23 | if out.shape[0] > 1: # Skip BatchNorm if batch size is 1 24 | out = self.bn2(out) 25 | out = self.relu(out) 26 | out = self.fc3(out) 27 | return out 28 | def PyTorchModel(x_train, x_test, y_train, y_test): 29 | input_size = 2*8 # num of channels multipled by number of default features (rms and mean freq) 30 | hidden_size = 100 31 | num_classes = 4 # default in pseudodevice 32 | model = SimpleNN(input_size, hidden_size, num_classes) 33 | model.train() 34 | criterion = nn.CrossEntropyLoss() 35 | optimizer = torch.optim.Adam(model.parameters(), lr=0.001) 36 | epochs = 10 37 | train_data = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train).long()) 38 | train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True, drop_last=True) # Drop last incomplete batch 39 | for epoch in range(epochs): 40 | for inputs, labels in train_loader: 41 | optimizer.zero_grad() 42 | outputs = model(inputs) 43 | loss = criterion(outputs, labels) 44 | loss.backward() 45 | optimizer.step() 46 | model.eval() 47 | accuracy = 0 48 | with torch.no_grad(): 49 | test_outputs = model(torch.Tensor(x_test)) 50 | _, predicted = torch.max(test_outputs.data, 1) 51 | correct = (predicted == torch.Tensor(y_test).long()).sum().item() 52 | accuracy = correct / len(y_test) 53 | return accuracy, model 54 | 55 | if __name__ == '__main__': # Note: this line is needed when calling pseudoDevice as by default runs in a multiprocessed operation 56 | bci = PyBCI(minimumEpochsRequired = 4, createPseudoDevice=True, torchModel = PyTorchModel) 57 | while not bci.connected: # check to see if lsl marker and datastream are available 58 | bci.Connect() 59 | time.sleep(1) 60 | bci.TrainMode() # now both marker and datastreams available start training on received epochs 61 | accuracy = 0 62 | try: 63 | while(True): 64 | currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing 65 | time.sleep(0.5) # wait for marker updates 66 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 67 | if len(currentMarkers) > 1: # check there is more then one marker type received 68 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 69 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 70 | accuracy = classInfo["accuracy"] 71 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 72 | bci.TestMode() 73 | break 74 | while True: 75 | markerGuess = 
bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 76 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 77 | print("Current marker estimation: " + str(guess), end=" \r") 78 | time.sleep(0.2) 79 | except KeyboardInterrupt: # allow user to break while loop 80 | print("\nLoop interrupted by user.") 81 | bci.StopThreads() 82 | -------------------------------------------------------------------------------- /pybci/Examples/testRaw.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import time 3 | from torch.utils.data import DataLoader, TensorDataset 4 | from torch import nn 5 | from pybci import PyBCI 6 | import numpy as np 7 | 8 | num_chs = 3 # 8 channels are created in the PseudoLSLGenerator, but we drop 5 to save compute (real-time CNN can be computationally heavy!) 9 | sum_samps = 125 # sample rate is 250 in the PseudoLSLGenerator, so 125 samples is one 0.5 second window 10 | num_classes = 4 # number of different triggers (can include baseline) sent, defines whether we use softmax or binary output 11 | class ConvNet(nn.Module): 12 | def __init__(self, num_channels, num_samples, num_classes): 13 | super(ConvNet, self).__init__() 14 | self.conv1 = nn.Conv1d(num_channels, 64, kernel_size=5, stride=1, padding=2) 15 | self.relu = nn.ReLU() 16 | self.pool = nn.MaxPool1d(kernel_size=2) 17 | self.conv2 = nn.Conv1d(64, 128, kernel_size=5, stride=1, padding=2) 18 | self.fc = nn.Linear(int(num_samples/2/2)*128, num_classes) # Depending on your pooling and stride you might need to adjust the input size here 19 | def forward(self, x): 20 | out = self.conv1(x) 21 | out = self.relu(out) 22 | out = self.pool(out) 23 | out = self.conv2(out) 24 | out = self.relu(out) 25 | out = self.pool(out) 26 | out = out.reshape(out.size(0), -1) 27 | out = self.fc(out) 28 | return out 29 | 30 | def PyTorchModel(x_train, x_test, y_train, y_test ): 31 | model = ConvNet(num_chs, sum_samps, num_classes) 32 | model.train() 33 | criterion = nn.CrossEntropyLoss() 34 | optimizer = torch.optim.Adam(model.parameters(), lr=0.001) 35 | epochs = 10 36 | train_data = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train).long()) 37 | train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True) 38 | for epoch in range(epochs): 39 | for inputs, labels in train_loader: 40 | optimizer.zero_grad() 41 | outputs = model(inputs) 42 | loss = criterion(outputs, labels) 43 | loss.backward() 44 | optimizer.step() 45 | model.eval() 46 | accuracy = 0 47 | with torch.no_grad(): 48 | test_outputs = model(torch.Tensor(x_test)) 49 | _, predicted = torch.max(test_outputs.data, 1) 50 | correct = (predicted == torch.Tensor(y_test).long()).sum().item() 51 | accuracy = correct / len(y_test) 52 | return accuracy, model # must return accuracy and model for PyTorch! 
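# Illustrative sanity check (added comment, not in the original file): with sum_samps = 125 and
# two MaxPool1d(kernel_size=2) layers, the tensor reaching self.fc is floor(125/2/2) = 31 samples
# long with 128 channels, so nn.Linear above expects int(num_samples/2/2)*128 = 31*128 = 3968 inputs.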
53 | 54 | class RawDecode(): 55 | desired_length = sum_samps 56 | def ProcessFeatures(self, epochData, sr, target): 57 | d = epochData.T 58 | if self.desired_length == 0: # needed as windows may be differing sizes due to timestamp variance on LSL 59 | self.desired_length = d.shape[1] 60 | if d.shape[1] != self.desired_length: 61 | d = np.resize(d, (d.shape[0],self.desired_length)) 62 | return d 63 | 64 | dropchs = [x for x in range(3,8)] # drop last 5 channels to save on compute time 65 | streamChsDropDict={"PyBCIPseudoDataStream":dropchs} #streamChsDropDict=streamChsDropDict, 66 | streamCustomFeatureExtract = {"PyBCIPseudoDataStream" : RawDecode()} # we select the pseudo LSL generator example 67 | 68 | if __name__ == '__main__': # Note: this line is needed when using the pseudo device, as it runs as a multiprocessed operation by default 69 | bci = PyBCI(minimumEpochsRequired = 4, createPseudoDevice=True, streamCustomFeatureExtract=streamCustomFeatureExtract, torchModel = PyTorchModel,streamChsDropDict=streamChsDropDict)#, loggingLevel = Logger.TIMING) 70 | while not bci.connected: # check to see if LSL marker and data streams are available 71 | bci.Connect() 72 | time.sleep(1) 73 | bci.TrainMode() # now both marker and data streams are available, start training on received epochs 74 | accuracy = 0 75 | try: 76 | while(True): 77 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 78 | time.sleep(0.5) # wait for marker updates 79 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 80 | if len(currentMarkers) > 1: # check there is more than one marker type received 81 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 82 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 83 | accuracy = classInfo["accuracy"] 84 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 85 | bci.TestMode() 86 | break 87 | while True: 88 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 89 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 90 | print("Current marker estimation: " + str(guess), end=" \r") 91 | time.sleep(0.2) 92 | except KeyboardInterrupt: # allow user to break while loop 93 | print("\nLoop interrupted by user.") 94 | bci.StopThreads() -------------------------------------------------------------------------------- /pybci/Examples/testSimple.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | import time 3 | 4 | if __name__ == '__main__': # Note: this line is needed when using the pseudo device, as it runs as a multiprocessed operation by default 5 | bci = PyBCI(minimumEpochsRequired = 5, createPseudoDevice=True) 6 | while not bci.connected: # check to see if LSL marker and data streams are available 7 | bci.Connect() 8 | time.sleep(1) 9 | bci.TrainMode() # now both marker and data streams are available, start training on received epochs 10 | accuracy = 0 11 | try: 12 | while(True): 13 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 14 | time.sleep(0.1) # wait for marker updates 15 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 16 | if len(currentMarkers) > 1: # check there is more than 
one marker type received 17 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 18 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 19 | accuracy = classInfo["accuracy"] 20 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 21 | bci.TestMode() 22 | break 23 | while True: 24 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 25 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 26 | print("Current marker estimation: " + str(guess), end=" \r") 27 | time.sleep(0.2) 28 | except KeyboardInterrupt as e: # allow user to break while loop 29 | print("\nLoop interrupted by user. " + str(e)) 30 | bci.StopThreads() 31 | -------------------------------------------------------------------------------- /pybci/Examples/testSklearn.py: -------------------------------------------------------------------------------- 1 | from pybci import PyBCI 2 | import time 3 | from sklearn.neural_network import MLPClassifier 4 | clf = MLPClassifier(max_iter = 1000, solver ="lbfgs")#solver=clf, alpha=alpha,hidden_layer_sizes=hid) 5 | 6 | if __name__ == '__main__': # Note: this line is needed when using the pseudo device, as it runs as a multiprocessed operation by default 7 | bci = PyBCI(minimumEpochsRequired = 4, createPseudoDevice=True, clf = clf) 8 | while not bci.connected: # check to see if LSL marker and data streams are available 9 | bci.Connect() 10 | time.sleep(1) 11 | bci.TrainMode() # now both marker and data streams are available, start training on received epochs 12 | accuracy = 0 13 | try: 14 | while(True): 15 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 16 | time.sleep(0.5) # wait for marker updates 17 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 18 | if len(currentMarkers) > 1: # check there is more than one marker type received 19 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 20 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 21 | accuracy = classInfo["accuracy"] 22 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 23 | bci.TestMode() 24 | break 25 | while True: 26 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 27 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 28 | print("Current marker estimation: " + str(guess), end=" \r") 29 | time.sleep(0.2) 30 | except KeyboardInterrupt: # allow user to break while loop 31 | print("\nLoop interrupted by user.") 32 | bci.StopThreads() 33 | -------------------------------------------------------------------------------- /pybci/Examples/testTensorflow.py: -------------------------------------------------------------------------------- 1 | import time 2 | from pybci import PyBCI 3 | import tensorflow as tf # bring in tf for custom model creation 4 | import os 5 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 6 | num_chs = 8 # 8 channels are created in the PseudoLSLGenerator 7 | num_feats = 2 # default is mean freq and rms to keep it simple 8 | num_classes = 4 # number of different triggers (can include baseline) sent, defines whether we use softmax or binary output 9 | # Define the GRU model 10 | model = tf.keras.Sequential() 11 | 
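# Illustrative note (added comment, not in the original file): the default feature extractor yields
# num_chs*num_feats = 8*2 = 16 values per epoch; the Reshape below presents them to the GRU as 16
# timesteps of width 1 so the recurrent layer can treat the feature vector as a sequence.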
model.add(tf.keras.layers.Reshape((num_chs*num_feats, 1), input_shape=(num_chs*num_feats,))) 12 | model.add(tf.keras.layers.GRU(units=256))#, input_shape=num_chs*num_feats)) # maybe should show this example as 2d with toggleable timesteps disabled 13 | model.add(tf.keras.layers.Dense(units=512, activation='relu')) 14 | model.add(tf.keras.layers.Flatten())# )tf.keras.layers.Dense(units=128, activation='relu')) 15 | model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax')) # softmax as more than binary classification (sparse_categorical_crossentropy) 16 | #model.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) # sigmoid as binary classification (binary_crossentropy) 17 | model.summary() 18 | model.compile(loss='sparse_categorical_crossentropy',# using sparse_categorical as we expect multi-class (>2) output, sparse because we encode target values with integers 19 | optimizer='adam', 20 | metrics=['accuracy']) 21 | 22 | if __name__ == '__main__': # Note: this line is needed when using the pseudo device, as it runs as a multiprocessed operation by default 23 | bci = PyBCI(minimumEpochsRequired = 4, createPseudoDevice=True, model = model) 24 | while not bci.connected: # check to see if LSL marker and data streams are available 25 | bci.Connect() 26 | time.sleep(1) 27 | bci.TrainMode() # now both marker and data streams are available, start training on received epochs 28 | accuracy = 0 29 | try: 30 | while(True): 31 | currentMarkers = bci.ReceivedMarkerCount() # check how many epochs have been received; markers sent too close together are ignored until processing is done 32 | time.sleep(0.5) # wait for marker updates 33 | print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end=" \r") 34 | if len(currentMarkers) > 1: # check there is more than one marker type received 35 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired: 36 | classInfo = bci.CurrentClassifierInfo() # hangs if called too early 37 | accuracy = classInfo["accuracy"] 38 | if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10: 39 | bci.TestMode() 40 | break 41 | while True: 42 | markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned 43 | guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess] 44 | print("Current marker estimation: " + str(guess), end=" \r") 45 | time.sleep(0.2) 46 | except KeyboardInterrupt: # allow user to break while loop 47 | print("\nLoop interrupted by user.") 48 | bci.StopThreads() 49 | 50 | -------------------------------------------------------------------------------- /pybci/ThreadClasses/ClassifierThread.py: -------------------------------------------------------------------------------- 1 | from ..Utils.Classifier import Classifier 2 | from ..Utils.Logger import Logger 3 | import queue 4 | import threading 5 | import time 6 | import numpy as np 7 | 8 | class ClassifierThread(threading.Thread): 9 | features = np.array([])#[] 10 | targets = np.array([]) 11 | mode = "train" 12 | guess = " " 13 | #epochCountsc = {} 14 | def __init__(self, closeEvent,trainTestEvent, featureQueueTest,featureQueueTrain, classifierInfoQueue, classifierInfoRetrieveEvent, 15 | classifierGuessMarkerQueue, classifierGuessMarkerEvent, queryFeaturesQueue, queryFeaturesEvent, 16 | logger = Logger(Logger.INFO), numStreamDevices = 1, 17 | minRequiredEpochs = 10, clf = None, model = None, torchModel = None): 18 | super().__init__() 19 | self.trainTestEvent = 
trainTestEvent # responsible for toggling between train and test mode 20 | self.closeEvent = closeEvent # responsible for closing threads 21 | self.featureQueueTest = featureQueueTest # gets feature data from feature processing thread 22 | self.featureQueueTrain = featureQueueTrain # gets feature data from feature processing thread 23 | self.classifier = Classifier(clf = clf, model = model, torchModel = torchModel) # sets classifier class, if clf and model passed, defaults to clf and sklearn 24 | self.minRequiredEpochs = minRequiredEpochs # the minimum number of epochs required before a classifier attempt 25 | self.classifierInfoRetrieveEvent = classifierInfoRetrieveEvent 26 | self.classifierInfoQueue = classifierInfoQueue 27 | self.classifierGuessMarkerQueue = classifierGuessMarkerQueue 28 | self.classifierGuessMarkerEvent = classifierGuessMarkerEvent 29 | self.queryFeaturesQueue = queryFeaturesQueue 30 | self.queryFeaturesEvent = queryFeaturesEvent 31 | self.numStreamDevices = numStreamDevices 32 | self.logger = logger 33 | 34 | def run(self): 35 | epochCountsc={} 36 | if self.numStreamDevices > 1: 37 | tempdatatrain = {} 38 | tempdatatest = {} 39 | while not self.closeEvent.is_set(): 40 | if self.trainTestEvent.is_set(): # We're training! 41 | if self.featureQueueTrain.empty(): 42 | if len(epochCountsc) > 1: # check if there is more than one test condition 43 | minNumKeyEpochs = min([epochCountsc[key][1] for key in epochCountsc]) # check the minimum viable number of training epochs has been obtained 44 | if minNumKeyEpochs < self.minRequiredEpochs: 45 | pass 46 | else: 47 | start = time.time() 48 | self.classifier.TrainModel(self.features, self.targets) 49 | if (self.logger.level == Logger.TIMING): 50 | end = time.time() 51 | self.logger.log(Logger.TIMING, f" classifier training time {end - start}") 52 | if self.classifierGuessMarkerEvent.is_set(): 53 | self.classifierGuessMarkerQueue.put(self.guess) 54 | else: 55 | try: 56 | featuresSingle, devCount, target, epochCountsc = self.featureQueueTrain.get_nowait() #[dataFIFOs, self.currentMarker, self.sr, self.dataType] 57 | if self.numStreamDevices > 1: # Collects feature sets from multiple data streams and synchronises them here 58 | tempdatatrain[devCount] = featuresSingle 59 | if len(tempdatatrain) == self.numStreamDevices: 60 | flattened_list = np.array([item for sublist in tempdatatrain.values() for item in sublist]) 61 | tempdatatrain = {} 62 | self.targets = np.append(self.targets, [target], axis = 0) 63 | #self.features = np.append(self.features, [flattened_list], axis = 0) 64 | if self.features.shape[0] == 0: 65 | self.features = self.features.reshape((0,) + flattened_list.shape) 66 | self.features = np.append(self.features, [flattened_list], axis=0) 67 | # need to check if all device data is captured, then flatten and append 68 | else: # Only one device to collect from 69 | if self.features.shape[0] == 0: 70 | self.features = self.features.reshape((0,) + featuresSingle.shape) 71 | self.targets = np.append(self.targets, [target], axis = 0) 72 | self.features = np.append(self.features, [featuresSingle], axis = 0) 73 | except queue.Empty: 74 | pass 75 | else: # We're testing! 
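# Illustrative note (added comment, not in the original file): in test mode, feature sets from each
# device are buffered in tempdatatest until all numStreamDevices have reported, then flattened into
# a single vector and passed to Classifier.TestModel for a marker guess.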
76 | try: 77 | featuresSingle, devCount = self.featureQueueTest.get_nowait() #[dataFIFOs, self.currentMarker, self.sr, self.dataType] 78 | if self.numStreamDevices > 1: 79 | tempdatatest[devCount] = featuresSingle 80 | if len(tempdatatest) == self.numStreamDevices: 81 | flattened_list = [] 82 | flattened_list = np.array([item for sublist in tempdatatest.values() for item in sublist]) 83 | tempdatatest = {} 84 | start = time.time() 85 | self.guess = self.classifier.TestModel(flattened_list) 86 | if (self.logger.level == Logger.TIMING): 87 | end = time.time() 88 | self.logger.log(Logger.TIMING, f" classifier testing time {end - start}") 89 | else: 90 | start = time.time() 91 | self.guess = self.classifier.TestModel(featuresSingle) 92 | if (self.logger.level == Logger.TIMING): 93 | end = time.time() 94 | self.logger.log(Logger.TIMING, f" classifier testing time {end - start}") 95 | if self.classifierGuessMarkerEvent.is_set(): 96 | self.classifierGuessMarkerQueue.put(self.guess) 97 | except queue.Empty: 98 | pass 99 | if self.classifierInfoRetrieveEvent.is_set(): 100 | a = self.classifier.accuracy 101 | classdata = { 102 | "clf":self.classifier.clf, 103 | "model":self.classifier.model, 104 | "torchModel":self.classifier.torchModel, 105 | "accuracy":a 106 | } 107 | self.classifierInfoQueue.put(classdata) 108 | if self.queryFeaturesEvent.is_set(): 109 | featureData = { 110 | "features":self.features, 111 | "targets":self.targets 112 | } 113 | self.queryFeaturesQueue.put(featureData) -------------------------------------------------------------------------------- /pybci/ThreadClasses/FeatureProcessorThread.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import queue 3 | import time 4 | 5 | from ..Utils.Logger import Logger 6 | from ..Configuration.EpochSettings import GlobalEpochSettings 7 | 8 | class FeatureProcessorThread(threading.Thread): 9 | tempDeviceEpochLogger = [] 10 | def __init__(self, closeEvent, trainTestEvent, dataQueueTrain,dataQueueTest, 11 | featureQueueTest,featureQueueTrain, totalDevices,markerCountRetrieveEvent,markerCountQueue, customEpochSettings = {}, 12 | globalEpochSettings = GlobalEpochSettings(),logger = Logger(Logger.INFO), 13 | featureExtractor = None): 14 | super().__init__() 15 | if featureExtractor is None: 16 | from ..Utils.FeatureExtractor import GenericFeatureExtractor 17 | featureExtractor = GenericFeatureExtractor() 18 | self.markerCountQueue = markerCountQueue 19 | self.trainTestEvent = trainTestEvent 20 | self.closeEvent = closeEvent 21 | self.dataQueueTrain = dataQueueTrain 22 | self.dataQueueTest = dataQueueTest 23 | self.featureQueueTrain = featureQueueTrain 24 | self.featureQueueTest = featureQueueTest 25 | self.featureExtractor = featureExtractor 26 | self.logger = logger 27 | self.totalDevices = totalDevices 28 | self.markerCountRetrieveEvent = markerCountRetrieveEvent 29 | self.epochCounts = {} 30 | self.customEpochSettings = customEpochSettings 31 | self.globalWindowSettings = globalEpochSettings 32 | self.tempDeviceEpochLogger = [0 for x in range(self.totalDevices)] 33 | 34 | def run(self): 35 | while not self.closeEvent.is_set(): 36 | if self.markerCountRetrieveEvent.is_set(): 37 | self.markerCountQueue.put(self.epochCounts) 38 | if self.trainTestEvent.is_set(): # We're training! 
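# Illustrative note (added comment, not in the original file): epochCounts maps each marker string
# to [classIndex, count]; the class index doubles as the integer target handed to the feature
# extractor and, downstream, to the classifier.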
39 | try: 40 | dataFIFOs, currentMarker, sr, devCount = self.dataQueueTrain.get_nowait() #[sliceDataFIFOs, self.currentMarker, self.sr, self.devCount 41 | if currentMarker in self.epochCounts: 42 | self.epochCounts[currentMarker][1] += 1 43 | else: 44 | self.epochCounts[currentMarker] = [len(self.epochCounts.keys()),1] 45 | target = self.epochCounts[currentMarker][0] 46 | start = time.time() 47 | features = self.featureExtractor.ProcessFeatures(dataFIFOs, sr, target) # allows custom epoch class to be passed 48 | if (self.logger.level == Logger.TIMING): 49 | end = time.time() 50 | self.logger.log(Logger.TIMING, " Feature Extraction time "+str(end - start)) 51 | if (end-start) >self.globalWindowSettings.windowLength: 52 | self.logger.log(Logger.WARNING, " Feature Extraction time > globalEpochSetting.windowLength, will create lag in classification output. Recommended to reduce channels, sampling rate, or features, or to reduce feature computational complexity.") 53 | self.featureQueueTrain.put( [features, devCount, target, dict(self.epochCounts)] ) 54 | except queue.Empty: 55 | pass 56 | else: 57 | try: 58 | dataFIFOs, sr, devCount = self.dataQueueTest.get_nowait() #[dataFIFOs, self.currentMarker, self.sr, ] 59 | start = time.time() 60 | features = self.featureExtractor.ProcessFeatures(dataFIFOs, sr, None) 61 | if (self.logger.level == Logger.TIMING): 62 | end = time.time() 63 | self.logger.log(Logger.TIMING, " Feature Extraction time "+str(end - start)) 64 | if (end-start) >self.globalWindowSettings.windowLength: 65 | self.logger.log(Logger.WARNING, " Feature Extraction time > globalEpochSetting.windowLength, will create lag in classification output. Recommended to reduce channels, sampling rate, or features, or to reduce feature computational complexity.") 66 | self.featureQueueTest.put([features, devCount]) 67 | except queue.Empty: 68 | pass 69 | -------------------------------------------------------------------------------- /pybci/ThreadClasses/MarkerThread.py: -------------------------------------------------------------------------------- 1 | import threading 2 | 3 | class MarkerThread(threading.Thread): 4 | """Receives markers on the chosen LSL marker outlet. Pushes markers to data threads for framing epochs, 5 | and sends markers to the feature processing thread for epoch counting and multiple device synchronisation. 6 | """ 7 | def __init__(self,closeEvent, trainTestEvent, markerStreamInlet, dataThreads, featureThreads):#, lock): 8 | super().__init__() 9 | self.trainTestEvent = trainTestEvent 10 | self.closeEvent = closeEvent 11 | self.markerStreamInlet = markerStreamInlet 12 | self.dataThreads = dataThreads 13 | self.featureThreads= featureThreads 14 | 15 | def run(self): 16 | while not self.closeEvent.is_set(): 17 | marker, timestamp = self.markerStreamInlet.pull_sample(timeout = 10) 18 | if self.trainTestEvent.is_set(): # We're training! 
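# Illustrative note (added comment, not in the original file): pull_sample returns (None, None) on
# timeout, so the marker is only forwarded to each data thread's ReceiveMarker when one actually arrived.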
19 | if marker is not None: 20 | marker = marker[0] 21 | for thread in self.dataThreads: 22 | thread.ReceiveMarker(marker, timestamp) 23 | #for thread in self.featureThreads: 24 | # thread.ReceiveMarker(marker, timestamp) 25 | else: 26 | pass 27 | # add levels of debug 28 | # print("PyBCI: LSL pull_sample timed out, no marker on stream...") 29 | -------------------------------------------------------------------------------- /pybci/ThreadClasses/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LMBooth/pybci/76c0f9f1dd77a83fda0cef003a5bedef13eb74bc/pybci/ThreadClasses/__init__.py -------------------------------------------------------------------------------- /pybci/Utils/Classifier.py: -------------------------------------------------------------------------------- 1 | from sklearn.preprocessing import StandardScaler 2 | from sklearn.model_selection import train_test_split 3 | import numpy as np 4 | from sklearn import svm 5 | from sklearn.metrics import accuracy_score 6 | from torch import Tensor, no_grad, argmax 7 | class Classifier(): 8 | classifierLibrary = "sklearn" # current default, should be none or something different? 9 | clf = svm.SVC(kernel = "rbf")#C=c, kernel=k, degree=d, gamma=g, coef0=c0, tol=t, max_iter=i) 10 | accuracy = 0 11 | model = None 12 | torchModel = None 13 | 14 | def __init__(self, clf = None, model = None, torchModel = None): 15 | super().__init__() 16 | if clf is not None: 17 | 18 | self.clf = clf 19 | elif model is not None: 20 | self.model = model 21 | elif torchModel is not None: 22 | self.torchModel = torchModel 23 | self.CheckClassifierLibrary() 24 | 25 | def CheckClassifierLibrary(self): 26 | if self.model is not None: # maybe requires actual check for tensorflow model 27 | self.classifierLibrary = "tensor" 28 | elif self.torchModel is not None: # maybe requires actual check for pytorch model 29 | self.classifierLibrary = "pyTorch" 30 | elif self.clf is not None: # maybe requires actual check for sklearn clf 31 | self.classifierLibrary = "sklearn" 32 | 33 | def TrainModel(self, features, targets): 34 | x_train, x_test, y_train, y_test = train_test_split(features, targets, shuffle = True, test_size=0.2) 35 | #print(features.shape) 36 | #print(x_train.shape) 37 | if len(features.shape)==3: 38 | self.scaler = [StandardScaler() for scaler in range(features.shape[2])] # normalise our data (everything is a 0 or a 1 if you think about it, cheers georgey boy boole) 39 | for e in range(features.shape[2]): # this normalises per channel, maybe better to normalise across the other dimension 40 | x_train_channel = x_train[:,:,e].reshape(-1, 1) 41 | x_test_channel = x_test[:,:,e].reshape(-1, 1) 42 | x_train[:,:,e] = self.scaler[e].fit_transform(x_train_channel).reshape(x_train[:,:,e].shape) 43 | x_test[:,:,e] = self.scaler[e].transform(x_test_channel).reshape(x_test[:,:,e].shape) 44 | #x_train[:,:,e] = self.scaler[e].fit_transform(x_train[:,:,e]) # Compute the mean and standard deviation based on the training data 45 | #x_test[:,:,e] = self.scaler[e].transform(x_test[:,:,e]) # Scale the test data 46 | elif len(features.shape)== 2: 47 | self.scaler = StandardScaler() # normalise our data (everything is a 0 or a 1 if you think about it, cheers georgey boy boole) 48 | x_train = self.scaler.fit_transform(x_train) # Compute the mean and standard deviation based on the training data 49 | x_test = self.scaler.transform(x_test) # Scale the test data 50 | if all(item == y_train[0] for item in 
y_train): 51 | pass 52 | else: 53 | #print(x_train, y_train) 54 | if self.classifierLibrary == "pyTorch": 55 | self.accuracy, self.pymodel = self.torchModel(x_train, x_test, y_train, y_test) 56 | elif self.classifierLibrary == "sklearn": 57 | self.clf.fit(x_train, y_train) 58 | y_predictions = self.clf.predict(x_test) 59 | self.accuracy = accuracy_score(y_test, y_predictions) 60 | elif self.classifierLibrary == "tensor": 61 | self.model.fit(np.array(x_train), np.array(y_train), verbose=0) # epochs and batch_size should be customisable 62 | self.loss, self.accuracy = self.model.evaluate(np.array(x_test), np.array(y_test), verbose=0) 63 | else: 64 | # no classifier library selected, print debug? 65 | pass 66 | 67 | def TestModel(self, x): 68 | if len(x.shape)==2: 69 | for e in range(x.shape[1]): 70 | x[:,e] = self.scaler[e].transform(x[:,e].reshape(-1, 1)).reshape(x[:,e].shape) 71 | #x[:,e] = self.scaler[e].transform([x[:,e]])[0] 72 | elif len(x.shape)== 1: 73 | x = self.scaler.transform([x])[0] # Scale the test data 74 | if self.classifierLibrary == "sklearn": 75 | x = np.expand_dims(x, axis=0) 76 | return self.clf.predict(x) 77 | elif self.classifierLibrary == "tensor": 78 | x = np.expand_dims(x, axis=0) 79 | predictions = self.model.predict(x, verbose=0) 80 | if len (predictions[0]) == 1: # assume binary classification 81 | return 1 if predictions[0] > 0.5 else 0 82 | else: # assume multi-classification 83 | return np.argmax(predictions[0]) 84 | elif self.classifierLibrary == "pyTorch": 85 | x = Tensor(np.expand_dims(x, axis=0)) 86 | self.pymodel.eval() 87 | with no_grad(): 88 | predictions = self.pymodel(x) 89 | if len (predictions[0]) == 1: # assume binary classification 90 | return 1 if predictions[0] > 0.5 else 0 91 | else: # assume multi-classification 92 | return argmax(predictions).item() 93 | 94 | else: 95 | print("no classifier library selected") 96 | # no classifier library selected, print debug? 97 | pass 98 | 99 | ''' 100 | def UpdateModel(self, featuresSingle, target): 101 | # function currently not used, may be redundant, means thread function hold feature and target variables and passes reference to here, 102 | # would be better to hold in classifier class? 103 | featuresSingle = np.where(np.isnan(featuresSingle), 0, featuresSingle) 104 | if (len(np.array(self.features).shape) ==3): 105 | features = np.array(features).reshape(np.array(features).shape[0], -1) 106 | self.features = np.vstack([self.features, featuresSingle]) 107 | self.targets = np.hstack([self.targets, target]) 108 | if self.classifierLibrary == "sklearn": 109 | # Update the model with new data using partial_fit 110 | self.clf.fit(self.features, self.targets) #, classes=np.unique(target)) 111 | self.accuracy = self.clf.score(self.x_test, self.y_test) 112 | elif self.classifierLibrary == "tensor": 113 | self.model.fit(featuresSingle, target, epochs=1, batch_size=32) 114 | self.loss, self.accuracy = self.model.evaluate(self.x_test, self.y_test) 115 | else: 116 | # no classifier library selected, print debug? 
117 | pass 118 | ''' -------------------------------------------------------------------------------- /pybci/Utils/LSLScanner.py: -------------------------------------------------------------------------------- 1 | from pylsl import StreamInlet, resolve_stream 2 | from ..Utils.Logger import Logger 3 | 4 | class LSLScanner: 5 | streamTypes = ["EEG", "ECG", "EMG", "Gaze"] # list of strings, holds desired LSL stream types 6 | markerTypes = ["Markers"] # list of strings, holds desired LSL marker types 7 | dataStreams = [] # list of data StreamInlets, available on LSL as chosen by streamTypes 8 | markerStream = [] # list of marker StreamInlets, available on LSL as chosen by markerTypes 9 | markerStreamPredefined = False 10 | dataStreamPredefined = False 11 | 12 | def __init__(self,parent, dataStreamsNames = None, markerStreamName = None, streamTypes = None, markerTypes = None, logger = Logger(Logger.INFO)): 13 | """ 14 | Initialises LSLScanner, accepts custom data and marker stream names to search for; if valid, these can be obtained after scans with LSLScanner.dataStreams and LSLScanner.markerStream. 15 | Parameters: 16 | dataStreamsNames (List of strings): allows user to set custom acceptable data stream names, if None all streams matching streamTypes are accepted 17 | markerStreamName (string): allows user to set custom acceptable marker stream name, if None the first stream matching markerTypes is accepted 18 | streamTypes (List of strings): allows user to set custom acceptable data stream type definitions 19 | markerTypes (List of strings): allows user to set custom acceptable marker stream type definitions 20 | logger (pybci.Logger): Custom PyBCI Logger class, defaults to Logger.INFO if not set, which prints all pybci messages. 
21 | """ 22 | self.parent = parent 23 | if streamTypes is not None: 24 | self.streamTypes = streamTypes 25 | if markerTypes is not None: 26 | self.markerTypes = markerTypes 27 | self.logger = logger 28 | if dataStreamsNames is not None: 29 | self.dataStreamPredefined = True 30 | self.dataStreamsNames = dataStreamsNames 31 | else: 32 | self.ScanDataStreams() 33 | if markerStreamName is not None: 34 | self.markerStreamPredefined = True 35 | self.markerStreamName = markerStreamName 36 | else: 37 | self.ScanMarkerStreams() 38 | 39 | def ScanStreams(self): 40 | """Scans LSL for both data and marker channels.""" 41 | self.ScanDataStreams() 42 | self.ScanMarkerStreams() 43 | 44 | def ScanDataStreams(self): 45 | """Scans available LSL streams and appends inlet to self.dataStreams""" 46 | streams = resolve_stream() 47 | dataStreams = [] 48 | self.dataStreams = [] 49 | for stream in streams: 50 | if stream.type() in self.streamTypes: 51 | dataStreams.append(StreamInlet(stream)) 52 | if self.dataStreamPredefined: 53 | for s in dataStreams: 54 | name = s.info().name() 55 | if name not in self.dataStreamsNames: 56 | self.logger.log(Logger.WARNING," Predefined LSL Data Stream name not present.") 57 | self.logger.log(Logger.WARNING, " Available Streams: "+str([s.info().name() for s in dataStreams])) 58 | else: 59 | self.dataStreams.append(s) 60 | else: # just add all datastreams as none were specified 61 | self.dataStreams = dataStreams 62 | 63 | def ScanMarkerStreams(self): 64 | """Scans available LSL streams and appends inlet to self.markerStreams""" 65 | streams = resolve_stream() 66 | markerStreams = [] 67 | self.markerStream = None 68 | for stream in streams: 69 | self.logger.log(Logger.INFO," Found stream name: " + stream.name()) 70 | self.logger.log(Logger.INFO," Found stream type: " + stream.type()) 71 | if stream.type() in self.markerTypes: 72 | markerStreams.append(StreamInlet(stream)) 73 | if self.markerStreamPredefined: 74 | if len(markerStreams) > 1: 75 | self.logger.log(Logger.WARNING," Too many Marker streams available, set single desired markerStream in bci.lslScanner.markerStream correctly.") 76 | for s in markerStreams: 77 | name = s.info().name() 78 | if name != self.markerStreamName: 79 | self.logger.log(Logger.WARNING," Predefined LSL Marker Stream name not present.") 80 | self.logger.log(Logger.WARNING, " Available Streams: "+str([s.info().name() for s in markerStreams])) 81 | else: 82 | self.markerStream = s 83 | else: 84 | if len(markerStreams) > 0: 85 | self.markerStream = markerStreams[0] # if none specified grabs first avaialble marker stream 86 | 87 | def CheckAvailableLSL(self): 88 | """Checks streaminlets available, 89 | Returns 90 | ------- 91 | bool : 92 | True if 1 marker stream present and available datastreams are present. 93 | False if no datastreams are present and/or more or less then one marker stream is present, requires hard selection or markser stream if too many. 
94 | """ 95 | self.ScanStreams() 96 | if self.markerStream is None: 97 | self.logger.log(Logger.WARNING," No Marker streams available, make sure your accepted marker data Type have been set in bci.lslScanner.markerTypes correctly.") 98 | if len(self.dataStreams) == 0: 99 | self.logger.log(Logger.WARNING," No data streams available, make sure your streamTypes have been set in bci.lslScanner.dataStream correctly.") 100 | if len(self.dataStreams) > 0 and self.markerStream is not None: 101 | self.logger.log(Logger.INFO," Success - "+str(len(self.dataStreams))+" data stream(s) found, 1 marker stream found") 102 | 103 | if len(self.dataStreams) > 0 and self.markerStream is not None: 104 | self.parent.dataStreams = self.dataStreams 105 | self.parent.markerStream = self.markerStream 106 | return True 107 | else: 108 | return False -------------------------------------------------------------------------------- /pybci/Utils/Logger.py: -------------------------------------------------------------------------------- 1 | class Logger: 2 | INFO = "INFO" 3 | WARNING = "WARNING" 4 | NONE = "NONE" 5 | TIMING = "TIMING" 6 | 7 | def __init__(self, level=INFO, log_queue=None): 8 | self.queue = log_queue 9 | self.level = level 10 | self.check_level(level) 11 | #print(self.level) 12 | def set_level(self, level): 13 | self.level = level 14 | self.check_level(level) 15 | 16 | def check_level(self,level): 17 | if level != self.WARNING and level != self.INFO and level != self.NONE and level != self.TIMING : 18 | print("PyBCI: [INFO] - Invalid or no log level selected, defaulting to info. (options: info, warning, none)") 19 | level = self.INFO 20 | self.level = level 21 | 22 | def log(self, level, message): 23 | if self.level == self.NONE: 24 | return None 25 | if level == self.INFO: 26 | if self.level != self.NONE and self.level != self.WARNING: 27 | print('PyBCI: [INFO] -' + message) 28 | elif level == self.WARNING: 29 | if self.level != self.NONE: 30 | print('PyBCI: [WARNING] -' + message) 31 | elif level == self.TIMING: 32 | if self.level == self.TIMING: 33 | print('PyBCI: [TIMING] -' + message) 34 | -------------------------------------------------------------------------------- /pybci/Utils/__init__.py: -------------------------------------------------------------------------------- 1 | #from .Classifier import Classifier 2 | #from .FeatureExtractor import FeatureExtractor 3 | #from .LSLScanner import LSLScanner 4 | 5 | -------------------------------------------------------------------------------- /pybci/__init__.py: -------------------------------------------------------------------------------- 1 | from .pybci import PyBCI # noqa: F401 2 | -------------------------------------------------------------------------------- /pybci/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | 4 | 5 | def import_and_run_testSimple(**kwargs): 6 | from .CliTests.testSimple import main as testSimple 7 | testSimple(**kwargs) 8 | 9 | def import_and_run_testSklearn(**kwargs): 10 | from .CliTests.testSklearn import main as testSklearn 11 | testSklearn(**kwargs) 12 | 13 | def import_and_run_testTensorflow(**kwargs): 14 | from .CliTests.testTensorflow import main as testTensorflow 15 | testTensorflow(**kwargs) 16 | 17 | def import_and_run_testPyTorch(**kwargs): 18 | from .CliTests.testPyTorch import main as testPyTorch 19 | testPyTorch(**kwargs) 20 | 21 | def RunPseudo(): 22 | from .Utils.PseudoDevice import PseudoDeviceController 23 | pseudoDevice = 
PseudoDeviceController() 24 | pseudoDevice.BeginStreaming() 25 | while True: 26 | time.sleep(1) 27 | 28 | def main(): 29 | parser = argparse.ArgumentParser(description='PyBCI: A Brain-Computer Interface package. Visit https://pybci.readthedocs.io/en/latest/ for more information!') 30 | 31 | subparsers = parser.add_subparsers(title='Commands', description='Available example commands') 32 | 33 | testSimple_parser = subparsers.add_parser('testSimple', help='Runs a simple setup where an sklearn support-vector-machine is used for the model and the pseudo device generates 8 channels of 3 marker types and baseline. Similar to testSimple.py in the examples folder.') 34 | testSimple_parser.add_argument('--createPseudoDevice',default=True, type=bool, help='Set to True or False to enable or disable pseudo device creation. The pseudo device generates 8 channels of 3 marker types and baseline.') 35 | testSimple_parser.add_argument('--min_epochs_train', default=4, type=int, help='Minimum epochs to collect before model training commences, must be less than min_epochs_test. If not, min_epochs_test defaults to min_epochs_train+1.') 36 | testSimple_parser.add_argument('--min_epochs_test', default=14, type=int, help='Minimum epochs to collect before model testing commences. If less than min_epochs_train, defaults to min_epochs_train+1.') 37 | testSimple_parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 38 | 39 | #testSimple_parser.set_defaults(func=testSimple) 40 | 41 | testSklearn_parser = subparsers.add_parser('testSklearn', help='An sklearn multi-layer perceptron is used for the model and the pseudo device generates 8 channels of 3 marker types and baseline. Similar to testSklearn.py in the examples folder.') 42 | testSklearn_parser.add_argument('--createPseudoDevice',default=True, type=bool, help='Set to True or False to enable or disable pseudo device creation. The pseudo device generates 8 channels of 3 marker types and baseline.') 43 | testSklearn_parser.add_argument('--min_epochs_train', default=4,type=int, help='Minimum epochs to collect before model training commences, must be less than min_epochs_test. If not, min_epochs_test defaults to min_epochs_train+1.') 44 | testSklearn_parser.add_argument('--min_epochs_test', default=14,type=int, help='Minimum epochs to collect before model testing commences. If less than min_epochs_train, defaults to min_epochs_train+1.') 45 | testSklearn_parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 46 | 47 | #testSklearn_parser.set_defaults(func=testSklearn) 48 | 49 | testTensorflow_parser = subparsers.add_parser('testTensorflow', help='A Tensorflow GRU is used for the model and the pseudo device generates 8 channels of 3 marker types and baseline. Similar to testTensorflow.py in the examples folder.') 50 | testTensorflow_parser.add_argument("--createPseudoDevice", default=True, type=bool, help="Set to True or False to enable or disable pseudo device creation. The pseudo device generates 8 channels of 3 marker types and baseline.") 51 | testTensorflow_parser.add_argument("--min_epochs_train", default=4, type=int, help='Minimum epochs to collect before model training commences, must be less than min_epochs_test. 
If not, min_epochs_test defaults to min_epochs_train+1.') 52 | testTensorflow_parser.add_argument("--min_epochs_test", default=14, type=int, help='Minimum epochs to collect before model testing commences. If less than min_epochs_train, defaults to min_epochs_train+1.') 53 | testTensorflow_parser.add_argument("--num_chs", default=8, type=int, help='Num of channels in data stream to configure tensorflow model, if PseudoDevice==True defaults to 8.') 54 | testTensorflow_parser.add_argument("--num_classes", default=4, type=int, help='Num of classes in marker stream to configure tensorflow model, if PseudoDevice==True defaults to 4.') 55 | testTensorflow_parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 56 | 57 | #testTensorflow_parser.set_defaults(func=testTensorflow) 58 | 59 | testPyTorch_parser = subparsers.add_parser('testPyTorch', help='A PyTorch neural network is used for the model. Similar to testPyTorch.py in the examples folder.') 60 | testPyTorch_parser.add_argument("--createPseudoDevice", default=True, type=bool, help="Set to True or False to enable or disable pseudo device creation. The pseudo device generates 8 channels of 3 marker types and baseline.") 61 | testPyTorch_parser.add_argument("--min_epochs_train", default=4, type=int, help='Minimum epochs to collect before model training commences, must be less than min_epochs_test. If not, min_epochs_test defaults to min_epochs_train+1.') 62 | testPyTorch_parser.add_argument("--min_epochs_test", default=14, type=int, help='Minimum epochs to collect before model testing commences. If less than min_epochs_train, defaults to min_epochs_train+1.') 63 | testPyTorch_parser.add_argument("--num_chs", default=8, type=int, help='Num of channels in data stream to configure the PyTorch model, if PseudoDevice==True defaults to 8.') 64 | testPyTorch_parser.add_argument("--num_classes", default=4, type=int, help='Num of classes in marker stream to configure the PyTorch model, if PseudoDevice==True defaults to 4.') 65 | testPyTorch_parser.add_argument("--timeout", default=None, type=int, help="Timeout in seconds for the script to automatically stop.") 66 | 67 | #testPyTorch_parser.set_defaults(func=testPyTorch) 68 | 69 | 70 | testPseudo = subparsers.add_parser('createPseudoStreams', help='Creates basic Pseudo Device data and marker Lab Streaming Layer (LSL) streams.') 71 | testPseudo.set_defaults(func=RunPseudo) 72 | 73 | testSimple_parser.set_defaults(func=import_and_run_testSimple) 74 | testSklearn_parser.set_defaults(func=import_and_run_testSklearn) 75 | testTensorflow_parser.set_defaults(func=import_and_run_testTensorflow) 76 | testPyTorch_parser.set_defaults(func=import_and_run_testPyTorch) 77 | 78 | args = parser.parse_args() 79 | if not hasattr(args, 'func'): 80 | parser.print_help() 81 | else: 82 | arg_dict = vars(args) 83 | func = arg_dict.pop('func') # Remove 'func' and store the actual function 84 | func(**arg_dict) # Call the function with the remaining arguments 85 | 86 | 87 | if __name__ == '__main__': 88 | main() 89 | -------------------------------------------------------------------------------- /pybci/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '1.5.1' 2 | 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0", "wheel"] 3 | 
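# Explanatory comment (not in the original file): setuptools.build_meta below selects setuptools'
# standard PEP 517 build backend, which python -m build uses to produce the sdist and wheel.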
build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "pybci-package" 7 | dynamic = ["version"] 8 | description = "A Python interface to create a BCI with the Lab Streaming Layer, Pytorch, SciKit-Learn and Tensorflow packages" 9 | readme = "README.md" 10 | authors = [ 11 | {name = "Liam Booth", email = "liambooth123@hotmail.co.uk"} 12 | ] 13 | license = {text = "MIT"} 14 | classifiers = [ 15 | "Development Status :: 5 - Production/Stable", 16 | "Intended Audience :: Developers", 17 | "Intended Audience :: Science/Research", 18 | "Intended Audience :: Healthcare Industry", 19 | "Topic :: Scientific/Engineering :: Human Machine Interfaces", 20 | "Topic :: Scientific/Engineering :: Bio-Informatics", 21 | "Topic :: Scientific/Engineering :: Medical Science Apps.", 22 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 23 | "Topic :: Scientific/Engineering", 24 | "License :: OSI Approved :: MIT License", 25 | "Operating System :: Microsoft :: Windows", 26 | "Operating System :: POSIX :: Linux", 27 | "Operating System :: MacOS", 28 | "Programming Language :: Python :: 3.9", 29 | "Programming Language :: Python :: 3.10", 30 | "Programming Language :: Python :: 3.11", 31 | "Programming Language :: Python :: 3.12" 32 | ] 33 | keywords = ["machine-learning", "tensorflow", "sklearn", "pytorch", "human-computer-interaction", "bci", "lsl", "brain-computer-interface", "labstreaminglayer"] 34 | requires-python = ">=3.9,<3.13" 35 | dependencies = [ 36 | "numpy>=1.21", 37 | "pylsl==1.16.1", 38 | "scipy>=1.11.1", 39 | "antropy>=0.1.6", 40 | "tensorflow>=2.13.0", 41 | "scikit-learn>=1.3.0", 42 | "torch>=2.0.1", 43 | ] 44 | 45 | [project.urls] 46 | Homepage = "https://github.com/lmbooth/pybci" 47 | 48 | [project.scripts] 49 | pybci = "pybci.cli:main" 50 | 51 | [tool.setuptools] 52 | packages = ["pybci"] 53 | 54 | [tool.setuptools.dynamic] 55 | version = {attr = "pybci.version.__version__"} 56 | 57 | -------------------------------------------------------------------------------- /requirements-devel.txt: -------------------------------------------------------------------------------- 1 | pytest>=7.4.3 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | antropy>=0.1.6 2 | pylsl>=1.16.1 3 | scikit-learn>=1.6.0 4 | scipy>=1.14.1 5 | setuptools>=75.6.0 6 | tensorflow>=2.18.0 7 | torch>=2.5.1 8 | --------------------------------------------------------------------------------