├── .gitignore
├── README.md
├── bionic_apps
├── __init__.py
├── ai
│ ├── __init__.py
│ ├── classifier.py
│ ├── interface.py
│ ├── keras_networks.py
│ ├── kolcs_neural_networks.py
│ ├── old_neural_network.py
│ ├── pusar_neural_networks.py
│ ├── sklearn_classifiers.py
│ └── svm.py
├── artifact_filtering
│ ├── __init__.py
│ ├── blinking_detection.py
│ └── faster.py
├── databases
│ ├── __init__.py
│ ├── coreg_mindrove
│ │ ├── __init__.py
│ │ └── prepare.py
│ ├── eeg
│ │ ├── __init__.py
│ │ ├── defaults.py
│ │ ├── offline.py
│ │ ├── online_braindriver.py
│ │ └── standardize_database.py
│ └── emg
│ │ ├── __init__.py
│ │ ├── prepare_putemg.py
│ │ └── putemg_download.py
├── external_connections
│ ├── __init__.py
│ ├── brainvision
│ │ ├── BrainVision_RDA.py
│ │ ├── __init__.py
│ │ └── remote_control.py
│ ├── emotiv
│ │ ├── __init__.py
│ │ ├── epoc_plus_lsl.py
│ │ ├── epoch.py
│ │ └── mne_import_xdf.py
│ ├── hpc
│ │ ├── __init__.py
│ │ ├── example_params.py
│ │ └── utils.py
│ └── lsl
│ │ ├── BCI.py
│ │ ├── DataSender.py
│ │ ├── ReceiveData.py
│ │ └── __init__.py
├── feature_extraction
│ ├── __init__.py
│ ├── frequency
│ │ ├── __init__.py
│ │ └── fft_methods.py
│ ├── time
│ │ ├── __init__.py
│ │ └── utils.py
│ └── time_frequency
│ │ └── __init__.py
├── games
│ ├── __init__.py
│ └── braindriver
│ │ ├── __init__.py
│ │ ├── commands.py
│ │ ├── control.py
│ │ ├── game_paradigm.py
│ │ ├── logger.py
│ │ ├── main_game_start.py
│ │ ├── networkConfig.json
│ │ ├── opponents.py
│ │ └── track.py
├── handlers
│ ├── __init__.py
│ ├── gui.py
│ ├── hdf5.py
│ ├── pandas_res.py
│ └── tf.py
├── legacy
│ ├── __init__.py
│ ├── fbcsp.py
│ ├── fbcsp_test.py
│ ├── fbcsp_toolbox.py
│ ├── multi_svm.py
│ └── sklearn_bci.py
├── model_selection.py
├── offline_analyses.py
├── preprocess
│ ├── __init__.py
│ ├── channel_selection.py
│ ├── data_augmentation.py
│ ├── dataset_generation.py
│ └── io.py
├── tests
│ ├── __init__.py
│ ├── test_artefact.py
│ ├── test_bci_system.py
│ ├── test_io.py
│ └── utils.py
├── utils.py
└── validations.py
├── examples
├── custom_feature_classifier.py
└── mulit_svm.py
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | # Please specify folders and files, which are not important to upload...
2 |
3 | # folders
4 | __pycache__/
5 | tf_log/
6 | .idea/
7 | tmp/
8 | log/
9 | sandbox/
10 |
11 | # code files
12 | *.log
13 | *.cfg
14 | *.[oa]
15 |
16 | # other files
17 | .directory
18 | *~
19 | ~*
20 | *.doc
21 | *.docx
22 | *.dot
23 | *.dotx
24 | *.txt
25 | *.csv
26 | *.xls
27 | *.xlsx
28 | *trackData.json
29 |
30 | # all with
31 | *sandbox*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [//]: # (```)
2 |
3 | [//]: # (cd existing_repo)
4 |
5 | [//]: # (git remote add origin https://dev.itk.ppke.hu/kolcs/mcc-flow.git)
6 |
7 | [//]: # (git branch -M main)
8 |
9 | [//]: # (git push -uf origin main)
10 |
11 | [//]: # (```)
12 |
13 |
14 | [//]: # (## Integrate with your tools)
15 |
16 | [//]: # (- [ ] [Set up project integrations](https://dev.itk.ppke.hu/kolcs/mcc-flow/-/settings/integrations))
17 |
18 |
19 | [//]: # (***)
20 |
21 | # Bionic Applications
22 |
23 | A Brain-Computer Interface and EEG & EMG signal processing tool.
24 |
25 | ## Description
26 |
27 | This project was originally designed by the Ebrainers team for the BCI discipline of the Cybathlon competition.
28 |
29 | ## Installation
30 |
31 | 1. Download the git project
32 | 2. Download [miniconda](https://docs.conda.io/en/latest/miniconda.html) and install it.
33 | 3. Create a new environment called `bci` with Python > 3.7 and < 3.11
34 |
35 | `conda create --name bci python=3.10`
36 |
37 | 4. Activate the environment
38 |
39 | `conda activate bci`
40 |
41 | 5. Install the requirements
42 |
43 | `pip install -r /path/to/requirements.txt`
44 |
45 | ## Public Databases
46 |
47 | - [PhysioNet](https://physionet.org/content/eegmmidb/1.0.0/)
48 | - [Giga](http://gigadb.org/dataset/100542)
49 | - [BCI Competition IV 2a](https://www.bbci.de/competition/iv/)
50 | - [TTK](https://hdl.handle.net/21.15109/CONCORDA/UOQQVK)
51 |
52 | ## Usage
53 |
54 | Usage examples can be found in the `examples/` folder; a minimal sketch is also included at the end of this README.
55 |
56 | ## Citing
57 |
58 | If you use this code in a scientific publication, please cite us as:
59 |
60 | ```
61 | @article{kollod_closed_2023,
62 |     title = {Closed loop {BCI} system for {Cybathlon} 2020},
63 |     volume = {10},
64 |     issn = {2326-263X},
65 |     doi = {10.1080/2326263X.2023.2254463},
66 |     number = {2-4},
67 |     journal = {Brain-Computer Interfaces},
68 |     author = {Köllőd, Csaba and Adolf, András and Márton, Gergely and Wahdow, Moutz and Fadel, Ward and Ulbert, István},
69 |     year = {2023},
70 |     keywords = {EEG, FFT, Normalization, SVM, BCI},
71 |     pages = {114--128},
72 | }
73 | ```
74 | and
75 | ```
76 | @article{kollod_deep_2023,
77 |     title = {Deep Comparisons of Neural Networks from the {EEGNet} Family},
78 |     volume = {12},
79 |     issn = {2079-9292},
80 |     doi = {10.3390/electronics12122743},
81 |     pages = {2743},
82 |     number = {12},
83 |     journal = {Electronics},
84 |     author = {Köllőd, Csaba Márton and Adolf, András and Iván, Kristóf and Márton, Gergely and Ulbert, István},
85 |     year = {2023},
86 | }
87 | ```
88 |
89 | as well as the [MNE-Python](https://mne.tools/) software that is used by bionic_apps:
90 |
91 | ```
92 | @article{GramfortEtAl2013a,
93 |     title = {{{MEG}} and {{EEG}} Data Analysis with {{MNE}}-{{Python}}},
94 |     author = {Gramfort, Alexandre and Luessi, Martin and Larson, Eric and
95 |               Engemann, Denis A. and Strohmeier, Daniel and Brodbeck, Christian and
96 |               Goj, Roman and Jas, Mainak and Brooks, Teon and Parkkonen, Lauri and
97 |               H{\"a}m{\"a}l{\"a}inen, Matti S.},
98 |     year = {2013},
99 |     volume = {7},
100 |     pages = {1--13},
101 |     doi = {10.3389/fnins.2013.00267},
102 |     journal = {Frontiers in Neuroscience},
103 |     number = {267}
104 | }
105 | ```
106 |
107 | [//]: # (## Contributing)
108 |
109 | [//]: # (State if you are open to contributions and what your requirements are for accepting them.)
110 |
111 | [//]: # ()
112 |
113 | [//]: # (For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.)
114 |
115 | [//]: # ()
116 |
117 | [//]: # (You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.)
118 |
119 | [//]: # ()
120 |
121 | [//]: # (## Authors and acknowledgment)
122 |
123 | [//]: # (Show your appreciation to those who have contributed to the project.)
124 |
125 | [//]: # ()
126 |
127 | ## Licensing
128 |
129 | Bionic Applications is BSD-licensed (BSD-3-Clause):
130 |
131 | > This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source
132 | > Initiative.
133 | >
134 | > Copyright (c) 2019-2025, authors of Bionic Applications. All rights reserved.
135 | >
136 | > Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
137 | > following conditions are met:
138 | > - Redistributions of source code must retain the above copyright notice, this list of conditions and the following
139 | >   disclaimer.
140 | > - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
141 | >   disclaimer in the documentation and/or other materials provided with the distribution.
142 | > - Neither the names of bionic_apps authors nor the names of any contributors may be used to endorse or promote
143 | >   products derived from this software without specific prior written permission.
144 | >
145 | > This software is provided by the copyright holders and contributors "as is" and any express or implied warranties,
146 | > including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are
147 | > disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental,
148 | > special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or
149 | > services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability,
150 | > whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of
151 | > this software, even if advised of the possibility of such damage.
152 |
153 |
154 | [//]: # (## Project status)
155 |
156 | [//]: # (If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.)
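## Minimal example

The sketch below illustrates the `init_classifier`/`test_classifier` API from `bionic_apps.ai`; complete, maintained scripts live in `examples/`. It is a minimal, illustrative sketch only: the data (shapes, the `'left hand'`/`'right hand'` labels, and the train/test split) is a random placeholder invented for demonstration, so the reported scores will be at chance level. `ClassifierType.VOTING_SVM` trains one scaler + SVM per feature unit (e.g. per FFT range) and soft-votes across them, so it expects features shaped `(n_samples, n_units, n_features)`.

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

from bionic_apps.ai import ClassifierType, init_classifier
from bionic_apps.ai.classifier import test_classifier

# Placeholder features (assumption for the demo): 200 windows,
# 4 FFT ranges, 64 features each.
x = np.random.rand(200, 4, 64)
y = np.array(['left hand', 'right hand'] * 100)

le = LabelEncoder()
y_enc = le.fit_transform(y)
x_train, x_test, y_train, y_test = train_test_split(x, y_enc, test_size=.2,
                                                    stratify=y_enc)

# input_shape and classes are ignored by the sklearn-based classifiers,
# but required for the neural networks (EEG_NET, MI_EEGNET, ...).
clf = init_classifier(ClassifierType.VOTING_SVM, x.shape[1:], len(le.classes_))
clf.fit(x_train, y_train)
test_classifier(clf, x_test, y_test, le)  # prints a report, returns accuracy
```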
157 | -------------------------------------------------------------------------------- /bionic_apps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kolcs/bionic_apps/0145850fcbdccaae0d9df8ca3754221e8d9827c7/bionic_apps/__init__.py -------------------------------------------------------------------------------- /bionic_apps/ai/__init__.py: -------------------------------------------------------------------------------- 1 | from .classifier import ClassifierType, init_classifier 2 | -------------------------------------------------------------------------------- /bionic_apps/ai/classifier.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, auto 2 | 3 | from sklearn.metrics import classification_report, confusion_matrix, accuracy_score 4 | 5 | from .keras_networks import EEGNet, DeepConvNet, ShallowConvNet, EEGNetFusion, MI_EEGNet 6 | from .kolcs_neural_networks import VGG, VggType, DenseNet, DenseNetType, CascadeConvRecNet, BasicNet 7 | from .sklearn_classifiers import VotingSVM, get_ensemble_clf 8 | 9 | 10 | class ClassifierType(Enum): 11 | USER_DEFINED = auto() 12 | 13 | # sklearn classifiers 14 | VOTING_SVM = auto() 15 | ENSEMBLE = auto() 16 | VOTING = auto() 17 | 18 | # neural networks 19 | DENSE_NET_121 = auto() 20 | DENSE_NET_169 = auto() 21 | DENSE_NET_201 = auto() 22 | VGG16 = auto() 23 | VGG19 = auto() 24 | CASCADE_CONV_REC = auto() 25 | KOLCS_NET = auto() 26 | 27 | EEG_NET = auto() 28 | DEEP_CONV_NET = auto() 29 | SHALLOW_CONV_NET = auto() 30 | EEG_NET_FUSION = auto() 31 | MI_EEGNET = auto() 32 | 33 | 34 | def init_classifier(classifier_type, input_shape, classes, 35 | *, fs=None, classifier=None, save_path='tf_log/', **kwargs): 36 | if classifier_type is ClassifierType.VOTING_SVM: 37 | classifier = VotingSVM(**kwargs) 38 | elif classifier_type is ClassifierType.ENSEMBLE: 39 | classifier = get_ensemble_clf() 40 | elif classifier_type is ClassifierType.VOTING: 41 | classifier = get_ensemble_clf('voting') 42 | elif classifier_type is ClassifierType.VGG16: 43 | classifier = VGG(VggType.VGG16, input_shape, classes, **kwargs) 44 | elif classifier_type is ClassifierType.VGG19: 45 | classifier = VGG(VggType.VGG19, input_shape, classes, **kwargs) 46 | elif classifier_type is ClassifierType.DENSE_NET_121: 47 | classifier = DenseNet(DenseNetType.DN121, input_shape, classes, **kwargs) 48 | elif classifier_type is ClassifierType.DENSE_NET_169: 49 | classifier = DenseNet(DenseNetType.DN169, input_shape, classes, **kwargs) 50 | elif classifier_type is ClassifierType.DENSE_NET_201: 51 | classifier = DenseNet(DenseNetType.DN201, input_shape, classes, **kwargs) 52 | elif classifier_type is ClassifierType.CASCADE_CONV_REC: 53 | classifier = CascadeConvRecNet(input_shape, classes, **kwargs) 54 | elif classifier_type is ClassifierType.KOLCS_NET: 55 | classifier = BasicNet(input_shape, classes) 56 | elif classifier_type is ClassifierType.EEG_NET: 57 | classifier = EEGNet(input_shape, classes, fs=fs, save_path=save_path, **kwargs) 58 | elif classifier_type is ClassifierType.DEEP_CONV_NET: 59 | classifier = DeepConvNet(input_shape, classes, save_path=save_path, **kwargs) 60 | elif classifier_type is ClassifierType.SHALLOW_CONV_NET: 61 | classifier = ShallowConvNet(input_shape, classes, save_path=save_path, **kwargs) 62 | elif classifier_type is ClassifierType.EEG_NET_FUSION: 63 | classifier = EEGNetFusion(input_shape, classes, fs=fs, save_path=save_path, **kwargs) 64 
| elif classifier_type is ClassifierType.MI_EEGNET: 65 | classifier = MI_EEGNet(input_shape, classes, fs=fs, save_path=save_path, **kwargs) 66 | elif classifier_type is ClassifierType.USER_DEFINED: 67 | assert classifier is not None, f'classifier must be defined!' 68 | else: 69 | raise NotImplementedError('Classifier {} is not implemented.'.format(classifier_type.name)) 70 | return classifier 71 | 72 | 73 | def test_classifier(clf, x_test, y_test, le): 74 | y_pred = clf.predict(x_test) 75 | y_pred = le.inverse_transform(y_pred) 76 | y_test = le.inverse_transform(y_test) 77 | 78 | # https://scikit-learn.org/stable/modules/model_evaluation.html#precision-recall-and-f-measures 79 | class_report = classification_report(y_test, y_pred) 80 | conf_matrix = confusion_matrix(y_test, y_pred) 81 | acc = accuracy_score(y_test, y_pred) 82 | 83 | print(class_report) 84 | print(f"Confusion matrix:\n{conf_matrix}\n") 85 | print(f"Accuracy score: {acc}\n") 86 | return acc 87 | -------------------------------------------------------------------------------- /bionic_apps/ai/kolcs_neural_networks.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, auto 2 | 3 | import tensorflow as tf 4 | from tensorflow import keras 5 | 6 | from ..ai.interface import TFBaseNet 7 | 8 | 9 | class VggType(Enum): 10 | VGG16 = auto() 11 | VGG19 = auto() 12 | 13 | 14 | class VGG(TFBaseNet): 15 | 16 | def __init__(self, net_type, input_shape, classes, weights="imagenet"): 17 | self._net_type = net_type 18 | self._weights = weights 19 | super(VGG, self).__init__(input_shape, classes) 20 | # tf.compat.v1.reset_default_graph() 21 | 22 | def _build_graph(self): 23 | input_tensor = keras.layers.Input(shape=self._input_shape) 24 | x = input_tensor 25 | 26 | if len(self._input_shape) == 2: 27 | x = keras.layers.Lambda(lambda tens: tf.expand_dims(tens, axis=-1))(x) 28 | if len(self._input_shape) == 2 or len(self._input_shape) == 3 and self._input_shape[2] == 1: 29 | x = keras.layers.Lambda(lambda tens: tf.image.grayscale_to_rgb(tens))(x) 30 | 31 | model_kwargs = dict( 32 | include_top=False, 33 | weights=self._weights, 34 | input_tensor=x, 35 | input_shape=None, 36 | pooling=None, 37 | ) 38 | if self._net_type == VggType.VGG16: 39 | base_model = keras.applications.VGG16(**model_kwargs) 40 | elif self._net_type == VggType.VGG19: 41 | base_model = keras.applications.VGG19(**model_kwargs) 42 | else: 43 | raise NotImplementedError('VGG net {} is not defined'.format(self._net_type)) 44 | 45 | # add end node 46 | x = keras.layers.Flatten(name='flatten')(base_model.outputs[0]) 47 | x = keras.layers.Dense(4096, activation='relu', name='fc1')(x) 48 | x = keras.layers.Dense(4096, activation='relu', name='fc2')(x) 49 | x = keras.layers.Dense(self._output_shape, activation='softmax', name='predictions')(x) 50 | return input_tensor, x 51 | 52 | 53 | class DenseNetType(Enum): 54 | DN121 = auto() 55 | DN169 = auto() 56 | DN201 = auto() 57 | 58 | 59 | class DenseNet(TFBaseNet): 60 | 61 | def __init__(self, net_type, input_shape, classes, weights="imagenet"): 62 | # tf.compat.v1.reset_default_graph() 63 | self._net_type = net_type 64 | self._weights = weights 65 | super(DenseNet, self).__init__(input_shape, classes) 66 | 67 | def _build_graph(self): 68 | input_tensor = keras.layers.Input(shape=self._input_shape) 69 | x = input_tensor 70 | 71 | if len(self._input_shape) == 2: 72 | x = keras.layers.Lambda(lambda tens: tf.expand_dims(tens, axis=-1))(x) 73 | if len(self._input_shape) == 2 or 
len(self._input_shape) == 3 and self._input_shape[2] == 1: 74 | x = keras.layers.Lambda(lambda tens: tf.image.grayscale_to_rgb(tens))(x) 75 | 76 | model_kwargs = dict( 77 | include_top=False, 78 | weights=self._weights, 79 | input_tensor=x, 80 | input_shape=None, 81 | pooling='avg', 82 | ) 83 | if self._net_type == DenseNetType.DN121: 84 | base_model = keras.applications.DenseNet121(**model_kwargs) 85 | elif self._net_type == DenseNetType.DN169: 86 | base_model = keras.applications.DenseNet169(**model_kwargs) 87 | elif self._net_type == DenseNetType.DN201: 88 | base_model = keras.applications.DenseNet201(**model_kwargs) 89 | else: 90 | raise NotImplementedError('DenseNet {} is not defined'.format(self._net_type)) 91 | 92 | # add end node 93 | x = keras.layers.Dense(self._output_shape, activation='softmax', name='predictions')(base_model.outputs[0]) 94 | return input_tensor, x 95 | 96 | 97 | class CascadeConvRecNet(TFBaseNet): # https://github.com/Kearlay/research/blob/master/py/eeg_main.py 98 | 99 | def __init__(self, input_shape, classes, conv_2d_filters=None, lstm_layers=2): 100 | if conv_2d_filters is None: 101 | conv_2d_filters = [32, 64, 128] 102 | self._conv_2d_filters = conv_2d_filters 103 | self._lstm_layers = lstm_layers 104 | super(CascadeConvRecNet, self).__init__(input_shape, classes) 105 | 106 | def _build_graph(self): 107 | input_tensor = keras.layers.Input(shape=self._input_shape) 108 | x = input_tensor 109 | x = keras.layers.BatchNormalization(name='batch_norm')(x) 110 | 111 | # Convolutional Block 112 | for i, filter_num in enumerate(self._conv_2d_filters): 113 | x = keras.layers.TimeDistributed( 114 | keras.layers.Conv2D(filters=filter_num, kernel_size=(3, 3), padding='same', 115 | activation=tf.nn.leaky_relu), 116 | name='conv{}'.format(i) 117 | )(x) 118 | x = keras.layers.TimeDistributed(keras.layers.Flatten(), name='flatten')(x) 119 | 120 | # Fully connected 121 | x = keras.layers.TimeDistributed( 122 | keras.layers.Dense(units=1024, activation=tf.nn.leaky_relu), 123 | name='FC1' 124 | )(x) 125 | x = keras.layers.TimeDistributed( 126 | keras.layers.Dropout(0.5), 127 | name='dropout1' 128 | )(x) 129 | 130 | # LSTM block 131 | for i in range(self._lstm_layers): 132 | ret_seq = i < self._lstm_layers - 1 133 | x = keras.layers.LSTM(self._input_shape[0], return_sequences=ret_seq, name='LSTM{}'.format(i))(x) 134 | 135 | # Fully connected layer block 136 | x = keras.layers.Dense(1024, activation=tf.nn.leaky_relu, name='FC2')(x) 137 | x = keras.layers.Dropout(0.5, name='dropout2')(x) 138 | 139 | # Output layer 140 | outputs = keras.layers.Dense(self._output_shape, activation='softmax')(x) 141 | return input_tensor, outputs 142 | 143 | 144 | class BasicNet(TFBaseNet): 145 | 146 | def __init__(self, input_shape, classes, resize_shape=(64, 64), resize_method='bilinear'): 147 | self._resize_shape = resize_shape 148 | self._resize_method = resize_method 149 | super(BasicNet, self).__init__(input_shape, classes) 150 | 151 | def _build_graph(self): 152 | input_tensor = keras.layers.Input(shape=self._input_shape) 153 | x = input_tensor 154 | if len(self._input_shape) == 2: 155 | x = keras.layers.Lambda(lambda tens: tf.expand_dims(tens, axis=-1))(x) 156 | 157 | x = keras.layers.BatchNormalization()(x) 158 | x = keras.layers.Lambda(lambda tens: tf.image.resize(tens, self._resize_shape, self._resize_method))(x) 159 | 160 | conv_filters = [16, 32, 64] 161 | kernel_sizes = [(3, 3), (5, 5)] 162 | x_list = list() 163 | for kernel in kernel_sizes: 164 | y = x 165 | for filt in conv_filters: 
166 |                 y = keras.layers.Conv2D(filt, kernel, activation=keras.layers.LeakyReLU())(y)
167 |             y = keras.layers.Flatten()(y)
168 |             x_list.append(y)
169 |         x = keras.layers.concatenate(x_list)
170 |
171 |         x = keras.layers.Dense(units=1024, activation=tf.nn.leaky_relu)(x)
172 |         x = keras.layers.Dense(units=512, activation=tf.nn.leaky_relu)(x)
173 |         x = keras.layers.Dense(units=256, activation=tf.nn.leaky_relu)(x)
174 |         x = keras.layers.Dense(self._output_shape, activation='softmax')(x)
175 |         return input_tensor, x
176 |
177 |
178 | if __name__ == '__main__':
179 |     nn = BasicNet((64, 120), 2)
180 |     nn.summary()
181 |
--------------------------------------------------------------------------------
/bionic_apps/ai/sklearn_classifiers.py:
--------------------------------------------------------------------------------
1 | from sklearn.base import ClassifierMixin
2 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
3 | from sklearn.ensemble import StackingClassifier, ExtraTreesClassifier, RandomForestClassifier, VotingClassifier
4 | from sklearn.naive_bayes import GaussianNB
5 | from sklearn.neighbors import KNeighborsClassifier
6 | from sklearn.pipeline import make_pipeline
7 | from sklearn.preprocessing import StandardScaler, FunctionTransformer
8 | from sklearn.svm import SVC, NuSVC
9 |
10 | from .interface import ClassifierInterface
11 |
12 |
13 | def _select_fft(x, i):
14 |     return x[:, i, :]
15 |
16 |
17 | class VotingSVM(ClassifierInterface, ClassifierMixin):
18 |
19 |     def __init__(self, norm=StandardScaler, voting='soft'):
20 |         self.norm = norm
21 |         self.voting = voting
22 |         self._model = None
23 |
24 |     def fit(self, x, y=None, **kwargs):
25 |         n_svms = x.shape[1]
26 |         inner_clfs = [(f'unit{i}', make_pipeline(FunctionTransformer(_select_fft, kw_args={'i': i}),
27 |                                                  self.norm(), SVC(probability=True)))
28 |                       for i in range(n_svms)]
29 |         self._model = VotingClassifier(inner_clfs, voting=self.voting, n_jobs=len(inner_clfs)) \
30 |             if len(inner_clfs) > 1 else inner_clfs[0][1]
31 |         self._model.fit(x, y)
32 |
33 |     def predict(self, x):
34 |         return self._model.predict(x)
35 |
36 |
37 | def get_ensemble_clf(mode='ensemble'):
38 |     level0 = [
39 |         ('SVM', SVC(C=15, gamma=.01, cache_size=512, probability=True)),
40 |         ('nuSVM', NuSVC(nu=.32, gamma=.015, cache_size=512, probability=True)),
41 |         ('Extra Tree', ExtraTreesClassifier(n_estimators=500, criterion='gini')),
42 |         ('Random Forest', RandomForestClassifier(n_estimators=500, criterion='gini')),
43 |         ('Naive Bayes', GaussianNB()),
44 |         ('KNN', KNeighborsClassifier())
45 |     ]
46 |
47 |     if mode == 'ensemble':
48 |         level1 = LinearDiscriminantAnalysis()
49 |         final_clf = StackingClassifier(level0, level1, n_jobs=len(level0))
50 |     elif mode == 'voting':
51 |         final_clf = VotingClassifier(level0, voting='soft', n_jobs=len(level0))
52 |     else:
53 |         raise ValueError(f'Mode {mode} is not an ensemble mode.')
54 |
55 |     clf = make_pipeline(
56 |         # PCA(n_components=.97),
57 |         StandardScaler(),
58 |         final_clf
59 |     )
60 |     return clf
61 |
--------------------------------------------------------------------------------
/bionic_apps/ai/svm.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .interface import ClassifierInterface
3 | from joblib import Parallel, delayed
4 | from sklearn.pipeline import Pipeline
5 | from sklearn.preprocessing import Normalizer, MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler, \
6 |     PowerTransformer, QuantileTransformer
7 | from sklearn.svm import SVC
8
| 9 | 10 | class OnlinePipeline(Pipeline): 11 | 12 | def __init__(self, steps, memory=None, verbose=False): 13 | super().__init__(steps, memory, verbose) 14 | self._init_fit = True 15 | 16 | def fit(self, X, y=None, **fit_params): 17 | if self._init_fit: 18 | super().fit(X, y, **fit_params) 19 | self._init_fit = False 20 | else: 21 | for i, step in enumerate(self.steps): 22 | name, est = step 23 | if i < len(self.steps) - 1: 24 | X = est.transform(X) 25 | else: 26 | est.partial_fit(X, y) 27 | 28 | return self 29 | 30 | 31 | def _init_one_svm(svm, pipeline=('norm',), **svm_kargs): 32 | if svm is None: 33 | # svm = OnlinePipeline([('norm', Normalizer()), ('svm', SGDClassifier(**svm_kargs))]) 34 | pipe_list = list() 35 | for el in pipeline: 36 | if el == 'norm': 37 | element = (el, Normalizer()) 38 | elif el == 'standard': 39 | element = (el, StandardScaler()) 40 | elif el == 'minmax': 41 | element = (el, MinMaxScaler()) 42 | elif el == 'maxabs': 43 | element = (el, MaxAbsScaler()) 44 | elif el == 'robust': 45 | element = (el, RobustScaler()) 46 | elif el == 'power': 47 | element = (el, PowerTransformer()) 48 | elif el == 'quantile': 49 | element = (el, QuantileTransformer(output_distribution='normal')) 50 | else: 51 | raise ValueError(f'{el} is not in SVM pipeline options.') 52 | pipe_list.append(element) 53 | pipe_list.append(('svm', SVC(**svm_kargs))) 54 | svm = Pipeline(pipe_list) 55 | return svm 56 | 57 | 58 | def _fit_one_svm(svm, data, label, num): 59 | svm.fit(data, label) 60 | return num, svm 61 | 62 | 63 | class MultiSVM(ClassifierInterface): 64 | def __init__(self, **svm_kwargs): 65 | self._svm_kargs = svm_kwargs 66 | self._svms = dict() 67 | 68 | def _predict(self, i, data): 69 | svm = self._svms[i] 70 | return svm.predict(data) 71 | 72 | def fit(self, X, y, **kwargs): 73 | """ 74 | 75 | Parameters 76 | ---------- 77 | X : numpy.ndarray 78 | EEG data to be processed. 
shape: (n_samples, n_svm, n_features) 79 | y : numpy.ndarray 80 | labels 81 | 82 | Returns 83 | ------- 84 | 85 | """ 86 | X = np.array(X) 87 | n_svms = X.shape[1] 88 | self._svms = {i: _init_one_svm(self._svms.get(i), **self._svm_kargs) for i in range(n_svms)} 89 | # self._svms = [SVM(*self._svm_args) for _ in range(n_svms)] # serial: 3 times slower 90 | # for i in range(len(self._svms)): 91 | # self._fit_svm(i, X[:, i, :], y) 92 | if len(y.shape) == 2 and y.shape[1] == 1: 93 | y = np.ravel(y) 94 | if n_svms > 1: 95 | svms = Parallel(n_jobs=-2)( 96 | delayed(_fit_one_svm)(self._svms[i], X[:, i, :], y, i) for i in range(n_svms)) 97 | else: 98 | svms = [_fit_one_svm(self._svms[0], X[:, 0, :], y, 0)] 99 | self._svms = dict(svms) 100 | 101 | def predict(self, X): 102 | X = np.array(X) 103 | votes = [self._predict(i, X[:, i, :]) for i in range(X.shape[1])] 104 | # votes.extend([self._predict(X.shape[1] + i, X[:, :, i]) for i in range(X.shape[2])]) 105 | # votes = [self._predict(i, X[:, :, i]) for i in range(X.shape[2])] 106 | 107 | # votes = Parallel(n_jobs=-2)(delayed(self._predict)(i, X[:, i, :]) for i in range(len(self._svms))) 108 | votes = np.array(votes) 109 | res = list() 110 | for i in range(votes.shape[1]): # counting votes 111 | unique, count = np.unique(votes[:, i], return_counts=True) 112 | res.append(unique[np.argmax(count)]) 113 | return res 114 | 115 | 116 | if __name__ == '__main__': 117 | pass 118 | -------------------------------------------------------------------------------- /bionic_apps/artifact_filtering/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kolcs/bionic_apps/0145850fcbdccaae0d9df8ca3754221e8d9827c7/bionic_apps/artifact_filtering/__init__.py -------------------------------------------------------------------------------- /bionic_apps/artifact_filtering/blinking_detection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.signal import butter, lfilter, find_peaks, sosfilt 3 | 4 | from ..databases import GameDB 5 | from ..handlers.gui import select_files_in_explorer 6 | from ..preprocess.io import get_epochs_from_files 7 | 8 | 9 | def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, fmode='ba'): 10 | if fmode == 'ba': 11 | b, a = butter(order, (lowcut, highcut), btype='bandpass', fs=fs) 12 | y = lfilter(b, a, data) 13 | elif fmode == 'sos': 14 | sos = butter(order, (lowcut, highcut), btype='bandpass', output=fmode, fs=fs) 15 | y = sosfilt(sos, data) 16 | else: 17 | raise AttributeError('Filter mode {} is not defined'.format(fmode)) 18 | return y 19 | 20 | 21 | def _test_blinking_detection(filename, blink_list=None): 22 | epoch_length = 4 23 | baseline = tuple([None, 0.1]) 24 | task_dict = GameDB.TRIGGER_TASK_CONVERTER # {'Rest': 1, 'left fist/both fists': 2, 'right fist/both feet': 3} 25 | epochs, fs = get_epochs_from_files(filename, 26 | task_dict=task_dict, 27 | epoch_tmin=0, epoch_tmax=epoch_length, baseline=baseline, get_fs=True, 28 | prefilter_signal=True) 29 | epochs.load_data() 30 | ch_list = ['Fp1', 'Fp2', 'Af7', 'Af8', 'Afz'] 31 | epochs.pick_channels(ch_list) 32 | # epochs.plot(block=True) # check blinks visually here 33 | 34 | detected = list() 35 | for i, ep in enumerate(epochs): 36 | if is_there_blinking(ep, fs, threshold=4, ch_list=ch_list): 37 | print('Epoch {} contains blinking'.format(i + 1)) 38 | detected.append(i + 1) 39 | 40 | if blink_list is not None: 41 | print('\nSummary:') 42 | 
missed_blinks = [b for b in blink_list if b not in detected] 43 | wrongly_detected = [b for b in detected if b not in blink_list] 44 | print("Missed blinks: {}".format(missed_blinks)) 45 | print("Detected but not blink: {}".format(wrongly_detected)) 46 | epochs.plot(block=True) # check the error... 47 | 48 | 49 | def is_there_blinking(eeg, fs, threshold=4, ch_list=None): 50 | filt_data = butter_bandpass_filter(eeg, .5, 30, fs, order=5, fmode='ba') 51 | is_there_blink = False 52 | for ch_num, ch_data in enumerate(filt_data): 53 | ind, peaks = find_peaks(ch_data, height=0) 54 | avg_peak = np.mean(peaks['peak_heights']) 55 | ind_, peaks_ = find_peaks(ch_data, height=avg_peak * threshold) 56 | if len(ind_) != 0: 57 | if ch_list is not None: 58 | print("\tChannel {} contains blinking.".format(ch_list[ch_num])) 59 | is_there_blink = True 60 | 61 | return is_there_blink 62 | 63 | 64 | if __name__ == '__main__': 65 | # on: Game/mixed/subject1 66 | blink_list = [2, 4, 7, 9, 10, 11, 15, 16, 17, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 33, 35, 37, 39, 40, 43, 44, 67 | 47, 48, 49, 50, 51, 52, 54, 55, 57, 58, 61, 62, 63, 64, 68, 70, 72, 74, 75, 77, 79, 81, 82, 84, 86, 68 | 87, 69 | 88, 89, 90, 92, 94, 95, 97, 99, 100, 101, 103, 105, 106, 107, 109, 111, 113, 115, 117, 119, 120, 121, 70 | 122, 123, 125, 127, 128, 129, 131, 133, 134, 135, 137, 138, 139, 140, 141] 71 | filename = select_files_in_explorer()[0] 72 | _test_blinking_detection(filename, blink_list) 73 | -------------------------------------------------------------------------------- /bionic_apps/databases/__init__.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from pathlib import Path 3 | 4 | from .coreg_mindrove import MindRoveCoreg 5 | from .eeg.offline import * 6 | from .eeg.online_braindriver import * 7 | from .emg import PutEMG 8 | 9 | 10 | # db selection options 11 | class Databases(Enum): 12 | PHYSIONET = 'physionet' 13 | PILOT_PAR_A = 'pilot_par_a' 14 | PILOT_PAR_B = 'pilot_par_b' 15 | TTK = 'ttk' 16 | GAME = 'game' 17 | GAME_PAR_C = 'game_par_c' 18 | GAME_PAR_D = 'game_par_d' 19 | BCI_COMP_IV_1 = 'BCICompIV1' 20 | BCI_COMP_IV_2A = 'BCICompIV2a' 21 | BCI_COMP_IV_2B = 'BCICompIV2b' 22 | ParadigmC = 'par_c' 23 | EMOTIV_PAR_C = 'emotiv_par_c' 24 | GIGA = 'giga' 25 | 26 | MINDROVE_COREG = 'mindrove' 27 | PUTEMG = 'putemg' 28 | 29 | 30 | def get_eeg_db_name_by_filename(filename): 31 | filename = Path(filename).as_posix() 32 | if Game_ParadigmC().DIR in filename: 33 | db_name = Databases.GAME_PAR_C 34 | elif Game_ParadigmD().DIR in filename: 35 | db_name = Databases.GAME_PAR_D 36 | elif PilotDB_ParadigmA().DIR in filename: 37 | db_name = Databases.PILOT_PAR_A 38 | elif PilotDB_ParadigmB().DIR in filename: 39 | db_name = Databases.PILOT_PAR_B 40 | elif Physionet().DIR in filename: 41 | db_name = Databases.PHYSIONET 42 | elif ParadigmC().DIR in filename: 43 | db_name = Databases.ParadigmC 44 | elif BciCompIV1().DIR in filename: 45 | db_name = Databases.BCI_COMP_IV_1 46 | elif BciCompIV2a().DIR in filename: 47 | db_name = Databases.BCI_COMP_IV_2A 48 | elif BciCompIV2b().DIR in filename: 49 | db_name = Databases.BCI_COMP_IV_2B 50 | elif TTK_DB().DIR in filename: 51 | db_name = Databases.TTK 52 | elif Giga().DIR in filename: 53 | db_name = Databases.GIGA 54 | elif MindRoveCoreg().DIR in filename: 55 | db_name = Databases.MINDROVE_COREG 56 | elif PutEMG().DIR in filename: 57 | db_name = Databases.PUTEMG 58 | else: 59 | raise ValueError('No database defined with path 
{}'.format(filename)) 60 | return db_name 61 | -------------------------------------------------------------------------------- /bionic_apps/databases/coreg_mindrove/__init__.py: -------------------------------------------------------------------------------- 1 | class MindRoveCoreg: 2 | 3 | def __init__(self, config_ver=-1): 4 | self.DIR = "MindRove-coreg" 5 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 6 | 7 | self.FILE_PATH = 'subject{subj}_raw.fif' 8 | self.SUBJECT_NUM = 6 9 | self.DROP_SUBJECTS = [] 10 | -------------------------------------------------------------------------------- /bionic_apps/databases/coreg_mindrove/prepare.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | import pandas as pd 5 | 6 | from bionic_apps.databases.eeg.standardize_database import _create_raw 7 | 8 | DATA_PATH = Path(r'D:\Users\Csabi\OneDrive - Pázmány Péter Katolikus Egyetem\MindRove project\database\Coreg_data') 9 | 10 | EMG_CHS = ['EMG1', 'EMG2', 'EMG3', 'EMG4', 'EMG5', 'EMG6', 'EMG7', 'EMG8'] 11 | EEG_CHS = ['EEG1', 'EEG2', 'EEG3', 'EEG4', 'EEG5', 'EEG6'] 12 | LABEL_CONVERTER = ( 13 | ['Idle'] + 14 | (['Rest'] + ['Thumb'] * 3) * 3 + 15 | (['Rest'] + ['Index'] * 3) * 3 + 16 | (['Rest'] + ['Middle'] * 3) * 3 + 17 | (['Rest'] + ['Ring'] * 3) * 3 + 18 | (['Rest'] + ['Small'] * 3) * 3 + 19 | (['Rest'] + ['Wrist FW'] * 3) * 3 + 20 | (['Rest'] + ['Wrist BCK'] * 3) * 3 + 21 | ['Idle'] 22 | ) 23 | 24 | FS = 500 25 | 26 | 27 | def get_annotated_mindrove_raw(file, mode='merged', plot=False): 28 | assert file.suffix == '.csv', f'only .csv files are accepted' 29 | df = pd.read_csv(file, sep=',', encoding='utf8') 30 | data = df[EEG_CHS + EMG_CHS] * 1e-6 31 | task_numbers = df['Task_number'].values 32 | 33 | tr = np.ediff1d(task_numbers) 34 | tr_start = np.insert(tr, 0, 1) 35 | tr_end = np.append(tr, 1) 36 | tr_start = np.arange(len(tr_start))[tr_start > 0] 37 | tr_end = np.arange(len(tr_end))[tr_end > 0] + 1 38 | assert len(tr_start) == len(tr_end) 39 | 40 | if mode == 'distinct': 41 | pass 42 | elif mode == 'merged': 43 | tr_start_merged, tr_end_merged = [], [] 44 | for i in range(len(tr_start)): 45 | if i == 0 or i + 1 == len(tr_start) or i % 4 == 1 or i % 4 == 2: 46 | tr_start_merged.append(tr_start[i]) 47 | if i == 0 or i + 1 == len(tr_start) or i % 4 == 0 or i % 4 == 1: 48 | tr_end_merged.append(tr_end[i]) 49 | if len(tr_start_merged) > len(tr_end_merged): 50 | tr_start_merged.pop(-2) 51 | elif len(tr_start_merged) < len(tr_end_merged): 52 | tr_end_merged.pop(-2) 53 | tr_start, tr_end = np.array(tr_start_merged), np.array(tr_end_merged) 54 | else: 55 | raise NotImplementedError(f'Mode {mode} is not implemented.') 56 | 57 | onset = tr_start / FS 58 | duration = tr_end / FS - onset 59 | ep_labels = np.array([LABEL_CONVERTER[int(lab)] for lab in task_numbers[tr_start]]) 60 | 61 | raw = _create_raw(data.T, 62 | ch_names=EEG_CHS + EMG_CHS, 63 | ch_types=['eeg'] * len(EEG_CHS) + ['emg'] * len(EMG_CHS), 64 | fs=FS, onset=onset, duration=duration, 65 | description=ep_labels 66 | ) 67 | 68 | if plot: 69 | raw.plot(block=True) 70 | 71 | return raw 72 | 73 | 74 | def reannotate_mindrove(base_dir=DATA_PATH, ep_mode='merged'): 75 | assert base_dir.exists(), f'Path {base_dir} is not available.' 
76 | emg_files = sorted(base_dir.rglob('*.csv')) 77 | for j, file in enumerate(emg_files): 78 | j += 1 79 | subj = file.stem.split('_')[-1] 80 | print(f'Subject{j} - {subj}') 81 | 82 | raw = get_annotated_mindrove_raw(file, mode=ep_mode, plot=False) 83 | 84 | raw_cp = raw.copy() 85 | iir_params = dict(order=5, ftype='butter', output='sos') 86 | raw_cp.filter(l_freq=1, h_freq=40, method='iir', iir_params=iir_params, skip_by_annotation='edge', 87 | n_jobs=-1, picks='eeg') 88 | raw_cp.filter(l_freq=20, h_freq=150, method='iir', iir_params=iir_params, skip_by_annotation='edge', 89 | n_jobs=-1, picks='emg') 90 | 91 | raw_cp.plot(block=True) 92 | 93 | raw.set_annotations(raw_cp.annotations) 94 | file = str(base_dir.joinpath(f'subject{j:03d}_raw.fif')) 95 | raw.save(file) 96 | 97 | # # testing: 98 | # from mne.io import read_raw_fif 99 | # sraw = read_raw_fif(file) 100 | # 101 | # raw.plot(title='Modified, before save') 102 | # sraw.plot(block=True, title='After save') 103 | 104 | 105 | if __name__ == '__main__': 106 | reannotate_mindrove() 107 | -------------------------------------------------------------------------------- /bionic_apps/databases/eeg/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kolcs/bionic_apps/0145850fcbdccaae0d9df8ca3754221e8d9827c7/bionic_apps/databases/eeg/__init__.py -------------------------------------------------------------------------------- /bionic_apps/databases/eeg/defaults.py: -------------------------------------------------------------------------------- 1 | EYE_OPEN = 'eye open' 2 | EYE_CLOSED = 'eye closed' 3 | LEFT_HAND = 'left hand' 4 | RIGHT_HAND = 'right hand' 5 | BOTH_HANDS = 'both hands' 6 | LEFT_LEG = 'left leg' 7 | RIGHT_LEG = 'right leg' 8 | BOTH_LEGS = 'both legs' 9 | REST = 'rest' 10 | ACTIVE = 'active' 11 | CALM = 'calm' 12 | TONGUE = 'tongue' 13 | 14 | SUBJECT = 'subject' 15 | 16 | # Record types: 17 | IMAGINED_MOVEMENT = "imagined" 18 | REAL_MOVEMENT = "real" 19 | BASELINE = 'baseline' 20 | -------------------------------------------------------------------------------- /bionic_apps/databases/eeg/offline.py: -------------------------------------------------------------------------------- 1 | from .defaults import LEFT_HAND, RIGHT_HAND, BOTH_HANDS, BOTH_LEGS, EYE_OPEN, EYE_CLOSED, \ 2 | REST, TONGUE, BASELINE, REAL_MOVEMENT, IMAGINED_MOVEMENT 3 | 4 | 5 | class Physionet: 6 | 7 | def __init__(self, config_ver=-1.): 8 | self.DIR = "physionet.org/" 9 | self.SUBJECT_NUM = 109 10 | 11 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 12 | if self.CONFIG_VER >= 1: 13 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 14 | self.SUBJECT_EXP = {subj: (subj,) for subj in range(1, self.SUBJECT_NUM + 1)} 15 | 16 | self.TRIGGER_TASK_CONVERTER = { # imagined 17 | # REST: 1, 18 | LEFT_HAND: 2, 19 | RIGHT_HAND: 3, 20 | BOTH_HANDS: 4, 21 | BOTH_LEGS: 5 22 | } 23 | 24 | self.TRIGGER_EVENT_ID = {'T{}'.format(i): i + 1 for i in range(5)} 25 | """ 26 | DROP_SUBJECTS: list of subjects, whose records are corrupted 27 | 89 - wrong baseline session (T0 with T1) 28 | 88, 92, 100 - wrong intervals (1,375, 5,125) and freq 128Hz instead of 160Hz 29 | """ 30 | self.DROP_SUBJECTS = [89, 88, 92, 100] # todo: remove 89? 
31 | 32 | else: 33 | self.FILE_PATH = 'physiobank/database/eegmmidb/S{subj}/S{subj}R{rec}.edf' 34 | self.TRIGGER_EVENT_ID = {'T{}'.format(i): i + 1 for i in range(3)} 35 | 36 | TASK_EYE_OPEN = {EYE_OPEN: 1} 37 | TASK_EYE_CLOSED = {EYE_CLOSED: 1} 38 | 39 | TASK_12 = { 40 | REST: 1, 41 | LEFT_HAND: 2, 42 | RIGHT_HAND: 3 43 | } 44 | 45 | TASK_34 = { 46 | REST: 1, 47 | BOTH_HANDS: 2, 48 | BOTH_LEGS: 3 49 | } 50 | 51 | self.TRIGGER_CONV_REC_TO_TYPE = { 52 | 1: BASELINE, 53 | 2: BASELINE, 54 | 3: REAL_MOVEMENT, 55 | 4: IMAGINED_MOVEMENT, 56 | 5: REAL_MOVEMENT, 57 | 6: IMAGINED_MOVEMENT, 58 | 7: REAL_MOVEMENT, 59 | 8: IMAGINED_MOVEMENT, 60 | 9: REAL_MOVEMENT, 61 | 10: IMAGINED_MOVEMENT, 62 | 11: REAL_MOVEMENT, 63 | 12: IMAGINED_MOVEMENT, 64 | 13: REAL_MOVEMENT, 65 | 14: IMAGINED_MOVEMENT 66 | } 67 | 68 | self.TYPE_TO_REC = { 69 | BASELINE: [1, 2], 70 | REAL_MOVEMENT: [i for i in range(3, 15, 2)], 71 | IMAGINED_MOVEMENT: [i for i in range(4, 15, 2)] 72 | } 73 | 74 | self.TRIGGER_CONV_REC_TO_TASK = { # rec_num : {taskID: task} 75 | 1: TASK_EYE_OPEN, 76 | 2: TASK_EYE_CLOSED, 77 | 3: TASK_12, 78 | 4: TASK_12, 79 | 5: TASK_34, 80 | 6: TASK_34, 81 | 7: TASK_12, 82 | 8: TASK_12, 83 | 9: TASK_34, 84 | 10: TASK_34, 85 | 11: TASK_12, 86 | 12: TASK_12, 87 | 13: TASK_34, 88 | 14: TASK_34 89 | } 90 | 91 | self.TASK_TO_REC = { # same trigger in leg-hand and left-right 92 | # REST: [i for i in range(4, 15, 2)], 93 | LEFT_HAND: [i for i in range(4, 15, 4)], 94 | RIGHT_HAND: [i for i in range(4, 15, 4)], 95 | BOTH_HANDS: [i for i in range(6, 15, 4)], 96 | BOTH_LEGS: [i for i in range(6, 15, 4)] 97 | } 98 | 99 | self.MAX_DURATION = 4 # seconds --> creating strictly formatted data window 100 | 101 | """ 102 | DROP_SUBJECTS: list of subjects, whose records are corrupted 103 | 89 - wrong baseline session (T0 with T1) 104 | 88, 92, 100 - wrong intervals (1,375, 5,125) and freq 128Hz instead of 160Hz 105 | """ 106 | self.DROP_SUBJECTS = [89, 88, 92, 100] 107 | 108 | # # source: 109 | # list(range(35, 42)) + list(range(46, 53)) + list(range(57, 64)) + list(range(4, 7)) + list( 110 | # range(14, 19)) + list(range(23, 32)) + [34, 42, 45, 53, 44, 54, 56, 64] + list(range(67, 76)) + list( 111 | # range(80, 85)) + list(range(92, 95)) + [104] 112 | self.CHANNEL_TRANSFORMATION = [35, 36, 37, 38, 39, 40, 41, 46, 47, 48, 49, 50, 51, 52, 57, 58, 59, 60, 61, 113 | 62, 63, 4, 5, 6, 14, 15, 16, 17, 18, 23, 24, 25, 26, 27, 28, 29, 30, 31, 34, 114 | 42, 45, 53, 44, 54, 56, 64, 67, 68, 69, 70, 71, 72, 73, 74, 75, 80, 81, 82, 115 | 83, 84, 92, 93, 94, 104] 116 | 117 | 118 | class BciCompIV2a: 119 | 120 | def __init__(self, config_ver=-1): 121 | self.DIR = "BCI_comp/4/2a" 122 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 123 | 124 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 125 | self.SUBJECT_NUM = 2 * 9 126 | self.SUBJECT_EXP = {s + 1: [s * 2 + 1, (s + 1) * 2] for s in range(9)} # must be sorted! 127 | 128 | self.TRIGGER_TASK_CONVERTER = { # imagined 129 | LEFT_HAND: 4, 130 | RIGHT_HAND: 5, 131 | BOTH_LEGS: 6, 132 | TONGUE: 7 133 | } 134 | 135 | self.TRIGGER_EVENT_ID = {str(el): i + 1 for i, el in enumerate([276, 277, 768, 769, 770, 771, 772, 783, 136 | 1023, 1072, 32766])} 137 | 138 | self.DROP_SUBJECTS = [] 139 | 140 | 141 | class BciCompIV2b: 142 | 143 | def __init__(self, config_ver=-1): 144 | self.DIR = "BCI_comp/4/2b" 145 | self.CONFIG_VER = 1.1 if config_ver == -1. 
else config_ver 146 | 147 | self.FILE_PATH = 'S{subj}/S{subj}{rec}_raw.fif' 148 | self.SUBJECT_NUM = 5 * 9 149 | self.SUBJECT_EXP = {s + 1: [s * 5 + i for i in range(1, 6)] for s in range(9)} # must be sorted! 150 | 151 | self.TRIGGER_TASK_CONVERTER = { # imagined 152 | LEFT_HAND: 4, 153 | RIGHT_HAND: 5, 154 | } 155 | 156 | self.TRIGGER_EVENT_ID = {str(el): i + 1 for i, el in enumerate([276, 277, 768, 769, 770, 781, 783, 1023, 157 | 1077, 1078, 1079, 1081, 32766])} 158 | 159 | self.DROP_SUBJECTS = [] 160 | 161 | 162 | class BciCompIV1: 163 | """ 164 | In this dataset there are only 2 classes out of left, right, foot at each subject. 165 | It is not suggested to train it in a cross subject fashion... 166 | """ 167 | 168 | def __init__(self, config_ver=-1): 169 | self.DIR = "BCI_comp/4/1" 170 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 171 | 172 | self.FILE_PATH = 'S{subj}/S{subj}{rec}_raw.fif' 173 | self.SUBJECT_NUM = 7 174 | self.SUBJECT_EXP = {subj: (subj,) for subj in range(1, self.SUBJECT_NUM + 1)} 175 | 176 | self.TRIGGER_TASK_CONVERTER = { # imagined 177 | 'class1': 1, 178 | 'class2': 2, 179 | } 180 | 181 | self.TRIGGER_EVENT_ID = 'auto' 182 | 183 | self.DROP_SUBJECTS = [3, 4, 5] # artificial data 184 | 185 | 186 | class Giga: 187 | 188 | def __init__(self, config_ver=-1): 189 | self.DIR = "giga" 190 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 191 | 192 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 193 | self.SUBJECT_NUM = 2 * 54 194 | self.SUBJECT_EXP = {s + 1: [s * 2 + 1, (s + 1) * 2] for s in range(54)} # must be sorted! 195 | 196 | self.TRIGGER_TASK_CONVERTER = { # imagined 197 | RIGHT_HAND: 1, 198 | LEFT_HAND: 2, 199 | } 200 | 201 | self.TRIGGER_EVENT_ID = {el: i + 1 for i, el in enumerate(['right', 'left'])} 202 | 203 | self.DROP_SUBJECTS = [] 204 | -------------------------------------------------------------------------------- /bionic_apps/databases/eeg/online_braindriver.py: -------------------------------------------------------------------------------- 1 | from .defaults import LEFT_HAND, RIGHT_HAND, BOTH_HANDS, BOTH_LEGS, REST, CALM, ACTIVE, \ 2 | LEFT_LEG, RIGHT_LEG 3 | from ...games.braindriver.control import ControlCommand 4 | 5 | DIR_FEATURE_DB = 'tmp/' 6 | 7 | 8 | class GameDB: 9 | 10 | def __init__(self, config_ver=-1): 11 | self.DIR = "Game/mixed" 12 | self.FILE_PATH = 'subject{subj}/rec{rec}.vhdr' 13 | self.CONFIG_VER = 0 if config_ver == -1. else config_ver 14 | 15 | self.TRIGGER_TASK_CONVERTER = { # imagined 16 | REST: 1, 17 | RIGHT_HAND: 5, 18 | LEFT_HAND: 7, 19 | # RIGHT_LEG: 9, 20 | # LEFT_LEG: 11 21 | # BOTH_HANDS: 9, 22 | BOTH_LEGS: 11 23 | } 24 | 25 | self.COMMAND_CONV = { 26 | REST: ControlCommand.STRAIGHT, 27 | RIGHT_HAND: ControlCommand.RIGHT, 28 | LEFT_HAND: ControlCommand.LEFT, 29 | BOTH_LEGS: ControlCommand.HEADLIGHT 30 | } 31 | 32 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 33 | 34 | self.DROP_SUBJECTS = [] 35 | 36 | 37 | class Game_ParadigmC: 38 | 39 | def __init__(self, config_ver=-1): 40 | self.DIR = "Game/paradigmC/" 41 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 42 | 43 | if self.CONFIG_VER > 1: 44 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 45 | self.SUBJECT_EXP = { # must be sorted! 
46 | 1: list(range(1, 6)), 47 | } 48 | else: 49 | self.FILE_PATH = 'subject{subj}/rec{rec}.vhdr' 50 | 51 | self.TRIGGER_TASK_CONVERTER = { # imagined 52 | # REST: 1, 53 | # EYE_OPEN: 2, 54 | # EYE_CLOSED: 3, 55 | RIGHT_HAND: 5, 56 | LEFT_HAND: 7, 57 | CALM: 9, 58 | BOTH_LEGS: 11 59 | } 60 | 61 | self.COMMAND_CONV = { 62 | CALM: ControlCommand.STRAIGHT, 63 | RIGHT_HAND: ControlCommand.RIGHT, 64 | LEFT_HAND: ControlCommand.LEFT, 65 | BOTH_LEGS: ControlCommand.HEADLIGHT 66 | } 67 | 68 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 69 | 70 | self.DROP_SUBJECTS = [] 71 | 72 | 73 | class ParadigmC: 74 | 75 | def __init__(self, config_ver=-1): 76 | self.DIR = "ParC/" 77 | self.FILE_PATH = 'subject{subj}/rec{rec}.vhdr' 78 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 79 | 80 | self.TRIGGER_TASK_CONVERTER = { # imagined 81 | # REST: 1, 82 | # EYE_OPEN: 2, 83 | # EYE_CLOSED: 3, 84 | RIGHT_HAND: 5, 85 | LEFT_HAND: 7, 86 | CALM: 9, 87 | BOTH_LEGS: 11 88 | } 89 | 90 | self.COMMAND_CONV = { 91 | CALM: ControlCommand.STRAIGHT, 92 | RIGHT_HAND: ControlCommand.RIGHT, 93 | LEFT_HAND: ControlCommand.LEFT, 94 | BOTH_LEGS: ControlCommand.HEADLIGHT 95 | } 96 | 97 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 98 | 99 | self.DROP_SUBJECTS = [1] 100 | 101 | 102 | class Game_ParadigmD: 103 | 104 | def __init__(self, config_ver=-1): 105 | self.DIR = "Game/paradigmD/" 106 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 107 | 108 | if self.CONFIG_VER > 1: 109 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 110 | self.SUBJECT_EXP = { # must be sorted! 111 | 1: [1, 2, 5, 6, 7, 8, 10, 11, 13, 15], 112 | 2: [3, 4, 9, 12, 14, 16], 113 | } 114 | else: 115 | self.FILE_PATH = 'subject{subj}/rec{rec}.vhdr' 116 | 117 | self.TRIGGER_TASK_CONVERTER = { # imagined 118 | # REST: 1, 119 | # EYE_OPEN: 2, 120 | # EYE_CLOSED: 3, 121 | ACTIVE + '1': 5, 122 | ACTIVE + '2': 9, 123 | CALM + '1': 7, 124 | CALM + '2': 11 125 | } 126 | 127 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 128 | 129 | self.DROP_SUBJECTS = [] 130 | 131 | 132 | class PilotDB_ParadigmA: 133 | 134 | def __init__(self, config_ver=-1): 135 | self.DIR = "Cybathlon_pilot/paradigmA/" 136 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 137 | 138 | if self.CONFIG_VER > 1: 139 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 140 | self.SUBJECT_EXP = { # must be sorted! 141 | 1: [1, 4], 142 | 2: [2, 3], 143 | } 144 | else: 145 | self.FILE_PATH = 'pilot{subj}/rec{rec}.vhdr' 146 | 147 | self.TRIGGER_TASK_CONVERTER = { # imagined 148 | # REST: 1, 149 | # EYE_OPEN: 2, 150 | # EYE_CLOSED: 3, 151 | RIGHT_HAND: 5, 152 | LEFT_HAND: 7, 153 | RIGHT_LEG: 9, 154 | LEFT_LEG: 11 155 | } 156 | 157 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 158 | 159 | self.DROP_SUBJECTS = [] 160 | 161 | 162 | class PilotDB_ParadigmB: 163 | 164 | def __init__(self, config_ver=-1): 165 | self.DIR = "Cybathlon_pilot/paradigmB/" 166 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 167 | 168 | if self.CONFIG_VER > 1: 169 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 170 | self.SUBJECT_EXP = { # must be sorted! 
171 | 1: [1, 3], 172 | 2: [2, 4], 173 | } 174 | else: 175 | self.FILE_PATH = 'pilot{subj}/rec{rec}.vhdr' 176 | 177 | self.TRIGGER_TASK_CONVERTER = { # imagined 178 | # REST: 1, 179 | # EYE_OPEN: 2, 180 | # EYE_CLOSED: 3, 181 | RIGHT_HAND: 5, 182 | LEFT_HAND: 7, 183 | BOTH_HANDS: 9, 184 | BOTH_LEGS: 11 185 | } 186 | 187 | self.COMMAND_CONV = { 188 | BOTH_HANDS: ControlCommand.STRAIGHT, 189 | RIGHT_HAND: ControlCommand.RIGHT, 190 | LEFT_HAND: ControlCommand.LEFT, 191 | BOTH_LEGS: ControlCommand.HEADLIGHT 192 | } 193 | 194 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 195 | 196 | self.DROP_SUBJECTS = [] 197 | 198 | 199 | class TTK_DB: 200 | 201 | def __init__(self, config_ver=-1): 202 | self.DIR = "TTK/" 203 | self.CONFIG_VER = 1.1 if config_ver == -1. else config_ver 204 | 205 | if self.CONFIG_VER >= 1: 206 | self.SUBJECT_EXP = { # must be sorted! 207 | 1: [1, 10, 19], 208 | 2: [2], 209 | 3: [3, 11, 23], 210 | 4: [4, 6, 8], 211 | 5: [5, 7, 17, 21], 212 | 6: [9, 12, 13, 14, 20], 213 | 7: [15, 16, 18, 22], 214 | 8: [24], 215 | 9: [25], 216 | } 217 | self.FILE_PATH = 'S{subj}/S{subj}R{rec}_raw.fif' 218 | self.DROP_SUBJECTS = [] 219 | else: 220 | self.FILE_PATH = 'subject{subj}/rec{rec}.vhdr' 221 | self.DROP_SUBJECTS = [1, 9, 17] 222 | 223 | self.TRIGGER_TASK_CONVERTER = { # imagined 224 | # REST: 1, 225 | # EYE_OPEN: 2, 226 | # EYE_CLOSED: 3, 227 | RIGHT_HAND: 5, 228 | LEFT_HAND: 7, 229 | RIGHT_LEG: 9, 230 | LEFT_LEG: 11 231 | } 232 | 233 | self.TRIGGER_EVENT_ID = {f'Stimulus/S {i + 1:>2}': i + 1 for i in range(16)} 234 | 235 | 236 | class EmotivParC: 237 | 238 | def __init__(self, config_ver=-1): 239 | self.DIR = "bionic_apps/external_connections/emotiv/paradigmC/" 240 | # self.CONFIG_VER = 0 if config_ver == -1. else config_ver 241 | 242 | self.FILE_PATH = 'sub-P{subj}_run-{rec}_eeg.xdf' 243 | 244 | self.TRIGGER_TASK_CONVERTER = { # imagined 245 | # REST: 1, 246 | # EYE_OPEN: 2, 247 | # EYE_CLOSED: 3, 248 | RIGHT_HAND: 5, 249 | LEFT_HAND: 7, 250 | CALM: 9, 251 | BOTH_LEGS: 11 252 | } 253 | 254 | self.TRIGGER_EVENT_ID = {f'S {i + 1:>2}': i + 1 for i in range(16)} 255 | 256 | self.DROP_SUBJECTS = [1, 2, 3] 257 | -------------------------------------------------------------------------------- /bionic_apps/databases/emg/__init__.py: -------------------------------------------------------------------------------- 1 | class PutEMG: 2 | 3 | def __init__(self, config_ver=-1): 4 | self.DIR = "putemg" 5 | self.CONFIG_VER = 1.1 if config_ver == -1. 
else config_ver
6 |
7 |         self.FILE_PATH = 'subject{subj}_raw.fif'
8 |         self.SUBJECT_NUM = 87
9 |         self.DROP_SUBJECTS = [43, 59, 63, 80]
10 |
--------------------------------------------------------------------------------
/bionic_apps/databases/emg/prepare_putemg.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pandas as pd
4 | import numpy as np
5 |
6 | import putemg_download
7 | from bionic_apps.databases.eeg.standardize_database import _create_raw
8 |
9 | FS = 5120  # Hz
10 | LABEL_CONVERTER = {
11 |     -1: ['Rest'],
12 |     0: ['Idle'],
13 |     1: ['Fist'],
14 |     2: ['Flexion'],
15 |     3: ['Extension'],
16 |     6: ['pinch thumb-index'],
17 |     7: ['pinch thumb-middle'],
18 |     8: ['pinch thumb-ring'],
19 |     9: ['pinch thumb-small']
20 | }
21 |
22 | BASE_PATH = 'Data-HDF5'
23 | FILE_PATTERN = '*repeats_long*.hdf5'
24 | LABEL_NUM = 5
25 |
26 |
27 | def get_annotated_raw(file, limit, plot=False):
28 |     df = pd.read_hdf(file)
29 |     emg_cols = [col for col in df if 'EMG' in col]
30 |
31 |     data = df[emg_cols].to_numpy() * 5 / 2 ** 12 * 1000 / 200 * 1e-3  # ADC counts -> volts (5 V / 12 bit, gain 200)
32 |     task_numbers = df['TRAJ_GT'].to_numpy()
33 |
34 |     tr = np.ediff1d(task_numbers)
35 |     tr_start = np.insert(tr, 0, 1)
36 |     tr_end = np.append(tr, 1)
37 |     tr_start = np.arange(len(tr_start))[tr_start > 0]
38 |     tr_end = np.arange(len(tr_end))[tr_end > 0] + 1
39 |     assert len(tr_start) == len(tr_end)
40 |
41 |     onset = tr_start / FS
42 |     duration = tr_end / FS - onset
43 |     ep_labels = np.array([LABEL_CONVERTER[int(lab)] for lab in task_numbers[tr_start]]).ravel()
44 |
45 |     label_limit = [np.sum(label == ep_labels) == limit for label in np.unique(ep_labels) if
46 |                    label not in ['Rest', 'Idle']]
47 |     if not all(label_limit):
48 |         print(f'\nSome of the labels in {file} do not reach the minimum label limit.\n')
49 |
50 |     raw = _create_raw(data.T,
51 |                       ch_names=emg_cols,
52 |                       ch_types=['emg'] * len(emg_cols),
53 |                       fs=FS, onset=onset, duration=duration,
54 |                       description=ep_labels
55 |                       )
56 |
57 |     if plot:
58 |         raw.plot(block=True)
59 |
60 |     return raw
61 |
62 |
63 | def main():
64 |     base_dir = Path(BASE_PATH)
65 |
66 |     if not base_dir.exists():
67 |         from unittest.mock import patch
68 |         import sys
69 |         testargs = [f"{__file__}", 'emg_gestures', str(BASE_PATH).lower()]
70 |         with patch.object(sys, 'argv', testargs):
71 |             putemg_download.main()
72 |
73 |     files = sorted(base_dir.glob(FILE_PATTERN))
74 |     for j, file in enumerate(files):
75 |         print(f'Progress: {j * 100.
/ len(files):.2f} %')
76 |         raw = get_annotated_raw(file, LABEL_NUM, plot=False)
77 |         file = str(base_dir.joinpath(f'subject{j:03d}_raw.fif'))
78 |         raw.save(file, overwrite=True)
79 |
80 |
81 | if __name__ == '__main__':
82 |     main()
83 |
--------------------------------------------------------------------------------
/bionic_apps/databases/emg/putemg_download.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import urllib.request
4 | import re
5 |
6 | BASE_URL = "https://chmura.put.poznan.pl/s/G285gnQVuCnfQAx/download?path=%2F"
7 |
8 | VIDEO_1080p_DIR = "Video-1080p"
9 | VIDEO_576p_DIR = "Video-576p"
10 | DEPTH_DIR = "Depth"
11 | DATA_HDF5_DIR = "Data-HDF5"
12 | DATA_CSV_DIR = "Data-CSV"
13 |
14 |
15 | def usage():
16 |     print("Usage: {:s} <experiment_type> <media_type> [<id> ...]".format(os.path.basename(__file__)))
17 |     print()
18 |     print("Arguments:")
19 |     print("  <experiment_type>  comma-separated list of experiment types "
20 |           "(supported types: emg_gestures, emg_force)")
21 |     print("  <media_type>       comma-separated list of media "
22 |           "(supported types: data-csv, data-hdf5, depth, video-1080p, video-576p)")
23 |     print("  [<id> ...]         optional list of two-digit participant IDs, fetches all if none are given")
24 |     print()
25 |     print("Examples:")
26 |     print("{:s} emg_gestures data-hdf5,video-1080p".format(os.path.basename(__file__)))
27 |     print("{:s} emg_gestures,emg_force data-csv,depth 03 04 07".format(os.path.basename(__file__)))
28 |     exit(1)
29 |
30 |
31 | def parse_record(name):
32 |     experiment_name_regexp = r"^(?P\w*)-(?P\d{2})-(?P\w*)-" \
33 |                              r"(?P