├── .gitignore
├── LICENSE
├── README.md
├── assets
│   ├── Example-dataDirectory.png
│   ├── PythonDataViewer.jpg
│   ├── anaconda-choosing-environment.jpg
│   ├── anaconda-launch-spyder.jpg
│   └── anaconda-prontpt-start.jpg
├── matlab
│   ├── README.txt
│   ├── emotibit
│   │   └── datarealigner
│   │       ├── alignData.m
│   │       ├── arrangeRawData.m
│   │       ├── getDelay.m
│   │       ├── getDelayAndAlignData.m
│   │       ├── getDelayCorr.m
│   │       ├── getRawDataFromFile.m
│   │       └── splineSubSection.m
│   └── examples
│       └── datarealigner_example.m
└── py
    ├── README.md
    ├── anaconda-environments
    │   └── EmotiBit-pyenv.yml
    ├── emotibit
    │   ├── __init__.py
    │   ├── datarealigner.py
    │   ├── datasyncer.py
    │   ├── dataviewer.py
    │   ├── ecgHR_detector.py
    │   ├── flexcompparser.py
    │   ├── hr_scorer.py
    │   ├── info.py
    │   ├── signal.py
    │   ├── tapdetector.py
    │   ├── test_timestamp_converter.py
    │   ├── timestamp_converter.py
    │   └── utils.py
    ├── examples
    │   ├── datarealigner_example
    │   │   ├── ReadMe.md
    │   │   └── datarealigner_example.py
    │   ├── datasyncer_example
    │   │   ├── ReadMe.md
    │   │   └── datasyncer_example.py
    │   ├── dataviewer_example
    │   │   ├── ReadMe.md
    │   │   └── dataviewer_example.py
    │   ├── docs
    │   │   └── Spyder_datarealigner_guide_en.docx
    │   ├── ecgHR_detector_example
    │   │   ├── README.md
    │   │   ├── SampleDataforECG_HR_Detector.zip
    │   │   └── ecgHR_detector_example.py
    │   ├── full_experiment_example
    │   │   └── README.md
    │   ├── hr_scorer_example
    │   │   ├── README.MD
    │   │   ├── SampleDataForHRScorer.zip
    │   │   └── hr_scorer_example.py
    │   ├── hyperscanning_viewer
    │   │   └── hyperscanning_viewer.py
    │   ├── lsl_stream_viewer
    │   │   ├── README.md
    │   │   ├── lsl_viewer_ACC_GYRO.ini
    │   │   ├── lsl_viewer_EDA.ini
    │   │   ├── lsl_viewer_MAG_TEMP.ini
    │   │   └── lsl_viewer_PPG.ini
    │   ├── periodizer_example
    │   │   ├── coincidence_sum_example.py
    │   │   └── periodizer_example.py
    │   ├── tapdetector_example
    │   │   ├── ExampleDataForTapDetector.zip
    │   │   ├── README.md
    │   │   ├── tapdetector_example.py
    │   │   ├── tapdetector_extractdata_example.py
    │   │   └── tapdetector_loaddata_example.py
    │   ├── timestamp_converter_example
    │   │   ├── README.md
    │   │   ├── SampleDataForTimestampConverter.zip
    │   │   └── timestamp_conveter_example.py
    │   ├── validation_examples
    │   │   └── brainproducts_validation_example.py
    │   └── wav_reading_example
    │       ├── ReadMe.md
    │       └── wav_reading_example.py
    └── testing
        ├── battery_level_noise_check.py
        ├── data_check.py
        ├── dummy_data_check.py
        ├── timestamp_check.py
        ├── timestamp_file_move.py
        └── timestamp_normalize.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 EmotiBit
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Data Analysis Guide
2 | - Check out the [`py`](./py) folder to start using tools like the data viewer to interactively analyze recorded data.
3 | - Check out the [`matlab`](./matlab) folder to start using examples written in MATLAB.
4 |
--------------------------------------------------------------------------------
/assets/Example-dataDirectory.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/assets/Example-dataDirectory.png
--------------------------------------------------------------------------------
/assets/PythonDataViewer.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/assets/PythonDataViewer.jpg
--------------------------------------------------------------------------------
/assets/anaconda-choosing-environment.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/assets/anaconda-choosing-environment.jpg
--------------------------------------------------------------------------------
/assets/anaconda-launch-spyder.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/assets/anaconda-launch-spyder.jpg
--------------------------------------------------------------------------------
/assets/anaconda-prontpt-start.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/assets/anaconda-prontpt-start.jpg
--------------------------------------------------------------------------------
/matlab/README.txt:
--------------------------------------------------------------------------------
1 | These programs were developed with MATLAB R2019a.
2 | In MATLAB, add the emotibit libraries to the path to be able to use them, e.g.:
3 | EX: addpath('C:\Users\marie\Documents\matlab\emotibit\datarealigner')
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/alignData.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Realign data2 by a given delay and
3 | % match both waves' start and stop times
4 | function [x1,y1,x2,y2] = alignData(delay,x1,y1,x2,y2)
5 |
6 | x2 = x2+delay;
7 | start = max(x1(1),x2(1));
8 | stop = min(x1(end),x2(end));
9 |
10 | x1SubIds = find(x1>=start & x1<=stop);
11 | x1 = x1(x1SubIds);
12 | y1 = y1(x1SubIds);
13 |
14 | x2SubIds = find(x2>=start & x2<=stop);
15 | x2 = x2(x2SubIds);
16 | y2 = y2(x2SubIds);
17 |
18 | end
19 |
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/arrangeRawData.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Remove DC from signals and match amplitudes
3 | function [x1,y1,x2,y2] = arrangeRawData(invert,x1,y1,x2,y2)
4 |
5 | % removing DC
6 | y1 = y1-mean(y1);
7 | y2 = y2-mean(y2);
8 |
9 | % Taking the middle 1/3 of the data to get peak-to-peak amplitude without the initial artifacts
10 | third = (x2(end) - x2(1))/3;
11 | x2SubIds = find(x2>=third & x2<=(x2(end)-third));
12 | y2MaxAmp = max(y2(x2SubIds))-min(y2(x2SubIds));
13 | x1SubIds = find(x1>=third & x1<=(x1(end)-third));
14 | y1MaxAmp = max(y1(x1SubIds))-min(y1(x1SubIds));
15 |
16 | % Matching Emotibit amplitude to FlexComp
17 | y2= y2/(y2MaxAmp/y1MaxAmp);
18 |
19 | % Inverting Emotibit PPG data
20 | if invert
21 | y2 = -y2;
22 | end
23 |
24 | % figure
25 | % sgtitle('Raw Data');
26 | % plot(x1,y1);
27 | % hold on;
28 | % plot(x2,y2);
29 |
30 | end
31 |
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/getDelay.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Find optimal positive or negative delay
3 | % return delay in seconds
4 | function delay = getDelay(y1,y2, sRate, maxDelayInSec)
5 |
6 | [corrNeg, idNeg] = getDelayCorr(y1,y2,sRate*maxDelayInSec);
7 | [corrPos, idPos] = getDelayCorr(y2,y1,sRate*maxDelayInSec);
8 |
9 | [val,id]=max([corrNeg,corrPos]);
10 |
11 | if id == 1
12 | % y2 needs to be moved backward
13 | delay = -idNeg/sRate;
14 | else
15 | % y2 needs to be moved forward
16 | delay = idPos/sRate;
17 | end
18 |
19 | end
20 |
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/getDelayAndAlignData.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Get a subsection of data and interpolate Emotibit Data at Flexcomp frequency
3 | % Calculate optimal delay and realign data
4 | function [delay,x1,y1,x2,y2] = getDelayAndAlignData(x1, y1, x2, y2, sRate, MaxDelayInSec, subsectionDuration)
5 |
6 | [subX1,subY1,splineX2,splineY2] = splineSubSection(x1,y1,x2,y2,subsectionDuration);
7 | delay = getDelay(subY1, splineY2, sRate, MaxDelayInSec);
8 | [x1,y1,x2,y2] = alignData(delay,x1,y1,x2,y2);
9 |
10 | end
11 |
12 |
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/getDelayCorr.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Get optimal correlation of y1 and delayed y2
3 | function [delayCorr, delayId] = getDelayCorr(y1,y2, maxDelay)
4 | step = 1:1:maxDelay;
5 | for n = step
6 | delayCorr(n) = sum(y1(1:end-n+1).*y2(n:end));
7 | end
8 | [delayCorr, delayId] = max(delayCorr);
9 | end
10 |
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/getRawDataFromFile.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Get raw data from Flexcomp .txt file and Emotibit parser .csv file
3 | function [DataType,FCompTime,FCompData,EmoTime,EmoData] = getRawDataFromFile(FCompTimeSection,FCompEDASection,FCompPPGSection,EmoTimeSection,EmoDataSection)
4 |
5 | % Get Datatype
6 | prompt = ['What type of Emotibit Sensor Data will you realign? (EA/PG/PR/PI)' '\n'];
7 | DataType = input(prompt,'s');
8 | if ~ismember(DataType,{'EA','PG','PR','PI'})
9 | error('Please select between EA/PG/PR/PI');
10 | end
11 |
12 | % Get FlexComp Data
13 | disp('Please select FlexComp File : ');
14 | [FCompFileGolden,FCompPath] = uigetfile('*.txt');
15 | if isequal(FCompFileGolden,0)
16 | error('User selected Cancel');
17 | else
18 | disp(['User selected FlexComp file : ', fullfile(FCompPath,FCompFileGolden)]);
19 | end
20 |
21 | FlexCompFile = [FCompPath 'copy.txt'];
22 | copyfile([FCompPath FCompFileGolden],FlexCompFile)
23 | % Change commas for point to be able to read floats
24 | file = memmapfile(FlexCompFile, 'writable', true );
25 | comma = ',';
26 | point = '.';
27 | file.Data( transpose( file.Data==comma) ) = point;
28 | clear file;
29 |
30 | % Create FlexComp matrix
31 | FCompMatrix= readmatrix(FlexCompFile);
32 | delete(FlexCompFile);
33 | FCompTime = FCompMatrix(2:end,FCompTimeSection);
34 |
35 | if DataType == 'EA'
36 | FCompData = FCompMatrix(2:end,FCompEDASection);
37 | else
38 | FCompData = FCompMatrix(2:end,FCompPPGSection);
39 | end
40 |
41 | % Get Emotibit Data
42 | disp(['Please select Emotibit ' DataType ' File : '] );
43 | [EmoFile,EmoPath] = uigetfile('*.csv');
44 | if isequal(EmoFile,0)
45 | error('User selected Cancel');
46 | else
47 | disp(['User selected Emotibit file : ', fullfile(EmoPath,EmoFile)]);
48 | end
49 | EmoFile = [EmoPath EmoFile];
50 |
51 | % Create Emotibit matrix
52 | EmoData = readmatrix(EmoFile,'Range', EmoDataSection);
53 | EmoTime = readmatrix(EmoFile,'Range', EmoTimeSection);
54 | EmoTime = EmoTime - floor(EmoTime(1));
55 |
56 | end
57 |
58 |
--------------------------------------------------------------------------------
/matlab/emotibit/datarealigner/splineSubSection.m:
--------------------------------------------------------------------------------
1 | %% Created on July 3rd by Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
2 | % Interpolate a subsection of wave 2 at wave 1 frequency and return both
3 | % subsections
4 | function [subX1,subY1,splineX2,splineY2] = splineSubSection(x1,y1,x2,y2,subSectionDuration)
5 |
6 | % Getting middle subsection
7 | startSection = x1(round(size(x1,1)/2)) - subSectionDuration/2; % round() avoids a non-integer index when x1 has odd length
8 | stopSection = startSection+subSectionDuration;
9 |
10 | x1SubIds = find(x1>=startSection & x1<=stopSection);
11 | subX1 = x1(x1SubIds);
12 | subY1 = y1(x1SubIds);
13 |
14 | x2SubIds = find(x2>=startSection & x2<=stopSection);
15 | subX2 = x2(x2SubIds);
16 | subY2 = y2(x2SubIds);
17 |
18 | % Applying spline to Emotibit Data at FlexComp sampling rate
19 | splineX2 = subX1;
20 | splineY2 = spline(subX2,subY2,splineX2);
21 |
22 | % figure
23 | % sgtitle('Sub Section and Spline Data');
24 | % plot(subX1,subY1);
25 | % hold on;
26 | % plot(splineX2,splineY2);
27 |
28 | end
29 |
--------------------------------------------------------------------------------
/matlab/examples/datarealigner_example.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/matlab/examples/datarealigner_example.m
--------------------------------------------------------------------------------
/py/README.md:
--------------------------------------------------------------------------------
1 | # Welcome to the EmotiBit Python Library
2 |
3 | ## Setup Anaconda and Spyder
4 | * Setup Anaconda Python
5 | * See these helpful instructions - https://medium.com/@Shreedharvellay/anaconda-jupyter-spyder-things-you-need-to-know-3c808d824739
6 | * Briefly:
7 | * Install Anaconda - https://www.anaconda.com/download/
8 | * Download or clone EmotiBit_Biometric_Lib to your computer
9 | * Open Anaconda Navigator
10 | * Import EmotiBit_Biometric_Lib/py/anaconda-environments/EmotiBit-pyenv.yml into Anaconda Navigator Environments
12 | * Launch Spyder
13 | * Open Tools>PYTHONPATH manager and add the path to the EmotiBit_Biometric_Lib/py folder
14 | * Optional: Set graphs to open in an interactive window
15 | * Open Tools>Preferences>IPython console>Graphics>Backend>Automatic
16 | * [ToDo: insert screenshot]
17 | * Close and re-launch Spyder
18 |
19 | [comment]: <> (**Note:** Sample data has been provided. You should be able to visualize the data just by clicking `Run` button in spyder.)
20 |
21 | ## Check out the [examples](./examples) folder to start working with the recorded data.
22 |
23 | ### Python Data Viewer
24 | The Python Data Viewer is an interactive tool for visualizing the data recorded on the EmotiBit SD card.
25 | - Open the [`dataviewer_example.py`](./examples/dataviewer_example) example under the `py/examples/dataviewer_example` folder in Spyder, or launch the viewer programmatically as sketched below.
26 |
27 |
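28 | ### Quick start from the IPython console
29 | As a minimal sketch (assuming the `py` folder is on your PYTHONPATH, and using `/path/to/data` and `2019-07-01_data` as placeholders for your own parsed data directory and file base name), the Data Viewer can also be launched programmatically:
30 | 
31 | ```python
32 | import emotibit.dataviewer as dataviewer
33 | 
34 | # file_base is the prefix shared by the parsed CSV files,
35 | # e.g. "2019-07-01_data" for files like 2019-07-01_data_EA.csv
36 | viewer = dataviewer.DataViewer("/path/to/data", "2019-07-01_data",
37 |                                hide_dc_tags=[], usernote_toggle=True)
38 | ```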
--------------------------------------------------------------------------------
/py/anaconda-environments/EmotiBit-pyenv.yml:
--------------------------------------------------------------------------------
1 | name: EmotiBit-pyenv
2 | channels:
3 | - defaults
4 | dependencies:
5 | - alabaster=0.7.12=py37_0
6 | - argh=0.26.2=py37_0
7 | - astroid=2.4.1=py37_0
8 | - atomicwrites=1.4.0=py_0
9 | - attrs=19.3.0=py_0
10 | - autopep8=1.4.4=py_0
11 | - babel=2.8.0=py_0
12 | - backcall=0.1.0=py37_0
13 | - bcrypt=3.1.7=py37he774522_0
14 | - blas=1.0=mkl
15 | - bleach=3.1.4=py_0
16 | - ca-certificates=2022.07.19=haa95532_0
17 | - category_encoders=2.5.0=py37hd4e2768_1
18 | - certifi=2022.9.14=py37haa95532_0
19 | - cffi=1.14.0=py37h7a1dbc1_0
20 | - chardet=3.0.4=py37_1003
21 | - cloudpickle=1.4.1=py_0
22 | - colorama=0.4.3=py_0
23 | - cryptography=2.9.2=py37h7a1dbc1_0
24 | - cycler=0.10.0=py37_0
25 | - decorator=4.4.2=py_0
26 | - defusedxml=0.6.0=py_0
27 | - diff-match-patch=20181111=py_0
28 | - docutils=0.16=py37_0
29 | - entrypoints=0.3=py37_0
30 | - flake8=3.7.9=py37_0
31 | - freetype=2.9.1=ha9979f8_1
32 | - future=0.18.2=py37_0
33 | - icc_rt=2019.0.0=h0cc432a_1
34 | - icu=58.2=ha925a31_3
35 | - idna=2.9=py_1
36 | - imagesize=1.2.0=py_0
37 | - importlib-metadata=1.6.0=py37_0
38 | - importlib_metadata=1.6.0=0
39 | - intel-openmp=2020.1=216
40 | - intervaltree=3.0.2=py_0
41 | - ipykernel=5.1.4=py37h39e3cac_0
42 | - ipython=7.13.0=py37h5ca1d4c_0
43 | - ipython_genutils=0.2.0=py37_0
44 | - isort=4.3.21=py37_0
45 | - jedi=0.15.2=py37_0
46 | - jinja2=2.11.2=py_0
47 | - joblib=1.1.0=pyhd3eb1b0_0
48 | - jpeg=9b=hb83a4c4_2
49 | - jsonschema=3.2.0=py37_0
50 | - jupyter_client=6.1.3=py_0
51 | - jupyter_core=4.6.3=py37_0
52 | - keyring=21.1.1=py37_2
53 | - kiwisolver=1.2.0=py37h74a9793_0
54 | - lazy-object-proxy=1.4.3=py37he774522_0
55 | - libpng=1.6.37=h2a8f88b_0
56 | - libsodium=1.0.16=h9d3ae62_0
57 | - libspatialindex=1.9.3=h33f27b4_0
58 | - markupsafe=1.1.1=py37he774522_0
59 | - matplotlib=3.1.3=py37_0
60 | - matplotlib-base=3.1.3=py37h64f37c6_0
61 | - mccabe=0.6.1=py37_1
62 | - mistune=0.8.4=py37he774522_0
63 | - mkl=2020.1=216
64 | - mkl-service=2.3.0=py37hb782905_0
65 | - mkl_fft=1.0.15=py37h14836fe_0
66 | - mkl_random=1.1.1=py37h47e9c7a_0
67 | - nbconvert=5.6.1=py37_0
68 | - nbformat=5.0.6=py_0
69 | - numpy=1.18.1=py37h93ca92e_0
70 | - numpy-base=1.18.1=py37hc3f5095_1
71 | - numpydoc=0.9.2=py_0
72 | - openssl=1.1.1q=h2bbff1b_0
73 | - packaging=20.3=py_0
74 | - pandas=1.2.4=py37hf11a4ad_0
75 | - pandoc=2.2.3.2=0
76 | - pandocfilters=1.4.2=py37_1
77 | - paramiko=2.7.1=py_0
78 | - parso=0.5.2=py_0
79 | - pathtools=0.1.2=py_1
80 | - patsy=0.5.2=py37haa95532_1
81 | - pexpect=4.8.0=py37_0
82 | - pickleshare=0.7.5=py37_0
83 | - pip=20.0.2=py37_3
84 | - pluggy=0.13.1=py37_0
85 | - prompt-toolkit=3.0.4=py_0
86 | - prompt_toolkit=3.0.4=0
87 | - psutil=5.7.0=py37he774522_0
88 | - pycodestyle=2.5.0=py37_0
89 | - pycparser=2.20=py_0
90 | - pydocstyle=4.0.1=py_0
91 | - pyflakes=2.1.1=py37_0
92 | - pygments=2.6.1=py_0
93 | - pylint=2.5.2=py37_0
94 | - pynacl=1.3.0=py37h62dcd97_0
95 | - pyopenssl=19.1.0=py37_0
96 | - pyparsing=2.4.7=py_0
97 | - pyqt=5.9.2=py37h6538335_2
98 | - pyrsistent=0.16.0=py37he774522_0
99 | - pysocks=1.7.1=py37_0
100 | - python=3.7.7=h81c818b_4
101 | - python-dateutil=2.8.1=py_0
102 | - python-jsonrpc-server=0.3.4=py_0
103 | - python-language-server=0.31.10=py37_0
104 | - pytz=2020.1=py_0
105 | - pywin32=227=py37he774522_1
106 | - pywin32-ctypes=0.2.0=py37_1000
107 | - pyyaml=5.3.1=py37he774522_0
108 | - pyzmq=18.1.1=py37ha925a31_0
109 | - qdarkstyle=2.8.1=py_0
110 | - qt=5.9.7=vc14h73c81de_0
111 | - qtawesome=0.7.0=py_0
112 | - qtconsole=4.7.4=py_0
113 | - qtpy=1.9.0=py_0
114 | - requests=2.23.0=py37_0
115 | - rope=0.17.0=py_0
116 | - rtree=0.9.4=py37h21ff451_1
117 | - scikit-learn=1.0.2=py37hf11a4ad_1
118 | - scipy=1.6.2=py37h14eb087_0
119 | - setuptools=46.4.0=py37_0
120 | - sip=4.19.8=py37h6538335_0
121 | - six=1.14.0=py37_0
122 | - snowballstemmer=2.0.0=py_0
123 | - sortedcontainers=2.1.0=py37_0
124 | - sphinx=3.0.3=py_0
125 | - sphinxcontrib-applehelp=1.0.2=py_0
126 | - sphinxcontrib-devhelp=1.0.2=py_0
127 | - sphinxcontrib-htmlhelp=1.0.3=py_0
128 | - sphinxcontrib-jsmath=1.0.1=py_0
129 | - sphinxcontrib-qthelp=1.0.3=py_0
130 | - sphinxcontrib-serializinghtml=1.1.4=py_0
131 | - spyder=4.1.3=py37_0
132 | - spyder-kernels=1.9.1=py37_0
133 | - sqlite=3.31.1=h2a8f88b_1
134 | - statsmodels=0.12.2=py37h2bbff1b_0
135 | - testpath=0.4.4=py_0
136 | - threadpoolctl=2.2.0=pyh0d69192_0
137 | - toml=0.10.0=py37h28b3542_0
138 | - tornado=6.0.4=py37he774522_1
139 | - traitlets=4.3.3=py37_0
140 | - typed-ast=1.4.1=py37he774522_0
141 | - ujson=1.35=py37hfa6e2cd_0
142 | - urllib3=1.25.8=py37_0
143 | - vc=14.1=h0510ff6_4
144 | - vs2015_runtime=14.16.27012=hf0eaf9b_2
145 | - watchdog=0.10.2=py37_0
146 | - wcwidth=0.1.9=py_0
147 | - webencodings=0.5.1=py37_1
148 | - wheel=0.34.2=py37_0
149 | - win_inet_pton=1.1.0=py37_0
150 | - wincertstore=0.2=py37_0
151 | - wrapt=1.11.2=py37he774522_0
152 | - yaml=0.1.7=hc54c509_2
153 | - yapf=0.28.0=py_0
154 | - zeromq=4.3.1=h33f27b4_3
155 | - zipp=3.1.0=py_0
156 | - zlib=1.2.11=h62dcd97_4
157 | - pip:
158 | - pyxdf==1.16.3
159 |
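160 | # Note: instead of importing this file through Anaconda Navigator, the same
161 | # environment can be created from the Anaconda prompt (run from this folder):
162 | #   conda env create -f EmotiBit-pyenv.yml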
--------------------------------------------------------------------------------
/py/emotibit/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Apr 17 11:10:10 2019
4 |
5 | @author: Sean Montgomery
6 |
7 | Modified on Mon March 25 2024
8 |
9 | @author: Marie-Eve Bilodeau
10 | """
11 |
12 | name = "emotibit"
13 |
14 |
15 | __all__ = ["datasyncer", "datarealigner", "tapdetector", "signal", "timestamp_converter", "hr_scorer", "ecgHR_detector"]
--------------------------------------------------------------------------------
/py/emotibit/datarealigner.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Tue July 7 2019
3 |
4 | @author: Marie-Eve Bilodeau
5 | """
6 |
7 | __version__ = '0.0.2'
8 |
9 |
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 | from scipy import interpolate
13 |
14 | class DataRealigner:
15 |
16 | timestamp = []
17 | data = []
18 |
19 | def __init__(self):
20 | self.timestamp = []
21 | self.data = []
22 |
23 | def load_data(self, timestamp0, data0, timestamp1, data1):
24 | """ Load data from array
25 | """
26 | self.timestamp.append(timestamp0)
27 | self.timestamp.append(timestamp1)
28 | self.data.append(data0)
29 | self.data.append(data1)
30 |
31 | def match_data_sets(self, invert):
32 | """ Remove DC and match amplitude
33 | """
34 | mean_y0 = np.mean(self.data[0])
35 | mean_y1 = np.mean(self.data[1])
36 | self.data[0] = np.subtract(self.data[0], mean_y0)
37 | self.data[1] = np.subtract(self.data[1], mean_y1)
38 | amp_y0 = np.amax(self.data[0]) - np.amin(self.data[0])
39 | amp_y1 = np.amax(self.data[1]) - np.amin(self.data[1])
40 | amp = amp_y1/amp_y0
41 | self.data[1] = np.divide(self.data[1], amp)
42 | if invert:
43 | self.data[1] = -self.data[1]
44 |
45 | def get_data_subsections(self, x, y, start, stop):
46 | """ Get subsection a data from start to stop time
47 | """
48 | id = [i for i in range(len(x)) if stop>=x[i]>=start]
49 | sub_x = [x[i] for i in id]
50 | sub_y = [y[i] for i in id]
51 | return sub_x, sub_y
52 |
53 | def spline_subsections(self, start, stop):
54 | """ Make a spline interpolation on given subsection
55 | """
56 | x0_new, y0_new = self.get_data_subsections(self.timestamp[0], self.data[0], start, stop)
57 | x1_new, y1_new = self.get_data_subsections(self.timestamp[1], self.data[1], start, stop)
58 | tck = interpolate.splrep(x1_new, y1_new, s=0)
59 | x1_new = x0_new
60 | y1_new = interpolate.splev(x1_new, tck, der=0)
61 | return x0_new, y0_new, x1_new, y1_new
62 |
63 | def get_delay_correlation(self, y0, y1, max_delay):
64 | """ Get correlation between y0 and each delayed y1
65 | return the max correlation and its delayed id
66 | """
67 | delay_correlation = []
68 | for n in range(1, max_delay):
69 | delay_correlation.append(sum(np.array(y0[0:-n])*np.array(y1[n:])))
70 | max_correlation = np.amax(delay_correlation)
71 | id = [i for i in range(len(delay_correlation)) if delay_correlation[i]>=max_correlation]
72 | return max_correlation, id
73 |
74 | def get_delay(self, spline_start_time, spline_stop_time, max_delay, srate):
75 | """ Get max correlation of positive and negative delay
76 | return the delay in seconds
77 | """
78 | x0, y0, x1, y1 = self.spline_subsections(spline_start_time, spline_stop_time)
79 | max_neg, id_neg = self.get_delay_correlation(y0, y1, max_delay*srate)
80 | max_pos, id_pos = self.get_delay_correlation(y1, y0, max_delay*srate)
81 | max_correlation=np.amax([max_neg,max_pos])
82 | if max_correlation == max_neg:
83 | # Data 2 needs to be moved backward
84 | delay = -id_neg[0]/srate
85 | else:
86 | # Data 2 needs to be moved forward
87 | delay = id_pos[0]/srate
88 | return delay
89 |
90 |
91 | def realign_data(self, delay):
92 | """ Realign data1 with a given delay, match both data starting a ending time
93 | """
94 | self.timestamp[1] = np.add(self.timestamp[1], delay)
95 | start = np.amax([self.timestamp[0][0],self.timestamp[1][0]])
96 | stop = np.amin([self.timestamp[0][-1],self.timestamp[1][-1]])
97 | self.timestamp[0], self.data[0] = self.get_data_subsections(self.timestamp[0], self.data[0], start, stop)
98 | self.timestamp[1], self.data[1] = self.get_data_subsections(self.timestamp[1], self.data[1], start, stop)
99 |
100 | def get_delay_and_realign_data(self, spline_start_time,spline_stop_time, max_delay, srate):
101 | delay = self.get_delay(spline_start_time, spline_stop_time, max_delay, srate)
102 | self.realign_data(delay)
103 | return delay
104 |
105 | def upsample_emo_at_flex(self):
106 | """
107 | Upsample Emotibit Data at Flexcomp Timestamp
108 | """
109 | start = np.amax([self.timestamp[0][0],self.timestamp[1][0]])
110 | stop = np.amin([self.timestamp[0][-1],self.timestamp[1][-1]])
111 | self.timestamp[0], self.data[0], self.timestamp[1], self.data[1] = self.spline_subsections(start, stop)
112 |
113 | def downsample(self, start, stop):
114 | """ Make a spline interpolation at x1 sampling rate on given subsection
115 | """
116 |
117 | x0_new, y0_new = self.get_data_subsections(self.timestamp[0], self.data[0], start, stop)
118 | x1_new, y1_new = self.get_data_subsections(self.timestamp[1], self.data[1], start, stop)
119 | tck = interpolate.splrep(x0_new, y0_new, s=0)
120 | x0_new = x1_new
121 | y0_new = interpolate.splev(x0_new, tck, der=0)
122 | return x0_new, y0_new, x1_new, y1_new
123 |
124 | def downsample_flex_at_emo(self):
125 | """
126 | Downsample Flexcomp Data at Emotibit Timestamp
127 | """
128 | start = np.amax([self.timestamp[0][0],self.timestamp[1][0]])
129 | stop = np.amin([self.timestamp[0][-1],self.timestamp[1][-1]])
130 | self.timestamp[0], self.data[0], self.timestamp[1], self.data[1] = self.downsample(start, stop)
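131 | 
132 | 
133 | if __name__ == "__main__":
134 |     # Minimal usage sketch (illustrative only, not part of the library):
135 |     # realign two synthetic 100 Hz sine waves where the second lags by 0.5 s.
136 |     t = np.arange(0, 10, 0.01)
137 |     realigner = DataRealigner()
138 |     realigner.load_data(t, np.sin(t), t + 0.5, np.sin(t))
139 |     realigner.match_data_sets(invert=False)
140 |     # Estimate the delay over the 2-8 s subsection, searching +/-2 s at 100 Hz.
141 |     delay = realigner.get_delay_and_realign_data(2, 8, max_delay=2, srate=100)
142 |     print("Estimated delay: " + str(delay) + " s")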
--------------------------------------------------------------------------------
/py/emotibit/datasyncer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Apr 15 15:57:57 2019
4 |
5 | @author: Sean Montgomery
6 | """
7 |
8 | # __all__ = ['data0', 'data1', 'load_data_0', 'load_data_1', 'plotTimestampHist']
9 | __version__ = '0.0.1'
10 |
11 | import numpy
12 | import csv
13 | import matplotlib.pyplot as plt
14 | import locale
15 |
16 |
17 | # class datasyncer:
18 |
19 | class TimeSeries:
20 | timestamp = []
21 | data = []
22 |
23 | def __init__(self):
24 | self.timestamp = []
25 | self.data = []
26 |
27 |
28 | class CsvFileInfo:
29 | file_dir = None
30 | file_name = None
31 | file_path = None
32 | data_col = 1
33 | timestamp_col = 0
34 | data_start_row = 0
35 | delimiter = ","
36 |
37 | def __init__(self):
38 | self.file_dir = None
39 | self.file_name = None
40 | self.file_path = None
41 | self.data_col = 1
42 | self.timestamp_col = 0
43 | self.data_start_row = 0
44 | self.delimiter = ","
45 |
46 |
47 | class DataSyncer:
48 | time_series = []
49 | csv_file_info = []
50 |
51 | def __init__(self):
52 | self.time_series = []
53 | self.csv_file_info = []
54 | self.dataLoss = []
55 |
56 | # class DataType(Enum):
57 | # EMOTIBIT = 0
58 | # FLEXCOMP_INFINITY = 1
59 | # length = 2
60 |
61 | def load_data(self, file_dirs, file_names, data_cols, timestamp_col=0, data_start_row=0, delimiter=","):
62 | """Load data from csv file
63 | """
64 | if not isinstance(file_dirs, list):
65 | file_dirs = [file_dirs]
66 | if not isinstance(file_names, list):
67 | file_names = [file_names]
68 | if not isinstance(data_cols, list):
69 | data_cols = [data_cols]
70 | for file_dir in file_dirs:
71 | for file_name in file_names:
72 | file_path = file_dir + "/" + file_name
73 | for data_col in data_cols:
74 | # ToDo: improve efficiency by parsing file once for all data_cols
75 | last_index = len(self.time_series)
76 | self.time_series.append(TimeSeries())
77 | self.csv_file_info.append(CsvFileInfo())
78 |
79 | self.csv_file_info[last_index].file_dir = file_dir
80 | self.csv_file_info[last_index].file_name = file_name
81 | self.csv_file_info[last_index].file_path = file_path
82 | self.csv_file_info[last_index].data_col = data_col
83 | self.csv_file_info[last_index].timestamp_col = timestamp_col
84 | self.csv_file_info[last_index].data_start_row = data_start_row
85 | self.csv_file_info[last_index].delimiter = delimiter
86 |
87 | dialects = csv.list_dialects()
88 | print("csv dialects:")
89 | print(*dialects, "\n")
90 | counter = 0
91 | print("Loading data into time_series[" + str(last_index) + "] from " + file_path)
92 | with open(file_path, newline='') as csvfile:
93 | dataReader = csv.reader(csvfile, delimiter=delimiter, quotechar='|')
94 | for row in dataReader:
95 | if (counter >= data_start_row and len(row) > timestamp_col and len(row) > data_col and not
96 | row[timestamp_col].isalpha()):
97 | if "UN" in file_path or "DC" in file_path or "DO" in file_path:
98 | try:
99 | self.time_series[last_index].timestamp.append(locale.atof(row[timestamp_col]))
100 | self.time_series[last_index].data.append(row[data_col])
101 | except ValueError:
102 | print(str(counter) + row[timestamp_col] + ", " + row[data_col])
103 | else:
104 | try:
105 | self.time_series[last_index].timestamp.append(locale.atof(row[timestamp_col]))
106 | self.time_series[last_index].data.append(locale.atof(row[data_col]))
107 | except ValueError:
108 | print(str(counter) + "--" + row[timestamp_col] + ", " + row[data_col])
109 | # TODO: create a better exception
110 | self.time_series[last_index].data.append(self.time_series[last_index].data[-1])
111 | self.dataLoss.append((last_index, self.time_series[last_index].timestamp[-1] - self.time_series[last_index].timestamp[0]))
112 | else:
113 | print("**** Skipping row " + str(counter) + " ****")
114 | print(row)
115 | counter += 1
116 |
117 | def plot_timestamp_hist(self, nbins=100):
118 | """Plot histograms of diff(timestamps)
119 | """
120 | # fig, axs = plt.subplots(nrows=1, ncols=len(self.time_series), num="Timestamp Histogram")
121 | plt.close("Timestamp Histogram")
122 | fig, axs = plt.subplots(nrows=1, ncols=len(self.time_series), num="Timestamp Histogram")
123 | fig.set_size_inches([12, 4])
124 | if len(self.time_series) > 1:
125 | for t in range(len(self.time_series)):
126 | plt.sca(axs[t])
127 | plt.cla()
128 | axs[t].hist(numpy.diff(self.time_series[t].timestamp), nbins)
129 | # print(self.csv_file_info[t].file_name + " col:" + str(self.csv_file_info[t].data_col))
130 | axs[t].set_title(self.csv_file_info[t].file_name + " col:" + str(self.csv_file_info[t].data_col))
131 | else:
132 | plt.hist(numpy.diff(self.time_series[0].timestamp), nbins)
133 | plt.title(self.csv_file_info[0].file_name + " col:" + str(self.csv_file_info[0].data_col))
134 | fig.tight_layout()
135 |
136 | def select_sync_times(self):
137 | """Plot data to manually select sync times across data files
138 | """
139 | plt.close("Select Sync Times")
140 | fig, axs = plt.subplots(nrows=len(self.time_series), ncols=1, num="Select Sync Times")
141 | fig.set_size_inches([14, 7])
142 | for t in range(len(self.time_series)):
143 | plt.sca(axs[t])
144 | plt.cla()
145 | axs[t].plot(self.time_series[t].timestamp, self.time_series[t].data)
146 | # print(self.csv_file_info[t].file_name + " col:" + str(self.csv_file_info[t].data_col))
147 | axs[t].set_title(self.csv_file_info[t].file_name + " col:" + str(self.csv_file_info[t].data_col))
148 | fig.tight_layout()
149 |
150 | def test(self):
151 | print("test worked yay")
152 |
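153 | 
154 | if __name__ == "__main__":
155 |     # Minimal usage sketch (illustrative only; the directory and file names are
156 |     # hypothetical placeholders for parsed EmotiBit recordings).
157 |     syncer = DataSyncer()
158 |     syncer.load_data("/path/to/data",
159 |                      ["2019-07-01_data_EA.csv", "2019-07-01_data_PG.csv"],
160 |                      data_cols=7, timestamp_col=0, data_start_row=1)
161 |     syncer.plot_timestamp_hist()
162 |     syncer.select_sync_times()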
--------------------------------------------------------------------------------
/py/emotibit/dataviewer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Thu Aug 8 12:34:23 2019
5 |
6 | @author: Nitin
7 | """
8 | import emotibit.datasyncer as syncer
9 |
10 | # import numpy as np
11 | # import csv
12 | # import tkinter as tk
13 | import matplotlib.pyplot as plt
14 | import locale
15 | import os
16 | from matplotlib.widgets import Slider, CheckButtons
17 | from bisect import bisect_left
18 | import platform
19 |
20 | # import pandas as pd
21 |
22 |
23 | class DataViewer:
24 | def __init__(self, file_dir, file_base, hide_dc_tags, usernote_toggle):
25 | self.file_dir0 = file_dir
26 | self.file_base = file_base
27 | self.file_ext = ".csv"
28 | self.cmd_hide_dc_tags = hide_dc_tags
29 | self.cmd_usernote_toggle = usernote_toggle
30 | self.data_types = ["EA", "SA", "SR", "SF","PI", "PR", "PG", "HR", "TH", "AX", "AY", "AZ", "GX", "GY", "GZ",
31 | "MX", "MY", "MZ", "T1","DC", "DO", "UN"] # add the aperiodic data types
32 | # TODO: try to come up with better grouping with less redundancy
33 | self.data_groups = {"accelerometer": ["AX", "AY", "AZ"],
34 | "gyroscope": ["GX", "GY", "GZ"],
35 | "magnetometer": ["MX", "MY", "MZ"],
36 | "heart-rate": ["PG", "PI", "PR"],
37 | "temp-hum": ["T0", "H0", "TH"],
38 | "imu": ["AX", "AY", "AZ", "GX", "GY", "GZ", "MX", "MY", "MZ"],
39 | "eda": ["EA", "EL", "ER"],
40 | "aperiodic": ["DC", "DO"],
41 | "push_messages": ["UN"]}
42 |
43 | # reading data
44 | self.my_syncer = syncer.DataSyncer()
45 | self.file_names0 = []
46 | self.absentTags = []
47 | for data_type in self.data_types:
48 | if os.path.isfile(self.file_dir0 + "/" + self.file_base + "_" + data_type + self.file_ext):
49 | self.file_names0.append(file_base + "_" + data_type + self.file_ext)
50 | else:
51 | self.absentTags.append(data_type)
52 | for tag in self.absentTags:
53 | self.data_types.remove(tag)
54 |
55 | # Identify the timestamp and data columns
56 | self.timestamp_col = 0 # Default to 1st column for timestamp
57 | self.data_col0 = [7] # Default to 7th column for data
58 | col_id_type = "EA"
59 | timestamp_headers = ["LocalTimestamp", "EpochTimestamp"] # ToDo: add optional input
60 | with open(self.file_dir0 + "/" + self.file_base + "_" + col_id_type + self.file_ext) as f:
61 | firstline = next(f)
62 | firstline = firstline.split("\n")[0]
63 | col_headers = firstline.split(",")
64 | for i in range(len(col_headers)):
65 | for h in timestamp_headers:
66 | if (col_headers[i] == h):
67 | self.timestamp_col = i
68 | print("Timestamp column = " + str(self.timestamp_col))
69 | if (col_headers[i] == col_id_type):
70 | self.data_col0 = i
71 | print("Data column = " + str(self.data_col0))
72 | # ToDo: add error handling
73 |
74 | self.data_start_row1 = 2
75 | self.myLocale = locale.getlocale() # Store current locale
76 | if platform.system() == "Darwin":
77 | location = 'en_US'
78 | elif platform.system() == "Windows":
79 | location = 'USA'
80 | else: location = 'en_US.UTF-8'  # assumed default for Linux and other platforms
81 | locale.setlocale(locale.LC_NUMERIC, location) # Switch to new locale to process file
82 | self.my_syncer.load_data(self.file_dir0, self.file_names0, self.data_col0, self.timestamp_col)
83 | locale.setlocale(locale.LC_NUMERIC, self.myLocale) # Set locale back to original
84 |
85 | # shifting the x axis to start from 0
86 | base_val = self.my_syncer.time_series[0].timestamp[0] # subtracting the smallest val from EA
87 | for i in range(len(self.my_syncer.time_series)):
88 | self.my_syncer.time_series[i].timestamp[:] = [stamp - base_val for stamp in
89 | self.my_syncer.time_series[i].timestamp]
90 |
91 | # Declaring all markers which are going to populate the plot apart from the data.
92 | # Achieved after reading non-data files
93 | # TODO: change the structure of markers to be in sync with data groups
94 | self.markers = {"points_DC": {"EA": [], "SA": [], "SR": [],
95 | "PI": [], "PR": [], "PG": [],
96 | "TH": [], "SF": [], "HR": [],
97 | "AX": [], "AY": [], "AZ": [],
98 | "GX": [], "GY": [], "GZ": [],
99 | "MX": [], "MY": [], "MZ": [], "T1":[]},
100 | "points_DO": {"EA": [], "SA": [], "SR": [],
101 | "PI": [], "PR": [], "PG": [],
102 | "TH": [], "SF": [], "HR": [],
103 | "AX": [], "AY": [], "AZ": [],
104 | "GX": [], "GY": [], "GZ": [],
105 | "MX": [], "MY": [], "MZ": [], "T1":[], "DC": []},
106 | "points_UN": []}
107 | # Set to false to stop processing DC and DO files below. The Data parser needs a patch to handle DC/DO version 2 before reading that data
108 | self.parseDo = False
109 | self.parseDc = False
110 | # reading all the markers from files and storing in markers dict
111 | for tag in self.data_groups["aperiodic"]: # for each aperiodic signal
112 | if tag not in self.absentTags:
113 | for i, (timestamp, data) in enumerate(
114 | zip(self.my_syncer.time_series[self.data_types.index(tag)].timestamp,
115 | self.my_syncer.time_series[self.data_types.index(tag)].data)): # for each line in the file
116 | if tag == "DC":
117 | if self.parseDc == True:
118 | self.markers["points_" + tag][data].append(timestamp)
119 |
120 | elif tag == "DO":
121 | if self.parseDo == True:
122 | self.markers["points_" + tag][data].append(timestamp)
123 |
124 | else:
125 | print("Error: unknown tag:" + tag)
126 | # TODO: come up with a better fix
127 | if "UN" in self.data_types:
128 | for tag in self.data_groups["push_messages"]:
129 | if tag not in self.absentTags:
130 | for i, (timestamp, data) in enumerate(zip(self.my_syncer.time_series[self.data_types.index(tag)].timestamp,
131 | self.my_syncer.time_series[self.data_types.index(
132 | tag)].data)): # for each line in the file
133 | if tag == "UN":
134 | self.markers["points_" + tag].append([timestamp, data])
135 | else:
136 | print("Error: Unknown tag")
137 |
138 | # Start of main plotting
139 | # generate the figure with subplots
140 | self.fig, self.axes = plt.subplots(nrows=9, ncols=2, sharex=True)
141 |
142 | # code for widgets
143 | plt.subplots_adjust(bottom=0.2, left=0.15)
144 |
145 | # uncomment the following lines to enable slider
146 | # axSlider = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor='lightgoldenrodyellow')
147 | # self.slider = Slider(axSlider, 'data', 10, self.my_syncer.time_series[0].timestamp[-1] - 10, valinit=10)
148 | # self.slider.on_changed(self.slide_through_data)
149 |
150 | # self.sliderEnableToggle = False
151 | # axButton = plt.axes([0.1, 0.15, 0.1, 0.05])
152 | # self.sliderEnableButton = CheckButtons(axButton, labels=["Enable slider", ""], actives=[False])
153 | # self.sliderEnableButton.on_clicked(self.enable_slider)
154 |
155 | self.TextAxesLeft = self.fig.add_subplot(position=[self.axes[0, 0].get_position().x0,
156 | self.axes[0, 0].get_position().y1,
157 | self.axes[0, 0].get_position().x1-self.axes[0, 0].get_position().x0,
158 | 0.001],sharex=self.axes[0, 0])
159 | self.TextAxesLeft.set_xlim([self.my_syncer.time_series[0].timestamp[0], self.my_syncer.time_series[0].timestamp[-1]])
160 | self.TextAxesLeft.get_xaxis().set_visible(False)
161 | self.TextAxesLeft.get_yaxis().set_visible(False)
162 |
163 | self.TextAxesRight = self.fig.add_subplot(position=[self.axes[0, 1].get_position().x0,
164 | self.axes[0, 1].get_position().y1,
165 | self.axes[0, 1].get_position().x1 - self.axes[0, 1].get_position().x0,
166 | 0.001], sharex=self.axes[0, 0])
167 | self.TextAxesRight.set_xlim([self.my_syncer.time_series[0].timestamp[0], self.my_syncer.time_series[0].timestamp[-1]])
168 | self.TextAxesRight.get_xaxis().set_visible(False)
169 | self.TextAxesRight.get_yaxis().set_visible(False)
170 |
171 | self.indicator = self.fig.add_subplot(position=[self.axes[8, 0].get_position().x0,
172 | self.axes[8, 0].get_position().y0 - 0.075, 0.75, 0.015])
173 | self.indicator.get_yaxis().set_visible(False)
174 | # self.indicatorLeft.set_ylabel("Complete Time Series", rotation="horizontal")
175 | self.indicator.set_xlim(
176 | [self.my_syncer.time_series[0].timestamp[0], self.my_syncer.time_series[0].timestamp[-1]])
177 |
178 |
179 | # add callbacks to the plot
180 | for i in range(9):
181 | for j in range(2):
182 | self.axes[i, j].callbacks.connect('xlim_changed', self.on_xlims_change)
183 | self.selected_axes = None
184 | self.selected_time = None
185 | self.temp_highlights = [] # stores the axvspan used to temporarily highlight the cursor
186 | self.cid0 = self.fig.canvas.mpl_connect('button_press_event', self.on_mouse_click)
187 | self.cid1 = self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
188 |
189 | # instance variables to hold all the marker lines
190 | self.lines_data = []
191 | self.lines_DC = []
192 | self.lines_DO = []
193 | self.lines_UN = []
194 | self.init_plot()
195 | plt.show()
196 |
197 | def init_plot(self):
198 | self.lines_data = []
199 | self.lines_DC = []
200 | self.lines_DO = []
201 | self.lines_UN = [] # used for the slider
202 |
203 | for j in range(2): # columns of subplot
204 | for i in range(9): # rows in subplot
205 | line = self.axes[i, j].plot(self.my_syncer.time_series[j * 9 + i].timestamp,
206 | self.my_syncer.time_series[j * 9 + i].data, linestyle='-', zorder=10,
207 | alpha=0.9)
208 | self.lines_data.append(line)
209 | self.axes[i, j].autoscale(enable=True, axis='y', tight=True)
210 |
211 | # to draw background color
212 | for loss in self.my_syncer.dataLoss:
213 | # marking a window of len 64 red
214 | self.axes[i, j].axvspan(loss[1], loss[1]+64, facecolor='r', alpha=0.5)
215 |
216 | # plotting markers once on initialization
217 |
218 | # to mark DO
219 | for tag in self.markers["points_DO"].keys():
220 | if self.parseDo and tag != "DC":
221 | try:
222 | plot_idx = (int(self.data_types.index(tag) % 9), int(self.data_types.index(tag) / 9))
223 | for point in self.markers["points_DO"][tag]: # for every point in the list
224 | line = self.axes[plot_idx[0], plot_idx[1]].axvline(x=point, color='k', label="DO", zorder=1, lw=0.75)
225 | self.lines_DO.append(line)
226 | except ValueError:
227 | print("Value Error")
228 |
229 | # to mark UN
230 | if self.cmd_usernote_toggle:
231 | for (point, note) in self.markers["points_UN"]:
232 | line = self.axes[i, j].axvline(x=point, color='g', label="UN")
233 | self.lines_UN.append(line)
234 |
235 | # to add the signal tag on the y-axis
236 | self.axes[i, j].set_ylabel(self.data_types[j * 9 + i])
237 |
238 | # to mark UN text on Text axes
239 | # TODO: Make the fontsize Accessible to the User
240 | for (point, note) in self.markers["points_UN"]:
241 | self.TextAxesLeft.text(point, 1, note, fontsize=12, rotation=45)
242 | self.TextAxesRight.text(point, 1, note, fontsize=12, rotation=45)
243 |
244 | # to mark DC
245 | for tag in self.markers["points_DC"].keys():
246 | if self.parseDc and tag not in self.cmd_hide_dc_tags:
247 | plot_idx = (int(self.data_types.index(tag) % 9), int(self.data_types.index(tag) / 9))
248 | for point in self.markers["points_DC"][tag]: # for every point in the list
249 | line = self.axes[plot_idx[0], plot_idx[1]].axvline(x=point, color='y', label="DC", zorder=1, lw=0.75)
250 | self.lines_DC.append(line)
251 |
252 | # to add the legend
253 | # plt.figlegend((self.lines_data[0], self.lines_DC[0], self.lines_UN[0]), labels=("Data", "DC", "UN"), loc='lower center', ncol=3, labelspacing=0.)
254 | self.fig.suptitle(self.file_base)
255 |
256 |
257 |
258 | def updateUN(self, new_xlim=(0, 0)):
259 |
260 | """
261 | Function to update the User notes displayed on the top of the subplots
262 | :param new_xlim: limits of the x axis in the current plot
263 | :return: None
264 | """
265 | if not self.TextAxesLeft.texts:
266 | return
267 | for text_left, text_right in zip(self.TextAxesLeft.texts, self.TextAxesRight.texts):
268 | if not (new_xlim[0] <= text_left.get_position()[0] <= new_xlim[1]):
269 | text_left.set_visible(False)
270 | text_right.set_visible(False)
271 | else:
272 | text_left.set_visible(True)
273 | text_right.set_visible(True)
274 |
275 |
276 | def on_xlims_change(self, axes):
277 | """
278 | Function used to set the visibility of user note tags when zooming in on the plots.
279 | Only handles user note tags, not the lines drawn on the subplots.
280 | :param axes: axes where the xlims were changed
281 | :return: None
282 | """
283 | new_xlim = tuple(axes.get_xlim())
284 | self.updateUN(new_xlim=new_xlim)
285 | self.indicator.clear()
286 | self.indicator.set_xlim(self.my_syncer.time_series[0].timestamp[0], self.my_syncer.time_series[0].timestamp[-1])
287 | self.indicator.axvspan(new_xlim[0], new_xlim[1], facecolor="g")
288 |
289 | def on_mouse_click(self, event):
290 | """
291 | Function used to update the instance variable:selected_axes which is then used by the
292 | hide_DC function
293 | :param event: mouse click event
294 | :return: None
295 | """
296 | print("mouse click detected")
297 | if event.inaxes:
298 | self.selected_axes = event.inaxes
299 | self.selected_time = event.xdata
300 | else:
301 | if len(self.temp_highlights):
302 | for highlight in self.temp_highlights:
303 | highlight.remove()
304 | self.temp_highlights = []
305 | self.fig.canvas.draw()
306 |
307 |
308 | def on_key_press(self, event):
309 | """
310 | Function that hides DC lines on the selected subplot
311 | :param event: key press event
312 | :return: None
313 | """
314 | print("entered key press change:", event.key)
315 | if event.key == " ": # hide the markers
316 | for line in self.lines_DC:
317 | if line in self.selected_axes.lines:
318 | line.set_visible(not line.get_visible())
319 | plt.pause(0.005)
320 | self.fig.canvas.draw()
321 | # TODO: replace the hard coded "10" with limits input by the user
322 | elif event.key == "right":
323 | if self.sliderEnableToggle:
324 | if self.slider.val + 10 <= self.my_syncer.time_series[0].timestamp[-1] - 10:
325 | self.slider.set_val(self.slider.val + 10)
326 |
327 | elif event.key == "left":
328 | if self.sliderEnableToggle:
329 | if self.slider.val - 10 >= self.my_syncer.time_series[0].timestamp[0] + 10:
330 | self.slider.set_val(self.slider.val - 10)
331 | elif event.key == "m":
332 | if not len(self.temp_highlights): # if the length == 0
333 | for j in range(2):
334 | for i in range(9):
335 | # TODO: change the hardcoded width of window
336 | highlight = self.axes[i, j].axvspan(self.selected_time - 1, self.selected_time + 1, facecolor='y', alpha=0.5)
337 | self.temp_highlights.append(highlight)
338 | self.fig.canvas.draw()
339 |
340 | elif event.key == "a":
341 | new_xlim = self.axes[0, 0].get_xlim()
342 | for j in range(2):
343 | for i in range(9):
344 | x_low_index = int((new_xlim[0]/self.my_syncer.time_series[j * 9 + i].timestamp[-1]) * len(self.my_syncer.time_series[j * 9 + i].timestamp)) - 1
345 | x_high_index = int((new_xlim[1]/self.my_syncer.time_series[j * 9 + i].timestamp[-1]) * len(self.my_syncer.time_series[j * 9 + i].timestamp)) - 1
346 | new_ymin = min(self.my_syncer.time_series[j * 9 + i].data[x_low_index: x_high_index])
347 | new_ymax = max(self.my_syncer.time_series[j * 9 + i].data[x_low_index: x_high_index])
348 | self.axes[i, j].set_ylim([new_ymin, new_ymax])
349 | self.fig.canvas.draw()
350 |
351 | @staticmethod
352 | def take_closest(myList, myNumber):
353 | """
354 | Assumes myList is sorted. Returns closest value to myNumber.
355 |
356 | If two numbers are equally close, return the smallest number.
357 | """
358 | pos = bisect_left(myList, myNumber)
359 | if pos == 0:
360 | return myList[0]
361 | if pos == len(myList):
362 | return myList[-1]
363 | before = myList[pos - 1]
364 | after = myList[pos]
365 | if after - myNumber < myNumber - before:
366 | return after
367 | else:
368 | return before
369 |
370 | def clear_subplots(self):
371 | """
372 | Clears all subplots
373 | :return: None
374 | """
375 | for i in range(9):
376 | for j in range(2):
377 | self.axes[i, j].clear()
378 | self.fig.canvas.draw()
379 |
380 | def enable_slider(self, label):
381 | """
382 | Function attached to the enable slider checkbox.
383 | if enabled, redraws the plot to contain only part of data
384 | if disabled, redraws the plot to contain the whole data
385 | :param label: the label of the checkbox clicked
386 | :return: None
387 | """
388 | self.sliderEnableToggle = not self.sliderEnableToggle
389 | # print(self.sliderEnableToggle)
390 | if self.sliderEnableToggle:
391 | self.clear_subplots()
392 | self.fig.canvas.draw()
393 | for j in range(2): # columns of subplot
394 | for i in range(9): # rows in subplot
395 | # TODO: replace the hard coded "10" with limits input by the user
396 | closest = self.take_closest(self.my_syncer.time_series[j * 9 + i].timestamp, 20)
397 | last_idx = self.my_syncer.time_series[j * 9 + i].timestamp.index(closest)
398 | self.axes[i, j].plot(self.my_syncer.time_series[j * 9 + i].timestamp[:last_idx], self.my_syncer.time_series[j * 9 + i].data[:last_idx], linestyle='-', zorder=10, alpha=0.9)
399 | self.axes[i, j].autoscale(enable=True, axis='y')
400 | self.axes[i, j].autoscale(enable=True, axis='x')
401 | self.fig.canvas.draw()
402 |
403 | else:
404 | self.clear_subplots()
405 | self.fig.canvas.draw()
406 | # connect the callbacks again
407 | for i in range(9):
408 | for j in range(2):
409 | self.axes[i, j].callbacks.connect('xlim_changed', self.on_xlims_change)
410 | self.init_plot()
411 | self.fig.canvas.draw()
412 |
413 | def slide_through_data(self, val):
414 | """
415 | Function to update the plots to slide through the entire data.
416 | Takes in the value of the slider, updates the plot accordingly
417 |
418 | :param val: value of the slider
419 | :return: None
420 | """
421 | if self.sliderEnableToggle:
422 | print('sliding', val)
423 | self.clear_subplots()
424 | for j in range(2):
425 | for i in range(9):
426 | # TODO: replace the hard coded "10" with limits input by the user
427 | closest_low = self.take_closest(self.my_syncer.time_series[j * 9 + i].timestamp, val - 10)
428 | closest_high = self.take_closest(self.my_syncer.time_series[j * 9 + i].timestamp, val + 10)
429 | begin_idx = self.my_syncer.time_series[j * 9 + i].timestamp.index(closest_low)
430 | end_idx = self.my_syncer.time_series[j * 9 + i].timestamp.index(closest_high)
431 | self.axes[i, j].plot(self.my_syncer.time_series[j * 9 + i].timestamp[begin_idx:end_idx], self.my_syncer.time_series[j * 9 + i].data[begin_idx:end_idx], linestyle='-', zorder=10, alpha=0.9)
432 | for line in self.lines_DC:
433 | if line.axes == self.axes[i, j]:
434 | if closest_low <= line.get_xdata()[0] <= closest_high:
435 | self.axes[i, j].axvline(x=line.get_xdata()[0], color='r', label="DC", zorder=1, lw=0.75)
436 |
437 | for line in self.lines_UN:
438 | if not isinstance(line, tuple): # means the line does not belong to first row of plots
439 | if line.axes == self.axes[i, j]:
440 | if closest_low <= line.get_xdata()[0] <= closest_high:
441 | self.axes[i, j].axvline(x=line.get_xdata()[0], color='g', label="UN", zorder=1,
442 | lw=0.75)
443 | else:
444 | if line[0].axes == self.axes[i, j]:
445 | if closest_low <= line[0].get_xdata()[0] <= closest_high:
446 | self.axes[i, j].axvline(x=line[0].get_xdata()[0], color='g', label="UN", zorder=1,
447 | lw=0.75)
448 | self.axes[i, j].text(line[1].get_position()[0], self.axes[i, j].get_ylim()[1],
449 | line[1].get_text(), fontsize=6,
450 | rotation=45)
451 | self.fig.canvas.draw()
452 |
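453 | 
454 | if __name__ == "__main__":
455 |     # Minimal usage sketch (illustrative only; the directory and file base are
456 |     # hypothetical placeholders for a parsed EmotiBit recording).
457 |     # Interactive keys (see on_key_press): space toggles DC markers on the
458 |     # selected subplot, "m" highlights a window around the last click, and
459 |     # "a" rescales each y-axis to the current x-limits.
460 |     DataViewer(file_dir="/path/to/data", file_base="2019-07-01_data",
461 |                hide_dc_tags=[], usernote_toggle=True)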
--------------------------------------------------------------------------------
/py/emotibit/ecgHR_detector.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import argparse
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import scipy.signal as scisig
7 | import emotibit.signal as ebsig
8 |
9 | try:
10 | import IPython
11 | IPython.get_ipython().run_line_magic("matplotlib", "qt")
12 | except AttributeError:
13 | plt.ion()
14 |
15 |
16 | def detect_hr(ecg_file,
17 | ecg_column,
18 | ecg_timestamp,
19 | hr_file,
20 | hr_column,
21 | hr_timestamp,
22 | input_frequency,
23 | filename,
24 | plot_file,
25 | max_heart_rate_bpm=180,
26 | height=250):
27 | """
28 | @input ecg_file:
29 | String of path to the file containing the ECG data
30 | @input ecg_column:
31 | String of the column name in the ECG file containing the ECG data
32 | @input ecg_timestamp:
33 | String of the column name in the ECG file containing the timestamps
34 | in the ECG data
35 | @input hr_file:
36 | String of the path to the file containing the HR data
37 | @input hr_column:
38 | String of the column name in the HR file containing the HR data
39 | @input hr_timestamp:
40 | String of the column name containing the timestamps in the HR data
41 | @input input_frequency:
42 | OPTIONAL: int Frequency of the ECG data, defaults to 250 hertz.
43 | @input max_heart_rate_bpm:
44 | OPTIONAL: int of the maximum heart rate to consider valid in BPM.
45 | Defaults to 180 BPM.
46 | @input filename:
47 | OPTIONAL: string of the filename to write the detected ECG HR to.
48 | Defaults to 'ecgHR.csv'.
49 | @input height:
50 | OPTIONAL: int of the minimum height to use in ECG peak detection.
51 | Defaults to 250.
52 | @input plot_file:
53 | OPTIONAL: string of the filename to use to write the plot of HR data.
54 | Defaults to 'detectedHR.png'.
55 |
56 | @output: df: a dataframe containing the heart rate values
57 | and the timestamps of those heart rate values
58 | """
59 |
60 | # Providing a maximum allowed heart rate will allow the
61 | # find_peaks function to discard any duplicate readings
62 | # by enforcing a minimum space between beats, e.g. 250 Hz / (180 BPM / 60) = ~83 samples.
63 | max_heart_rate_hertz = max_heart_rate_bpm / 60
64 | minimum_sample_separation = input_frequency / max_heart_rate_hertz
65 |
66 | file = pd.read_csv(ecg_file)
67 | # We filter the data so that we can more easily find the peaks.
68 | filtered = ebsig.band_filter(file[ecg_column],
69 | np.array([5, 49]),
70 | fs=input_frequency)
71 | ind, _ = scisig.find_peaks(filtered,
72 | height=height,
73 | distance=minimum_sample_separation)
74 | # Plot HR detections and filtered ECG data.
75 | if plot_file is not None:
76 | create_validation_plot(filtered, ind)
77 | # Calculate hr.
78 | hr = 1 / (np.diff(ind) / input_frequency) * 60
79 | hr_timestamps = file[ecg_timestamp][ind][1:]  # We don't need the first one.
80 | # Save hr with hr timestamps.
81 | df = pd.DataFrame({'Timestamp': hr_timestamps,
82 | 'HR': hr})
83 | if filename is not None:
84 | df.to_csv(filename)
85 |
86 | # Plot both HRs on one plot.
87 | known_hr = None
88 | if (hr_file is not None):
89 | known_hr = pd.read_csv(hr_file)
90 | if plot_file is not None and known_hr is not None:
91 | create_hr_plot(known_hr[hr_timestamp],
92 | known_hr[hr_column],
93 | file[ecg_timestamp][ind][1:],
94 | hr,
95 | plot_file)
96 | return df
97 |
98 |
99 | def create_hr_plot(known_hr_timestamps,
100 | known_hr_values,
101 | detected_hr_timestamps,
102 | detected_hr_values,
103 | filename):
104 | """
105 | @input: known_hr_timestamps:
106 | The column of timestamps of the HR data from the known HR file.
107 | @input: known_hr_values:
108 | The column of values of the HR data from the known HR file.
109 | @input: detected_hr_timestamps:
110 | The column of timestamps of the ECG HR data
111 | @input: detected_hr_values:
112 | The column of values of HR data from the ECG HR data
113 | @input: filename:
114 | String of the name of where this plot should be saved
115 |
116 | @output: None: Saves a plot to a file.
117 | """
118 |
119 | plt.clf()
120 | plt.step(known_hr_timestamps,
121 | known_hr_values,
122 | where="pre",
123 | color="blue",
124 | label="Known HR")
125 | plt.step(detected_hr_timestamps,
126 | detected_hr_values,
127 | where="post",
128 | color="purple",
129 | label="HR from ECG")
130 | plt.xlabel("Timestamp")
131 | plt.legend(loc="upper left")
132 | plt.ylabel("Heart Rate")
133 | plt.title("Heart Rate")
134 | plt.savefig(filename, dpi=600)
135 |
136 |
137 | def create_validation_plot(filtered,
138 | ind,
139 | filename="hr_detection_validation_plot.png"):
140 | """
141 | @input filtered: A column of ECG data that has been filtered.
142 | @input ind: A sequences of indexes of the filtered
143 | data where the peaks have been detected.
144 | @input filename: OPTIONAL: String of the filename to save this plot to.
145 |
146 | @output: None: A plot is written to the indicated filename.
147 | """
148 |
149 | plt.plot(filtered)
150 |     plt.xlabel("Sample index")
151 | plt.ylabel("ECG")
152 | plt.title("ECG Data with Detected Peaks")
153 | plt.scatter(ind, filtered[ind], color="orange")
154 | plt.gcf().set_size_inches(14, 6)
155 | plt.savefig(filename, dpi=600)
156 |
157 |
158 | def main():
159 | parser = argparse.ArgumentParser()
160 | parser.add_argument("-ecg",
161 | "--ecg_file",
162 | action="store",
163 | type=str,
164 | nargs=1,
165 | help="Path to the file containing the ECG data")
166 | parser.add_argument("-ecgCol",
167 | "--ecg_column",
168 | action="store",
169 | type=str,
170 | nargs=1,
171 | help="Name of the column storing the ECG data")
172 | parser.add_argument("-ecgt",
173 | "--ecg_timestamp",
174 | action="store",
175 | type=str,
176 | nargs=1,
177 | help="""Name of the column containing
178 | the timestamps for the ECG data""")
179 | parser.add_argument("-f",
180 | "--frequency",
181 | action="store",
182 | type=int,
183 | nargs=1,
184 |                         help="""The frequency of the ECG data,
185 |                         in hertz (e.g. 250). Required.""")
186 | parser.add_argument("-mhr",
187 | "--maxHeartRate",
188 | action="store",
189 | type=int,
190 | nargs="?",
191 |                         help="""[OPTIONAL] The maximum heart rate
192 | that can be detected, used for
193 | reducing false detection, defaults to 180""")
194 | parser.add_argument("-ht",
195 | "--height",
196 | action="store",
197 | type=int,
198 | nargs="?",
199 | help="""[OPTIONAL] The height threshold to be used
200 | for ECG HR detection, defaults to 250""")
201 | parser.add_argument("-hr",
202 | "--hr_file",
203 | action="store",
204 | type=str,
205 | nargs="?",
206 |                         help="""[OPTIONAL] Path to the file containing
207 |                         already known HR data (such as from EmotiBit)""")
208 | parser.add_argument("-hrCol",
209 | "--hr_column",
210 | action="store",
211 | type=str,
212 | nargs="?",
213 |                         help="[OPTIONAL] Name of the column storing the HR data")
214 | parser.add_argument("-hrt",
215 | "--hr_timestamp",
216 | action="store",
217 | type=str,
218 | nargs="?",
219 | help="""[OPTIONAL] Name of the column
220 | containing the timestamps for the HR data""")
221 | parser.add_argument("-o",
222 | "--output",
223 | action="store",
224 | type=str,
225 | nargs="?",
226 | help="""[OPTIONAL] Name for the output file of HR
227 | detected from the ECG file, does not write file
228 | if not provided.""")
229 | parser.add_argument("-po",
230 | "--plotOutput",
231 | action="store",
232 | type=str,
233 | nargs="?",
234 | help="""[OPTIONAL] Name for the output file of plot,
235 | does not write plot if not provided.""")
236 |
237 | args = parser.parse_args()
238 | ecg_file = args.ecg_file[0]
239 | ecg_column = args.ecg_column[0]
240 | ecg_timestamps = args.ecg_timestamp[0]
241 |
242 | hr_file = None
243 | hr_column = None
244 | hr_timestamps = None
245 | if args.hr_file is not None:
246 | hr_file = args.hr_file
247 | if args.hr_column is not None:
248 | hr_column = args.hr_column
249 | if args.hr_timestamp is not None:
250 | hr_timestamps = args.hr_timestamp
251 |
252 | if not ((hr_file is not None
253 | and hr_column is not None
254 | and hr_timestamps is not None) or
255 | (hr_file is None
256 | and hr_column is None
257 | and hr_timestamps is None)):
258 |         print("""Error - You must provide either all of the known-HR
259 |               arguments (file, column, timestamp) or none of them.""")
260 | return None
261 |
262 |     if (args.frequency is not None):
263 |         frequency = args.frequency[0]
264 |     else:
265 |         print("Error - The frequency of the ECG data was not provided.")
266 |         return None
267 |     max_hr = 180
268 |     if (args.maxHeartRate is not None):
269 |         max_hr = args.maxHeartRate
270 |     # The default height of 250 was derived by examining the data; it could
271 |     # need changing if a different ECG measuring device or something
272 |     # else in the setup changes.
273 |     height = 250
274 |     if (args.height is not None):
275 |         height = args.height
276 |     # args.output and args.plotOutput default to None when not provided,
277 |     # which disables writing the HR csv / plot files, as documented in
278 |     # the argparse help strings above.
279 |     output_file = args.output
280 |     plot_file = args.plotOutput
281 |
282 | detect_hr(ecg_file,
283 | ecg_column,
284 | ecg_timestamps,
285 | hr_file,
286 | hr_column,
287 | hr_timestamps,
288 | frequency,
289 | output_file,
290 | plot_file,
291 | max_hr,
292 | height)
293 |
294 |
295 | if __name__ == "__main__":
296 | main()
297 |
--------------------------------------------------------------------------------
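The minimum-distance logic in `detect_hr` above is just the sampling rate divided by the fastest allowed beat frequency. A minimal, self-contained sketch of that arithmetic on a synthetic signal (the sampling rate, peak height, and fake "ECG" are all assumptions for illustration, not EmotiBit data):

```python
import numpy as np
import scipy.signal as scisig

fs = 250                       # assumed ECG sampling rate in Hz
max_bpm = 180                  # physiological ceiling on heart rate
min_sep = fs / (max_bpm / 60)  # ~83 samples: no two beats may be closer

# Synthetic "ECG": one sharp peak per second (60 BPM) on low noise.
rng = np.random.default_rng(0)
sig = rng.normal(0, 0.1, 10 * fs)
sig[::fs] = 5.0

ind, _ = scisig.find_peaks(sig, height=1.0, distance=min_sep)
hr_bpm = 60 / (np.diff(ind) / fs)  # inter-beat intervals -> BPM
print(hr_bpm)                      # ~60.0 for every interval
```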
/py/emotibit/flexcompparser.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue May 28 16:50:21 2019
4 |
5 | @author: Sean Montgomery
6 | """
7 |
8 | __version__ = '0.0.1'
9 |
10 | #import csv
11 | import time
12 |
13 | class Parser:
14 |
15 | session_date_time_row = 4
16 | start_end_time_row = 5
17 | date_pattern = "%Y-%m-%d"
18 | time_pattern = "%H:%M:%S,%f"
19 |
20 | session_date = ""
21 | session_time = ""
22 | start_time = ""
23 | end_time = ""
24 |
25 | def __init__(self, file_path):
26 | with open(file_path) as f:
27 | i = 1
28 | while(i < self.session_date_time_row):
29 | tmp = f.readline()
30 | #print(tmp)
31 | i += 1
32 |
33 | session_date_time = f.readline()
34 | splt = session_date_time.split(" ")
35 | #print(splt)
36 |
37 | splt2 = splt[2].split("\t")
38 | #print(splt2)
39 | self.session_date = splt2[0]
40 | print("Session Date: " + self.session_date)
41 |
42 | splt2 = splt[4].split("\n")
43 | #print(splt2)
44 | self.session_time = splt2[0]
45 | print("Session Time: " + self.session_time)
46 |
47 |
48 | i += 1
49 | while(i < self.start_end_time_row):
50 | tmp = f.readline()
51 | #print(tmp)
52 | i += 1
53 | start_end_time = f.readline()
54 | #print(start_end_time)
55 | splt = start_end_time.split(" ")
56 | splt2 = splt[4].split("\n")
57 | self.end_time = splt2[0]
58 | print("End Time: " + self.end_time)
59 |
60 | def get_session_epoch(self):
61 | return time.mktime(time.strptime(self.session_date + " " + self.session_time + ",000", self.date_pattern + " " + self.time_pattern))
62 |
63 | def get_end_epoch(self):
64 | end_float = self.end_time.split(",")
65 | return time.mktime(time.strptime("1970-01-01 " + self.end_time + "000", self.date_pattern + " " + self.time_pattern)) + float(end_float[1])/1000 - time.mktime(time.gmtime(0))
66 |
67 | def get_start_epoch(self):
68 | return self.get_session_epoch() - self.get_end_epoch()
69 |
70 |
--------------------------------------------------------------------------------
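The epoch arithmetic in `get_session_epoch`, `get_end_epoch`, and `get_start_epoch` above is compact but easy to misread. A standalone sketch of the same computation with hypothetical header values; note that `mktime`/`struct_time` drop the `%f` fraction, which is why the milliseconds are re-added by hand:

```python
import time

date_pattern = "%Y-%m-%d"
time_pattern = "%H:%M:%S,%f"

# Hypothetical values in the formats the parser reads from the file header.
session_date, session_time = "2019-05-24", "14:37:02"
end_time = "00:10:30,500"  # recording length as HH:MM:SS,ms

session_epoch = time.mktime(time.strptime(
    session_date + " " + session_time + ",000",
    date_pattern + " " + time_pattern))

# Anchor the duration at 1970-01-01 and subtract the local epoch offset to
# get plain seconds; add the millisecond part that strptime/mktime dropped.
ms = end_time.split(",")[1]
duration_s = (time.mktime(time.strptime("1970-01-01 " + end_time + "000",
                                        date_pattern + " " + time_pattern))
              - time.mktime(time.gmtime(0))) + float(ms) / 1000

print(session_epoch - duration_s)  # approximate session start epoch
```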
/py/emotibit/hr_scorer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import pandas as pd
3 | import emotibit.signal as ebsig
4 | import matplotlib.pyplot as plt
5 | import scipy.stats as scistats
6 | import argparse
7 |
8 | try:
9 | import IPython
10 |     IPython.get_ipython().run_line_magic("matplotlib", "qt")
11 | except AttributeError:
12 | plt.ion()
13 |
14 |
15 | def resample(file_one_name,
16 | file_one_time_col,
17 | file_two_name,
18 | file_two_timestamp_column,
19 | desired_frequency=100):
20 |     """
21 |     @input file_one_name:
22 |         String of the path to the first file. (The data you are
23 |         testing (dependent variable))
24 |     @input file_one_time_col:
25 |         String name of the column containing the timestamp data
26 |         in file one.
27 |     @input file_two_name:
28 |         String of the path to the second file.
29 |         (The data you are assuming to be truth and testing against
30 |         (independent variable))
31 |     @input file_two_timestamp_column:
32 |         String name of the column containing the timestamp data
33 |         in file two.
34 |     @input desired_frequency:
35 |         OPTIONAL: int of the desired frequency to resample to,
36 |         defaults to 100 hz.
37 |
38 |     @output: Two resampled dfs
39 |
40 |     @info: Resamples the two given data sources to the desired frequency
41 |         and aligns the second source to the first source. Ensure that the
42 |         sources cover the same amount of time, or that the second source
43 |         overlaps the first source. The NA fill at the end assumes file
44 |         two has a column named "HR" and that file two starts recording
45 |         before file one does.
46 |     """
47 | file_one = pd.read_csv(file_one_name)
48 | file_two = pd.read_csv(file_two_name)
49 |
50 | # Trim file two so that it matches the size of file one.
51 | # Works on the assumption that file one is shorter,
52 | # otherwise there will be unmatched data.
53 | file_two_trimmed = file_two.loc[
54 | (file_two[file_two_timestamp_column]
55 | >= file_one[file_one_time_col][0])
56 | & (file_two[file_two_timestamp_column]
57 | <= file_one[file_one_time_col].iloc[-1])]
58 | file_two_trimmed = file_two_trimmed.reset_index(drop=True)
59 |
60 | # We are resampling the HR data to desired frequency,
61 | # this has some implications:
62 | # HR doesn't really have frequency in quite the same way
63 | # as other measurements, since it is derivative.
64 | # This means that we are oversampling all of the data,
65 | # and that also introduces some bias:
66 | # For example, if the HR is slow, there are more timepoints sampled
67 | # of that HR than if the HR is fast, since the resampled
68 |     # frequency is the same throughout the data.
69 | # So, if the algorithm is really good when HR is fast and really
70 | # bad when HR is slow, this method will make the
71 | # performance look even worse than it really is.
72 | # So why keep the oversampling? It solves a different (bigger?)
73 | # problem of being able to compare the HRs at each time point.
74 | # If we have the same number of samples and they are perfectly lined up,
75 | # then we are able to compare them and generate
76 | # some metrics for how close they are.
77 | resampled_one = ebsig.periodize(file_one,
78 | file_one_time_col,
79 | desired_frequency,
80 | start_t=file_one[file_one_time_col][0],
81 | end_t=file_one[file_one_time_col].iloc[-1])
82 | resampled_two = ebsig.periodize(file_two_trimmed,
83 | file_two_timestamp_column,
84 | desired_frequency,
85 | start_t=file_one[file_one_time_col][0],
86 | end_t=file_one[file_one_time_col].iloc[-1])
87 |
88 |     # It is possible that file two has been left with some NAs at the
89 |     # beginning; this fills them in with the last HR before file one starts.
90 | file_two_early_part = file_two[file_two[file_two_timestamp_column]
91 | < file_one[file_one_time_col][0]]
92 | # Gets the last HR before the start of file one.
93 | fill_in_hr = file_two_early_part["HR"].iloc[-1]
94 | # Fills in the missing values with that value.
95 | resampled_two = resampled_two.fillna(fill_in_hr)
96 |
97 | return resampled_one, resampled_two
98 |
99 |
100 | def score(data_one,
101 | data_one_column,
102 | data_two,
103 | data_two_column,
104 | plot_base_name,
105 | name_one="Source One",
106 | name_two="Source Two"):
107 |     """
108 |     @input data_one: df of the first set of data (the data being tested)
109 |     @input data_one_column: string name of the column of interest
110 |         in data_one
111 |     @input data_two: df of the second set of data (the data assumed
112 |         to be the "truth"/independent variable)
113 |     @input data_two_column: string name of the column of interest
114 |         in data_two
115 |     @input plot_base_name: base name for the plot files, or None
116 |         to skip writing plots
117 |     @input name_one, name_two: OPTIONAL: labels used in the plots
118 |
119 |     @info: both dfs should already be resampled so that they have
120 |         the same sampling rate and identical timestamps
121 |     @output: slope, intercept, r, rho, tau, p, std_err
122 |     """
123 |
124 | if plot_base_name is not None:
125 | plot_both_hrs(data_one[data_one_column],
126 | data_two[data_two_column],
127 | plot_base_name,
128 | name_one,
129 | name_two)
130 | # We use this simple linear regression to get some stats,
131 | # mainly interested in r, the correlation between the two
132 | # note that because the relationship is not necessarily linear,
133 | # r is not necessarily the best metric, but we still record it
134 | # so we can understand how the value of r fits into the bigger picture
135 | (slope,
136 | intercept,
137 | r,
138 | p,
139 | std_err) = scistats.linregress(data_two[data_two_column],
140 | data_one[data_one_column])
141 | # We choose to use spearman's rank correlation since
142 | # it can help us to understand if they are well correlated,
143 | # even if the distribution is non-parametric.
144 | spearman_r = scistats.spearmanr(data_two[data_two_column],
145 | data_one[data_one_column])
146 | rho = spearman_r[0]
147 | # We also decided to report the kendall rank correlation coefficient.
148 | # Another way of looking at how well the two signals are correlated.
149 | tau, _ = scistats.kendalltau(data_two[data_two_column],
150 | data_one[data_one_column])
151 |
152 | if plot_base_name is not None:
153 | scatter_plot(data_one[data_one_column],
154 | data_two[data_two_column],
155 | slope,
156 | intercept,
157 | r,
158 | rho,
159 | tau,
160 | plot_base_name,
161 | name_one,
162 | name_two)
163 |
164 | return slope, intercept, r, rho, tau, p, std_err
165 |
166 |
167 | def plot_both_hrs(data_one_hr,
168 | data_two_hr,
169 | plot_base_name,
170 | name_one="Source One",
171 | name_two="Source Two"):
172 |     """
173 |     @input: data_one_hr: series of data containing the HR for data one
174 |     @input: data_two_hr: series of data containing the HR for data two
175 |     @input: plot_base_name: base name used for the saved plot file
176 |     @input: name_one / name_two: OPTIONAL: labels for the two sources
177 |     """
178 |
179 | plt.clf()
180 | plt.rcParams.update(plt.rcParamsDefault)
181 | plt.figure(figsize=(8, 6))
182 | plt.plot(data_one_hr, label=name_one)
183 | plt.plot(data_two_hr, label=name_two)
184 | plt.legend(loc="upper left")
185 | plt.xlabel("Time")
186 | plt.ylabel("HR")
187 | plt.title(name_one + " and " + name_two + " HR")
188 | plt.savefig(plot_base_name + "_resampletest.png", dpi=600)
189 |
190 |
191 | def scatter_plot(data_one_hr,
192 | data_two_hr,
193 | slope,
194 | intercept,
195 | r,
196 | rho,
197 | tau,
198 | plot_base_name,
199 | name_one="Source One",
200 | name_two="Source Two"):
201 |     """
202 |     @input data_one_hr:
203 |         series of data containing the HR for data one
204 |         (the dependent variable, which is being tested)
205 |     @input data_two_hr:
206 |         series of data containing the HR for data two
207 |         (the independent/truth variable, which is being tested against)
208 |     @input slope:
209 |         float of the slope value of the regression line for the scatter plot
210 |     @input intercept:
211 |         float of the intercept value of regression line for the scatter plot
212 |     @input r, rho, tau: correlation statistics annotated on the plot
213 |     @input plot_base_name: base name used for the saved scatter plot file
214 |     @input name_one, name_two: OPTIONAL:
215 |         string labels for data_one and data_two
216 |     """
217 |
218 | plt.clf()
219 | plt.rc('xtick', labelsize=3.5)
220 | plt.rc('ytick', labelsize=3.5)
221 | plt.rc('axes', linewidth=0.5)
222 | plt.figure(figsize=(3.3, 3.3))
223 | plt.scatter(data_two_hr, data_one_hr, s=0.5)
224 | plt.xlabel(name_two, fontsize=7)
225 | plt.ylabel(name_one, fontsize=7)
226 | plt.title(name_two + " vs. " + name_one + " HR, with Regression Line",
227 | fontsize=7)
228 | plt.text(min(data_two_hr),
229 | max(data_one_hr) - 39,
230 | f"Slope: {slope:.4f}\nIntercept: {intercept:.4f}"
231 | f"\nr: {r:.4f}\nrho: {rho:.4f}\ntau: {tau:.4f}",
232 | fontsize=5)
233 |
234 | def slope_line(x):
235 | return slope * x + intercept
236 | this_slope_line = list(map(slope_line, data_two_hr))
237 |
238 | plt.plot(data_two_hr, this_slope_line, color="magenta", linewidth=0.5)
239 | plt.tick_params(axis="both", which="major", labelsize=7)
240 | plt.savefig(plot_base_name + "_scatter.png", dpi=200)
241 | plt.rcParams.update(plt.rcParamsDefault)
242 |
243 |
244 | def main():
245 | parser = argparse.ArgumentParser()
246 | parser.add_argument("-hr1",
247 | "--heartRateOne",
248 | action="store",
249 | type=str,
250 | nargs=1,
251 | help="""Path to the file containing HR data
252 | for source one. This should be the
253 | dependent source (the one you are testing).""")
254 | parser.add_argument("-t1",
255 | "--timestampOne",
256 | action="store",
257 | type=str,
258 | nargs=1,
259 | help="""Name of the column in source
260 | one that contains the timestamps.""")
261 | parser.add_argument("-d1",
262 | "--data_one",
263 | action="store",
264 | type=str,
265 | nargs=1,
266 | help="""Name of the column in source
267 | one that contains the HR data.""")
268 | parser.add_argument("-hr2",
269 | "--heartRateTwo",
270 | action="store",
271 | type=str,
272 | nargs=1,
273 | help="""Path to the file containing HR data
274 | for source two.
275 |                         This should be the independent source
276 | (Your source of truth,
277 | what you are testing against).""")
278 | parser.add_argument("-t2",
279 | "--timestampTwo",
280 | action="store",
281 | type=str,
282 | nargs=1,
283 | help="""Name of the column in source two
284 | that contains the timestamps.""")
285 | parser.add_argument("-d2",
286 | "--data_two",
287 | action="store",
288 | type=str,
289 | nargs=1,
290 | help="""Name of the column in source
291 | two that contains the HR data.""")
292 | parser.add_argument("-f",
293 | "--frequency",
294 | action="store",
295 | type=int,
296 | nargs=1,
297 | help="""Frequency of device with lower frequency.
298 | (e.g. if source one is 250hz and source two is 125hz,
299 | set this to 125).""")
300 | parser.add_argument("-n1",
301 | "--name_one",
302 | action="store",
303 | type=str,
304 | nargs="?",
305 | help="OPTIONAL: Name for source one, used in plots.")
306 | parser.add_argument("-n2",
307 | "--name_two",
308 | action="store",
309 | type=str,
310 | nargs="?",
311 | help="OPTIONAL: Name for source two, used in plots.")
312 | parser.add_argument("-o",
313 | "--output",
314 | action="store",
315 | type=str,
316 | nargs="?",
317 | help="""OPTIONAL: Name for the outputs.
318 | Name will be used to generate a plot output
319 | such as -scatter.png and
320 | -resampledHR.png.
321 | If no name is provided, plots are not written""")
322 | args = parser.parse_args()
323 |
324 | file_one = args.heartRateOne[0]
325 | time_col_one = args.timestampOne[0]
326 | data_col_one = args.data_one[0]
327 |
328 | file_two = args.heartRateTwo[0]
329 | time_col_two = args.timestampTwo[0]
330 | data_col_two = args.data_two[0]
331 |
332 | frequency = args.frequency[0]
333 | name_one = "Source One"
334 | if args.name_one is not None:
335 | name_one = args.name_one
336 | name_two = "Source Two"
337 | if args.name_two is not None:
338 | name_two = args.name_two
339 | plot_base_name = None
340 | if args.output is not None:
341 | plot_base_name = args.output
342 |
343 | print("===== BEGINNING RESAMPLING to " + str(frequency) + " hz =====")
344 | data_one, data_two = resample(file_one,
345 | time_col_one,
346 | file_two,
347 | time_col_two,
348 | frequency)
349 | print("===== FINISHED RESAMPLING =====")
350 | print("===== BEGINNING SCORING =====")
351 | slope, intercept, r, rho, tau, p, std_err = score(data_one,
352 | data_col_one,
353 | data_two,
354 | data_col_two,
355 | plot_base_name,
356 | name_one,
357 | name_two)
358 | print("===== FINISHED SCORING =====")
359 | print("\nSLOPE: ", slope)
360 | print("INTERCEPT: ", intercept)
361 | print("R: ", r)
362 | print("RHO: ", rho)
363 | print("TAU: ", tau)
364 | print("P: ", p)
365 | print("STD_ERR: ", std_err)
366 | print("\n=== END ===")
367 |
368 |
369 | if __name__ == "__main__":
370 | main()
371 |
--------------------------------------------------------------------------------
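A self-contained sketch of the scoring statistics used in `score()` above, run on synthetic series that stand in for two already-resampled HR sources (values are made up for illustration):

```python
import numpy as np
import scipy.stats as scistats

rng = np.random.default_rng(0)
truth = 60 + 20 * np.sin(np.linspace(0, 6, 600))  # independent/"known" HR
test = truth + rng.normal(0, 2, truth.size)       # dependent/device under test

slope, intercept, r, p, std_err = scistats.linregress(truth, test)
rho, _ = scistats.spearmanr(truth, test)
tau, _ = scistats.kendalltau(truth, test)
print(f"slope={slope:.3f}, r={r:.3f}, rho={rho:.3f}, tau={tau:.3f}")
```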
/py/emotibit/info.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @emotibit info
4 | @brief helper for EmotiBit _info.json
5 | @example info.print_info(file_dir = r"C:\priv\myDir",
6 | file_base_names = ["2022-06-07_15-23-33-810389"])
7 |
8 | Created on Wed Jul 20 05:43:45 2022
9 |
10 | @author: consu
11 | """
12 |
13 |
14 | def print_info(file_dir = "", file_base_names = "", print_len = -1):
15 | """
16 | @fn print_info()
17 | @brief batch prints EmotiBit _info.json file to console
18 | @param file_dir Base directory of the parsed data files
19 | @param file_base_names array of file bases of the data files. Expected
20 | organization is file_dir/file_base_names[i]/file_base_names[i]_XX.csv
21 | @param print_len Max length of each print. -1 = print whole file.
22 | """
23 |
24 |     for i in range(len(file_base_names)):
25 |         file_base = file_base_names[i]
26 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + 'info.json'
27 | with open(file_path) as f:
28 | contents = f.read()
29 | if (print_len > -1):
30 | contents = contents[0:print_len]
31 | print(contents)
32 |
--------------------------------------------------------------------------------
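A short usage sketch for `print_info` (the directory and file base name are hypothetical; the expected on-disk layout is shown in the comment):

```python
import emotibit.info as info

# Expected layout (hypothetical):
#   C:\priv\myDir\2022-06-07_15-23-33-810389\2022-06-07_15-23-33-810389_info.json
info.print_info(file_dir=r"C:\priv\myDir",
                file_base_names=["2022-06-07_15-23-33-810389"],
                print_len=500)  # print only the first 500 characters per file
```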
/py/emotibit/signal.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @package signal
4 | Signal processing module for EmotiBit
5 |
6 | @author: consu
7 | """
8 |
9 | import numpy as np
10 | import pandas as pd
11 | import scipy.signal as scisig
12 |
13 |
14 | def periodize(input_df, timestamp_col_name, fs, start_t = None, start_val = None, end_t = None):
15 | """ Periodizes an aperiodic signal to the passed sampling frequency
16 | @param input_df data frame with data and timestamp columns
17 | @param timestamp_col_name column header of timestamps
18 |     @param fs target sampling rate of the output dataframe
19 | @param start_t optional start time of the periodized output dataframe
20 | @param start_val optional starting value of the periodized output dataframe
21 | @param end_t optional end time of the periodized output dataframe
22 | @return periodized dataframe
23 | """
24 | if (len(input_df) == 0):
25 | return
26 | val = start_val
27 |     if (start_t is None):
28 |         start_t = input_df.loc[0][timestamp_col_name]
29 |     if (start_val is None):
30 |         val = float('nan')
31 |     if (end_t is None):
32 |         end_t = input_df.loc[len(input_df) - 1][timestamp_col_name]
33 |
34 | timestamps = np.arange(start_t, end_t, 1/fs)
35 | t_col = input_df.columns.get_loc(timestamp_col_name)
36 |
37 | ind = 0
38 | # output_df = pd.DataFrame()
39 | output_list = []
40 | for t in timestamps:
41 | output_list.append(input_df.loc[ind].tolist())
42 | if (t >= input_df.loc[ind,timestamp_col_name]):
43 | val = input_df.iloc[ind,-1]
44 | ind = min(ind + 1, len(input_df)-1)
45 | l_ind = len(output_list) - 1
46 | output_list[l_ind][len(output_list[l_ind]) - 1] = val
47 | output_list[l_ind][t_col] = t
48 |
49 | output_df = pd.DataFrame(output_list, columns = list(input_df.columns))
50 | return output_df
51 |
52 | def butter_lowpass(cutoff, fs, order=4):
53 |     """Calculate coefficients (b, a) for a lowpass filter (Bessel design)
54 |
55 | Args:
56 | cutoff (array): Critical frequencies
57 | fs (int): Sampling frequency
58 | order (int, optional): Filter order. Defaults to 4.
59 |
60 | Returns:
61 |         b, a ([ndarray, ndarray]): Numerator (b) and denominator (a) polynomials of the IIR filter.
62 | """
63 | nyq = 0.5 * fs
64 | normal_cutoff = cutoff / nyq
65 | #b, a = butter(order, normal_cutoff, btype='low', analog=False)
66 | b, a = scisig.bessel(order, normal_cutoff, btype='lowpass', analog=False, norm='delay')
67 | return b, a
68 |
69 | def butter_lowpass_filter(data, cutoff, fs, order=4):
70 |     """Apply a lowpass filter to the data, also returning its group delay
71 |
72 | Args:
73 | data (array): Data to filter
74 | cutoff (array): Critical frequencies
75 | fs (int): Sampling frequency
76 | order (int, optional): Filter order. Defaults to 4.
77 |
78 | Returns:
79 |         y, group_delay ([ndarray, ndarray]): The output of the digital filter and the group_delay of the filter.
80 | """
81 | b, a = butter_lowpass(cutoff, fs, order=order)
82 | #b, a = signal.bessel(cutoff, fs, 'low', order=order, analog=True, norm='delay')
83 | w, h = scisig.freqz(b, a, fs=fs)
84 | #print(w)
85 | group_delay = -np.diff(np.unwrap(np.angle(h))) / np.diff(w)
86 | y = scisig.lfilter(b, a, data)
87 | return y, group_delay
88 |
89 | def lowpass_filter(data, cutoff, fs, order=4):
90 |     """Lowpass filter the data and compensate for the filter's group delay
91 |
92 | Args:
93 | data (array): Data to filter
94 | cutoff (array): Critical frequencies
95 | fs (int): Sampling frequency
96 | order (int, optional): Filter order. Defaults to 4.
97 |
98 | Returns:
99 |         dataf (ndarray): The filtered data.
100 | """
101 | y, gd1 = butter_lowpass_filter(data, cutoff, fs, order)
102 | #print(gd1)
103 |     delay = int(np.round(gd1[int(cutoff.mean()/((fs/2.0)/511.0))]))
104 | #print(delay)
105 | dataf = np.zeros(data.shape)
106 | dataf[0:dataf.shape[0]-delay] = y[delay:]
107 |
108 | return dataf
109 |
110 | def butter_bandpass(cutoff, fs, order=4):
111 |     """Calculate coefficients (b, a) for a bandpass filter (Bessel design)
112 |
113 | Args:
114 | cutoff (array): Critical frequencies
115 | fs (int): Sampling frequency
116 | order (int, optional): Filter order. Defaults to 4.
117 |
118 | Returns:
119 |         b, a ([ndarray, ndarray]): Numerator (b) and denominator (a) polynomials of the IIR filter.
120 | """
121 | nyq = 0.5 * fs
122 | normal_cutoff = cutoff / nyq
123 | b, a = scisig.bessel(order, normal_cutoff, btype='bandpass', analog=False, norm='delay')
124 | return b, a
125 |
126 | def butter_bandpass_filter(data, cutoff, fs, order=4):
127 |     """Apply a bandpass filter to the data, also returning its group delay
128 |
129 | Args:
130 | data (array): Data to filter
131 | cutoff (array): Critical frequencies
132 | fs (int): Sampling frequency
133 | order (int, optional): Filter order. Defaults to 4.
134 |
135 | Returns:
136 |         y, group_delay ([ndarray, ndarray]): The output of the digital filter and the group_delay of the filter.
137 | """
138 | b, a = butter_bandpass(cutoff, fs, order=order)
139 | w, h = scisig.freqz(b, a, fs=fs)
140 | group_delay = -np.diff(np.unwrap(np.angle(h))) / np.diff(w)
141 | y = scisig.lfilter(b, a, data)
142 | return y, group_delay
143 |
144 | def band_filter(data, cutoff, fs, order=4):
145 |     """Bandpass filter the data and compensate for the filter's group delay
146 |
147 | Args:
148 | data (array): Data to filter
149 | cutoff (array): Critical frequencies
150 | fs (int): Sampling frequency
151 | order (int, optional): Filter order. Defaults to 4.
152 |
153 | Returns:
154 |         dataf (ndarray): The filtered data.
155 | """
156 | y, gd1 = butter_bandpass_filter(data, cutoff, fs, order)
157 | delay = int(np.round(gd1[int(cutoff.mean()/((fs/2.0)/511.0))]))
158 | dataf = np.zeros(data.shape)
159 | dataf[0:dataf.shape[0]-delay] = y[delay:]
160 |
161 | return dataf
162 |
163 | def nan_helper(y):
164 | """Helper to handle indices and logical indices of NaNs.
165 |
166 | Input:
167 | - y, 1d numpy array with possible NaNs
168 | Output:
169 | - nans, logical indices of NaNs
170 | - index, a function, with signature indices= index(logical_indices),
171 | to convert logical indices of NaNs to 'equivalent' indices
172 | Example:
173 | >>> # linear interpolation of NaNs
174 | >>> nans, x= nan_helper(y)
175 | >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
176 | """
177 |
178 | return np.isnan(y), lambda z: z.nonzero()[0]
179 |
180 | def interp_nan(y):
181 | """Interpolation for nan values
182 |
183 | Args:
184 |         y (ndarray): Array containing nan values
185 |
186 | Returns:
187 |         y (ndarray): Corrected array
188 | """
189 | nans, x = nan_helper(y)
190 | y[nans] = np.interp(x(nans), x(~nans), y[~nans])
191 |
192 | return y
193 |
194 | def zero_mean(signal):
195 | """Convert data to zero mean and unit variance
196 |
197 | Args:
198 |         signal (ndarray): Signal to convert
199 |
200 |     Returns:
201 |         unit (ndarray): The converted signal
202 | """
203 | unit = (signal - signal.mean())/signal.std()
204 |
205 | return unit
--------------------------------------------------------------------------------
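A minimal usage sketch for `periodize` on made-up beat-to-beat HR samples; each row of the fixed-rate output carries the most recent observed value forward, so the result is step-shaped rather than interpolated:

```python
import pandas as pd
import emotibit.signal as ebsig

# Aperiodic, beat-to-beat HR samples (synthetic values).
df = pd.DataFrame({"Timestamp": [0.0, 0.9, 2.1, 2.9, 4.2],
                   "HR": [67.0, 66.0, 50.0, 71.0, 46.0]})

periodized = ebsig.periodize(df, "Timestamp", fs=10)  # resample to 10 Hz
print(periodized.head())
```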
/py/emotibit/test_timestamp_converter.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import timestamp_converter as tsc
3 |
4 | class TestTimestampConverterMethods(unittest.TestCase):
5 |
6 | def test_calculateSlopeWithGoodValues(self):
7 | sourceOneFirstTaps = [10, 20, 30]
8 | sourceTwoFirstTaps = [10, 15, 20]
9 | sourceOneSecondTaps = [50, 60, 70]
10 | sourceTwoSecondTaps = [50, 55, 60]
11 |
12 | self.assertEqual(tsc.calculate_slope(sourceOneFirstTaps, sourceOneSecondTaps, sourceTwoFirstTaps, sourceTwoSecondTaps),
13 | 1,
14 | "Slope was not calculated correctly")
15 |
16 | def test_calculateSlopeWithMismatchedFirstLengths(self):
17 | sof = [1, 2, 3]
18 | stf = [1, 2]
19 | sos = [4, 6, 8]
20 | sts = [6, 8, 10]
21 |
22 | self.assertIsNone(tsc.calculate_slope(sof, sos, stf, sts),
23 | "Did not detect that the length of first set of taps did not match")
24 |
25 | def test_calculateSlopeWithMismatchedSecondLengths(self):
26 | sof = [1, 2, 3]
27 | stf = [1, 2, 8]
28 | sos = [4, 8]
29 | sts = [6, 8, 10]
30 |
31 | self.assertIsNone(tsc.calculate_slope(sof, sos, stf, sts),
32 | "Did not detect that the length of second set of taps did not match")
33 |
34 | def test_calculateBWithGoodValues(self):
35 | so = [1, 2, 3]
36 | st = [3, 4, 5]
37 |
38 | self.assertEqual(tsc.calculate_b(1, so, st),
39 | -2,
40 | "Did not properly calculate y-intercept")
41 |
42 | def test_calculateBWithMismatchedLengths(self):
43 | so = [1, 2, 3]
44 | st = [2, 3]
45 |
46 | self.assertIsNone(tsc.calculate_b(1, so, st),
47 | "Did not detect mismatched lengths")
48 |
49 |
50 |
51 |
52 | if __name__ == "__main__":
53 | unittest.main()
--------------------------------------------------------------------------------
/py/emotibit/timestamp_converter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | timestamp_converter.py
4 | A script that will take two data files (ex. EmotiBit and Cyton)
5 | and convert the second file to the EmotiBit timestamps.
6 | Requires the input of detected taps
7 | """
8 | import argparse
9 | import pandas as pd
10 |
11 |
12 | def calculate_slope(source_one_first_taps,
13 | source_one_second_taps,
14 | source_two_first_taps,
15 | source_two_second_taps):
16 | """
17 |     @input source_one_first_taps:
18 |         List of timestamps of the first set of taps from source one
19 |     @input source_one_second_taps:
20 |         List of timestamps of the second set of taps from source one
21 | @input source_two_first_taps:
22 | List of timestamps of the first set of taps from source two
23 | @input source_two_second_taps:
24 | List of timestamps of the second set of taps from source two
25 |
26 |     Calculates the slope by averaging the tap times at the front
27 |     and back and then using the equation of a line.
28 |     Source one is the intended output (EmotiBit most likely)
29 |     and supplies the "y" values.
30 |     Source two is the intended input and supplies the "x" values.
31 |
32 | @output Returns the slope of the line created
33 | by the average of the two points provided.
34 | """
35 | # Ensure matching length of taps.
36 | if len(source_one_first_taps) != len(source_two_first_taps):
37 | print("""Error - Length of first taps between
38 | source one and source two does not match: """,
39 | len(source_one_first_taps), len(source_two_first_taps))
40 | return None
41 | if len(source_one_second_taps) != len(source_two_second_taps):
42 | print("""Error - Length of second taps between
43 | source one and source two does not match: """,
44 | len(source_one_second_taps), len(source_two_second_taps))
45 | return None
46 |
47 | # Get values to put into a standard linear slope formula.
48 | y1 = sum(source_one_first_taps) / len(source_one_first_taps)
49 | x1 = sum(source_two_first_taps) / len(source_two_first_taps)
50 | y2 = sum(source_one_second_taps) / len(source_one_second_taps)
51 | x2 = sum(source_two_second_taps) / len(source_two_second_taps)
52 |
53 | return ((y2 - y1) / (x2 - x1))
54 |
55 |
56 | def calculate_b(slope, source_one_first_taps, source_two_first_taps):
57 | """
58 | @input slope:
59 | a value for the slope of the line
60 | @input source_one_first_taps:
61 | the list of tap times from source one for the first set of taps
62 | @input source_two_first_taps:
63 | the list of tap times from source two for the first set of taps
64 |
65 | Calculates the "b" in y = mx + b, given y, m, and x.
66 | Use after using the calculate_slope function.
67 | y = mx + b ===> y - mx = b
68 |
69 | @output Returns a single float value, b
70 | """
71 | # Ensure same length.
72 | if (len(source_one_first_taps) != len(source_two_first_taps)):
73 | return
74 |
75 | y = sum(source_one_first_taps) / len(source_one_first_taps)
76 | x = sum(source_two_first_taps) / len(source_two_first_taps)
77 | return y - slope * x
78 |
79 |
80 | def update_time_stamps(df, column_name, m, b):
81 |
82 | """
83 | @input df: The df which is to have its timestamps converted
84 |     @input column_name: The name of the column in the df
85 | which holds the timestamps that are to be converted
86 | @input m: The slope for the linear conversion
87 | @input b: The y-intercept for the linear conversion
88 |
89 | Converts the timestamps of a df given a slope
90 | and intercept using y = mx + b
91 |
92 | @output Returns the df with the timestamps in
93 | column name as the output of y = mx + b
94 | """
95 | df[column_name] = (df[column_name] * m) + b
96 | return df
97 |
98 |
99 | def convert_time_stamps(source_one_first_taps,
100 | source_one_second_taps,
101 | source_two_first_taps,
102 | source_two_second_taps,
103 | df_to_convert,
104 | column_name,
105 | output_file=None):
106 | """
107 | @input source_one_first_taps:
108 | A list of times of the taps from file one for section one of taps
109 | @input source_one_second_taps:
110 | A list of times of the taps from file one for section two of taps
111 | @input source_two_first_taps:
112 | A list of times of the taps from file two for section one of taps
113 | @input source_two_second_taps:
114 | A list of times of the taps from file two for section two of taps
115 | @input df_to_convert: A df of the file that needs to be converted
116 |     @input column_name: The name of the timestamp column to convert
117 | @input output_file: The name of the file to write the new df to.
118 | (Optional: If not provided, the df will only be
119 | returned from this function but not written to file)
120 |
121 | @output Returns the df_to_convert,
122 | after converting the timestamps to the new values.
123 | """
124 | slope = calculate_slope(source_one_first_taps,
125 | source_one_second_taps,
126 | source_two_first_taps,
127 | source_two_second_taps)
128 | b = calculate_b(slope, source_one_first_taps, source_two_first_taps)
129 | print("==== INFO ====")
130 | print("SLOPE: ", slope)
131 | print("Y-INTERCEPT: ", b)
132 | print("==== END INFO ====")
133 | df_to_convert = update_time_stamps(df_to_convert, column_name, slope, b)
134 |
135 | if (output_file is not None):
136 | df_to_convert.to_csv(output_file)
137 | return df_to_convert
138 |
139 |
140 | def main():
141 | """
142 | Provides the command line interface of the timestamp converter.
143 | Note that by importing this file the convert_time_stamps() function
144 | can be directly used without the CLI.
145 | The convert_time_stamps() function returns a df so CLI is optional.
146 | """
147 | print("==== TIMESTAMP CONVERTER =====")
148 | parser = argparse.ArgumentParser()
149 | parser.add_argument("-tf1",
150 | "--tapfile1",
151 | action="store",
152 | type=str,
153 | nargs=1,
154 | help="""Path to the .csv file containing
155 | the taps for source one.""")
156 | parser.add_argument("-dc1",
157 | "--datacolumn1",
158 | action="store",
159 | type=str,
160 | nargs=1,
161 | help="""Name of the data column
162 | in the first file.""")
163 | parser.add_argument("-tf2",
164 | "--tapfile2",
165 | action="store",
166 | type=str,
167 | nargs=1,
168 | help="""Path to the .csv file containing
169 | the taps for source two.""")
170 | parser.add_argument("-dc2",
171 | "--datacolumn2",
172 | action="store",
173 | type=str,
174 | nargs=1,
175 | help="Name of the data column in the second file.")
176 | parser.add_argument("-f",
177 | "--fileToConvert",
178 | action="store",
179 | type=str,
180 | nargs=1,
181 | help="""File that should be converted to new timestamps
182 | (should align with the taps for file two).""")
183 | parser.add_argument("-fc",
184 | "--fileToConvertColumn",
185 | action="store",
186 | type=str,
187 | nargs=1,
188 | help="""Name of the column that should be converted
189 | to new timestamps in the file to convert.""")
190 | parser.add_argument("-o",
191 | "--output",
192 | action="store",
193 | type=str,
194 | nargs="?",
195 | help="""Output file name. (Optional: If not provided,
196 | df is returned and not written to a file).""")
197 | args = parser.parse_args()
198 | taps_one = pd.read_csv(args.tapfile1[0])[args.datacolumn1[0]].to_list()
199 | taps_two = pd.read_csv(args.tapfile2[0])[args.datacolumn2[0]].to_list()
200 |
201 | if (len(taps_one) % 2 != 0):
202 | print("""Error - The number of taps should be even.
203 | Source one has this many taps: """, len(taps_one))
204 | return
205 | if (len(taps_two) % 2 != 0):
206 |         print("""Error - The number of taps should be even.
207 | Source two has this many taps: """, len(taps_two))
208 | return
209 |
210 |     half_of_taps_one = int(len(taps_one) / 2)
211 |     half_of_taps_two = int(len(taps_two) / 2)
212 | fileToConvert = pd.read_csv(args.fileToConvert[0])
213 | outputFileName = None
214 | if (args.output is not None):
215 | outputFileName = args.output
216 |
217 |     df = convert_time_stamps(taps_one[:half_of_taps_one],
218 |                              taps_one[half_of_taps_one:],
219 |                              taps_two[:half_of_taps_two],
220 |                              taps_two[half_of_taps_two:],
221 | fileToConvert,
222 | args.fileToConvertColumn[0],
223 | outputFileName)
224 | print("===== FINISHED! =====")
225 | return df
226 |
227 |
228 | if __name__ == "__main__":
229 | main()
230 |
--------------------------------------------------------------------------------
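A small worked example of the tap-based linear mapping above (all tap times are hypothetical):

```python
import emotibit.timestamp_converter as tsc

# Averaged tap clusters: source one is the target clock ("y"),
# source two is the clock being converted ("x").
so_first, so_second = [100.0, 101.0], [200.0, 201.0]
st_first, st_second = [50.0, 51.0], [150.0, 151.0]

m = tsc.calculate_slope(so_first, so_second, st_first, st_second)
b = tsc.calculate_b(m, so_first, st_first)
print(m, b)           # slope 1.0, intercept 50.0 for these values

# Any source-two timestamp now maps onto source one's clock:
print(m * 125.0 + b)  # -> 175.0
```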
/py/emotibit/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @emotibit utils
4 | @brief helpers for EmotiBit data files, e.g. user note (UN) files and figures
5 | @example utils.print_user_notes(file_dir = r"C:\priv\myDir",
6 |                                 file_base_names = ["2022-06-07_15-23-33-810389"],
7 |                                 delim = ", ")
8 |
9 | Created on Wed Jul 20 06:14:05 2022
10 |
11 | @author: consu
12 | """
13 |
14 |
15 | import pandas as pd
16 | import matplotlib.pyplot as plt
17 |
18 | def print_user_notes(file_dir = "", file_base_names = "", delim = ", "):
19 | """
20 | @fn print_user_notes()
21 | @brief batch prints contents of user note (UN) files to console
22 | @param file_dir Base directory of the parsed data files
23 | @param file_base_names array of file bases of the data files. Expected
24 | organization is file_dir/file_base_names[i]/file_base_names[i]_XX.csv
25 | @param delim delimiter between notes
26 | """
27 | output = ''
28 |
29 | for f in range(len(file_base_names)):
30 | file_base = file_base_names[f]
31 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + 'UN' + '.csv'
32 |         user_notes = pd.read_csv(file_path)
33 | for note in user_notes['UN']:
34 | output = output + note + delim
35 | if (f < len(file_base_names) - 1): # Don't add an extra \n
36 | output = output + '\n'
37 | print(output)
38 | def save_fig(save_dir = "", _dpi = 300):
39 | import os
40 | import pickle
41 | fig = plt.gcf()
42 | title = fig.canvas.get_window_title()
43 | if save_dir == "":
44 | save_dir = os.getcwd()
45 | if not os.path.exists(save_dir):
46 | os.makedirs(save_dir)
47 | plt.savefig(save_dir + "/" + title + ".png", transparent=False, dpi = _dpi)
48 | plt.savefig(save_dir + "/" + title + ".pdf", transparent=True, dpi = _dpi)
49 | with open(save_dir + "/" + title + ".fig", 'wb') as f:
50 | # pickle.dump(fig, open('FigureObject.fig.pickle', 'wb', f))
51 | pickle.dump(fig, f)
52 | # open with pickle.load(open('FigureObject.fig.pickle', 'rb'))
--------------------------------------------------------------------------------
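A usage sketch for `save_fig`; the directory is hypothetical, and it assumes a matplotlib version where `fig.canvas.get_window_title()` (used inside `save_fig`) still resolves to the window title set on the figure manager:

```python
import matplotlib.pyplot as plt
import emotibit.utils as utils

plt.plot([0, 1], [0, 1])
# save_fig names its outputs after the figure window title.
plt.gcf().canvas.manager.set_window_title("example-figure")
utils.save_fig(save_dir=r"C:\priv\myDir\figures", _dpi=150)
# -> example-figure.png, example-figure.pdf, and a pickled example-figure.fig
```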
/py/examples/datarealigner_example/ReadMe.md:
--------------------------------------------------------------------------------
1 | # Datarealigner example
2 | Realigns EmotiBit data against Flexcomp reference data loaded with `emotibit.datasyncer`; see `datarealigner_example.py` and the Spyder guide in `py/examples/docs`.
--------------------------------------------------------------------------------
/py/examples/datarealigner_example/datarealigner_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Tue June 25 2019
3 |
4 | @author: Marie-Eve Bilodeau marie-eve.bilodeau.1@etsmtl.net
5 | """
6 |
7 | import emotibit.datasyncer as syncer
8 | import matplotlib.pyplot as plt
9 | import locale
10 | import numpy
11 | import emotibit.datarealigner as realign
12 |
13 | my_syncer = syncer.DataSyncer()
14 | my_realigner = realign.DataRealigner()
15 |
16 | def get_data_from_datasyncer():
17 |
18 | # As per datasyncer example
19 | # Load EmotiBit data
20 | file_dir0 = "C:/Users/marie/Documents/Maîtrise/Emotibit/Validation study PPG_EDA/Participant 00013/Golden_p13/p13_data"
21 | file_base = "2019-05-24_14-25-39-507"
22 | file_ext = ".csv"
23 | data_types = ["PI"]
24 | file_names0 = []
25 | for data_type in data_types:
26 | file_names0.append(file_base + "_" + data_type + file_ext)
27 | data_col0 = 7
28 | data_start_row1 = 2
29 | myLocale = locale.getlocale() # Store current locale
30 | locale.setlocale(locale.LC_NUMERIC, 'USA') # Switch to new locale to process file
31 | my_syncer.load_data(file_dir0, file_names0, data_col0)
32 | locale.setlocale(locale.LC_NUMERIC, myLocale) # Set locale back to original
33 | #print("Data0 = ", my_syncer.time_series[0].data)
34 | #print("Timestamp0 = ", my_syncer.time_series[0].timestamp)
35 | my_syncer.time_series[0].timestamp = numpy.subtract(my_syncer.time_series[0].timestamp, numpy.floor(my_syncer.time_series[0].timestamp[0]))
36 |
37 | # Load Flexcomp data
38 | # ToDo: Utilize flexcompparser to handle more mundane settings
39 | FLEXCOMP_TIME = 0
40 | FLEXCOMP_EKG = 1
41 | FLEXCOMP_BVP = 2
42 | FLEXCOMP_SC = 5
43 | file_dir1 = "C:/Users/marie/Documents/Maîtrise/Emotibit/Validation study PPG_EDA/Participant 00013/Golden_p13/p13_data"
44 | file_name1 = "ID_00013_exp_10min_rest_1_2019-05-24_1437.txt"
45 | data_cols1 = [FLEXCOMP_BVP] #, FLEXCOMP_SC]
46 | timestamp_col1 = FLEXCOMP_TIME
47 | data_start_row1 = 8
48 | delimiter1 = ';'
49 | myLocale = locale.getlocale() # Store current locale
50 | locale.setlocale(locale.LC_NUMERIC, 'French_Canada.1252') # Switch to new locale to process file
51 | my_syncer.load_data(file_dir1, file_name1, data_cols1, timestamp_col1, data_start_row1, delimiter1)
52 | locale.setlocale(locale.LC_NUMERIC, myLocale) # Set locale back to original
53 |
54 | get_data_from_datasyncer()
55 | # Flexcomp first then Emotibit
56 | my_realigner.load_data(my_syncer.time_series[1].timestamp, my_syncer.time_series[1].data, my_syncer.time_series[0].timestamp, my_syncer.time_series[0].data)
57 | # Remove DC from the signal and match amplitude;
58 | # for PPG data, inverting the wave can help.
59 | INVERT = True
60 | my_realigner.match_data_sets(INVERT)
61 |
62 | plt.figure()
63 | plt.title('Before Realignment')
64 | plt.plot(my_realigner.timestamp[0],my_realigner.data[0],'b')
65 | plt.plot(my_realigner.timestamp[1],my_realigner.data[1],'r')
66 | plt.gca().legend(('Flexcomp Data','Emotibit Data'))
67 | plt.xlabel('Timestamp')
68 | plt.ylabel('Data')
69 | plt.show()
70 |
71 | """ 1 minute works well for resting PPG data; for non-rest data take a longer section.
72 | For EDA data, 4 min works well. """
73 | SPLINE_START_TIME = 100
74 | SPLINE_STOP_TIME = 160
75 | MAX_DELAY = 30 # align on a delay of max 30 sec
76 | FLEXCOMP_SAMPLING_RATE = 256
77 |
78 | delay = my_realigner.get_delay_and_realign_data(SPLINE_START_TIME, SPLINE_STOP_TIME, MAX_DELAY, FLEXCOMP_SAMPLING_RATE)
79 | print(delay)
80 | plt.figure()
81 | plt.title('Realigned Data')
82 | plt.plot(my_realigner.timestamp[0],my_realigner.data[0],'g')
83 | plt.plot(my_realigner.timestamp[1],my_realigner.data[1],'y')
84 | plt.gca().legend(('Flexcomp Data','Emotibit Data'))
85 | plt.xlabel('Timestamp')
86 | plt.ylabel('Data')
87 | plt.show()
88 |
89 | # my_realigner.upsample_emo_at_flex();
90 |
91 | """ Save realigned and downsampled data in a text file """
92 | my_realigner.downsample_flex_at_emo()
93 |
94 | realigned_file_name = "C:/Users/marie/Documents/Maîtrise/Emotibit/Test_Linda/realigned_data.txt"
95 | f = open(realigned_file_name, "w+")
96 | f.write('Timestamp, Flexcomp Data, Emotibit Data\n')
97 | for i in range(len(my_realigner.timestamp[0])):
98 | print(my_realigner.timestamp[0][i], ',', my_realigner.data[0][i], ',', my_realigner.data[1][i], file=f)
99 | f.close()
100 |
101 | plt.figure()
102 | plt.title('Realigned Data with Same Timestamp')
103 | plt.plot(my_realigner.timestamp[0],my_realigner.data[0],'b')
104 | plt.plot(my_realigner.timestamp[1],my_realigner.data[1],'r')
105 | plt.gca().legend(('Flexcomp Data','Emotibit Data'))
106 | plt.xlabel('Timestamp')
107 | plt.ylabel('Data')
108 | plt.show()
--------------------------------------------------------------------------------
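As an aside, the manual print loop that writes `realigned_data.txt` above could equally be a single pandas call; a sketch with stand-in arrays in place of `my_realigner`'s fields:

```python
import numpy as np
import pandas as pd

# Stand-ins for my_realigner.timestamp[0], .data[0], and .data[1].
timestamp = np.linspace(0, 10, 11)
flexcomp = np.sin(timestamp)
emotibit = np.sin(timestamp) + 0.01

pd.DataFrame({"Timestamp": timestamp,
              "Flexcomp Data": flexcomp,
              "Emotibit Data": emotibit}).to_csv("realigned_data.txt",
                                                 index=False)
```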
/py/examples/datasyncer_example/ReadMe.md:
--------------------------------------------------------------------------------
1 | # Datasyncer example
2 | Loads EmotiBit and Flexcomp recordings with `emotibit.datasyncer`, shifts their timestamps onto epoch time, and helps select sync times; see `datasyncer_example.py`.
--------------------------------------------------------------------------------
/py/examples/datasyncer_example/datasyncer_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Apr 17 12:06:41 2019
4 |
5 | @author: Sean Montgomery
6 | """
7 |
8 | import emotibit.datasyncer as syncer
9 | import matplotlib.pyplot as plt
10 | import locale
11 | import numpy
12 | import emotibit.flexcompparser as flexcomp
13 | my_syncer = syncer.DataSyncer()
14 |
15 | # Load EmotiBit data
16 | file_dir0 = "C:/priv/local/LocalDev/Sean/of_v0.9.8_vs_release/apps/CFL_SW_BiosensorModule/EmotiBitDataParser/Participant 00013/EmotiBit"
17 | file_base = "2019-05-24_14-51-51-078"
18 | file_ext = ".csv"
19 | data_types = ["PI"]
20 | file_names0 = []
21 | for data_type in data_types:
22 | file_names0.append(file_base + "_" + data_type + file_ext)
23 | data_col0 = 7
24 | data_start_row1 = 2
25 | myLocale = locale.getlocale() # Store current locale
26 | locale.setlocale(locale.LC_NUMERIC, 'USA') # Switch to new locale to process file
27 | my_syncer.load_data(file_dir0, file_names0, data_col0)
28 | locale.setlocale(locale.LC_NUMERIC, myLocale) # Set locale back to original
29 | print("Data0.len = " + str(len(my_syncer.time_series[0].data)))
30 | #9 May 2019 14:02:01 - 00:11:07,813 = 13:50:53.187 = 1557424253.187
31 | # 1557409853.187 + 4*60*60 = 1557424253.187
32 | #my_syncer.time_series[0].timestamp = numpy.subtract(my_syncer.time_series[0].timestamp, 0)
33 | #my_syncer.time_series[1].timestamp = numpy.subtract(my_syncer.time_series[1].timestamp, 0)
34 |
35 | # Load Flexcomp data
36 | # ToDo: Utilize flexcompparser to handle more mundane settings
37 | FLEXCOMP_TIME = 0
38 | FLEXCOMP_EKG = 1
39 | FLEXCOMP_BVP = 2
40 | FLEXCOMP_SC = 5
41 | file_dir1 = "C:/priv/local/LocalDev/Sean/of_v0.9.8_vs_release/apps/CFL_SW_BiosensorModule/EmotiBitDataParser/Participant 00013/Flex comp"
42 | file_name1 = "ID_00013_exp_10min_tapi_1_2019-05-24_1503.txt"
43 | data_cols1 = [FLEXCOMP_BVP] #, FLEXCOMP_SC]
44 | timestamp_col1 = FLEXCOMP_TIME
45 | data_start_row1 = 8
46 | delimiter1 = ';'
47 | myLocale = locale.getlocale() # Store current locale
48 | locale.setlocale(locale.LC_NUMERIC, 'French_Canada.1252') # Switch to new locale to process file
49 | my_syncer.load_data(file_dir1, file_name1, data_cols1, timestamp_col1, data_start_row1, delimiter1)
50 | locale.setlocale(locale.LC_NUMERIC, myLocale) # Set locale back to original
51 | my_flexcomp_parser = flexcomp.Parser(file_dir1 + "/" + file_name1)
52 | my_syncer.time_series[1].timestamp = numpy.add(my_syncer.time_series[1].timestamp, my_flexcomp_parser.get_start_epoch())
53 | #my_syncer.time_series[3].timestamp = numpy.add(my_syncer.time_series[3].timestamp, session_epoch - end_epoch)
54 |
55 |
56 | # Plot histogram of timestamps
57 | my_syncer.plot_timestamp_hist()
58 | # Select sync times
59 | my_syncer.select_sync_times()
60 |
61 | #while (1):
62 | # m = plt.ginput(1);
63 | # print(m)
64 |
65 | if (0):
66 | points = plt.ginput(2)
67 | diff = points[0][0] - points[1][0]
68 | print(str(points[0][0]) + " - " + str(points[1][0]) + " = " + str(diff))
69 |
70 |
--------------------------------------------------------------------------------
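The locale save/switch/restore pattern used around both `load_data` calls is worth isolating. A sketch of the same idea hardened with `try`/`finally` so the locale is restored even if parsing raises (the `try`/`finally` is an addition, not something the example above does; `'French_Canada.1252'` is a Windows-style locale name):

```python
import locale

saved = locale.getlocale(locale.LC_NUMERIC)
try:
    locale.setlocale(locale.LC_NUMERIC, 'French_Canada.1252')
    value = locale.atof("3,14")  # comma decimal separator -> 3.14
finally:
    locale.setlocale(locale.LC_NUMERIC, saved)
print(value)
```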
/py/examples/dataviewer_example/ReadMe.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 | The Python DataViewer is a tool developed to help visualize the data captured on EmotiBit. It is an interactive tool where you can view the entire data at once, zoom in and out of any part in the time series, compare data across different channels, and get an overall sense of the data captured.
3 | ![][EmotiBit-PythonDataViewer]
4 |
5 | ## Getting Started
6 | - You need to have Python installed on your system to use the DataViewer. The easiest way to get Python is to install Anaconda Navigator.
7 | - Click [here](https://www.anaconda.com/products/individual) to download the latest Anaconda release. Make sure to get the Anaconda distribution for Python 3.7.
8 | - Once installed, you will have access to Anaconda Navigator and Anaconda prompt.
9 | - The easiest way to create an environment required for dataViewer is to import the environment using a yml file.
10 | - We have provided an environment `yml` file with this repo. You can find it at `py/anaconda-environments/EmotiBit-pyenv.yml`.
11 | - Open the Anaconda prompt. You can do so by typing `anaconda` in the start menu and choosing `Anaconda prompt`.
12 |
13 |
14 | - In the `anaconda prompt`, type the following command to create the environment
15 | - `conda env create -f <path-to-yml-file>`. Replace `<path-to-yml-file>` with the path to the **yml** file.
16 | - When finished, close the `anaconda prompt` and open the `anaconda navigator`. Choose `EmotiBit-pyenv` as the active environment.
17 |
18 | ![][conda-choose-env]
19 |
20 | - Once EmotiBit-pyenv is active, launch Spyder.
21 |
22 | ![][conda-start-spyder]
23 |
24 | - Open the file `dataviewer_example.py`
25 | - To successfully run the file, you will need to add paths to the **PYTHONPATH manager**.
26 | - To do so, in the Spyder window, click on `Tools` > `PYTHONPATH manager` > `Add`. Add the path to the `py` folder(the one included in this repo).
27 | - You will need to close and restart `Spyder` for the changes to take effect.
28 | - Once Spyder is restarted, run the command `%matplotlib qt` in the iPython terminal. This enables plotting in interactive windows.
30 | - Follow the steps below to load the data.
31 |
32 |
33 | ### Steps to load data
34 | - The parsed files are loaded using 2 variables, `The path to the directory containing the files` and the `base file name`.
35 | - Specify the path to the directory containing the parsed data (the output of the data parser) inside the double quotes of the _variable_ **file_dir**.
36 | - Specify the file name in the double quotes of the _variable_ **file_base**. This will be the name of the file (without any file extension) generated by the EmotiBit.
37 | - For example, if your data to be visualized looks like the image shown below:
38 | - ![][Example-dataDirectory]
39 | - Then the `file_dir = "C:\Users\cfl\Documents\EmotiBit\DataAnalysis\exampleData"` (absolute path to the folder)
40 | - And `file_base = "2021-04-26_16-59-17-085213"`
41 | - You can provide extra instructions before runtime to enable plotting of User Notes or DC markers. To do so, go to
42 | `Run->Configuration per file` (in your Spyder window).
43 | - Check the `command line options` check box. In the text box next to the checkbox, enter parameters as shown below:
44 | - _parameter1_ value _parameter2_ value
45 | - The parameters available are **hide_dc** and **usernote_toggle**.
46 | - Ex: `hide_dc MX,MY,MZ` will prevent plotting of DC markers on the MX, MY and MZ sensor data
47 | - Ex: `usernote_toggle False` will not display any user notes.
48 | - Ex: `hide_dc MX,MY,MZ usernote_toggle False` will perform both of the above actions.
49 | - The tags for which the DC markers should be avoided must be separated by _commas_
50 | - Click on `Ok`
51 | - Run the command `%matplotlib qt` in the Spyder iPython terminal to enable plotting graphs in a separate window, if you have not already done so.
52 | - Run the file in Spyder.
53 | ### The Python visualizer has the following functions available for use
54 | - You can **zoom** into any part of the plot using the default `zoom button` provided in Spyder. Just press the magnifying glass icon, then click and drag anywhere on the plot to zoom in on that portion.
55 | - The `move button` is located next to the `zoom button`. You can press this button and _click and drag_ on any plot to slide through the data. If you press the `x` or `y` key while moving, the data moves only in the _x or the y direction_, respectively.
56 | - You can press the home button to go back to the original view of the data.
57 | - **Keyboard Shortcuts**
58 | - **_hide/show DC markers_**: The cutoffs (instances when the sensor data goes out of bounds) in the data are marked with red vertical lines. You can toggle the visibility of the DC markers in any individual plot by clicking on the plot and pressing the `space` key. Pressing it once removes the DC markers from that plot. Pressing it again will re-mark the lines on the plot.
59 | - **_Y-Axis Autoscale_**: After you zoom into any portion of the data, the data on each plot can be rescaled by pressing the `a` key. This adjusts the Y-Axis limits for each plot.
60 | - **_Mark any point in time on the fly_**: Click on the point (any location within the plot) you want to highlight, and press `m`. This will highlight a region around that point in time on all plots. This is a great way to see the relative position of any activity across plots. Click anywhere on the figure, outside the plots, to remove this highlight.
61 |
62 | [conda-choose-env]: ../../../assets/anaconda-choosing-environment.jpg "Anaconda Environment Choosing"
63 | [conda-start-spyder]: ../../../assets/anaconda-launch-spyder.jpg "Anaconda launch Spyder"
64 | [conda-prompt]: ../../../assets/anaconda-prontpt-start.jpg "Anaconda prompt start"
65 | [EmotiBit-PythonDataViewer]: ../../../assets/PythonDataViewer.jpg "EmotiBit-PythonDataViewer"
66 | [Example-dataDirectory]: ../../../assets/Example-dataDirectory.png "Example-dataDirectory"
67 |
--------------------------------------------------------------------------------
/py/examples/dataviewer_example/dataviewer_example.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Thu Aug 15 3:06:23 2019
5 |
6 | @author: Nitin
7 | """
8 |
9 | import sys
10 | import emotibit.dataviewer as dataviewer
11 |
12 | # specify the location of the folder which contains the parsed data
13 | file_dir = r"path/to/parsed/data" # e.g. C:\Users\dev\data
14 | # specify the base file name of the parsed data
15 | file_base = r"base-file-name" # e.g. 2019-12-10_11-55-54-038975
16 |
17 | # arguments for command line
18 | # usernote_toggle False hide_dc EA,ER
19 |
20 | hide_DC_tags = ["EA", "SA", "SR", "SF","PI", "PR", "PG", "HR", "TH", "AX", "AY", "AZ", "GX", "GY", "GZ",
21 | "MX", "MY", "MZ", "T1", "DC", "DO", "UN"]
22 |
23 | userNote_toggle = True
24 | for i, argument in enumerate(sys.argv[1:]):
25 | if i % 2 == 0: # even position counting from after file name
26 | if argument == "hide_dc":
27 | hide_DC_tags = sys.argv[i+2].split(',')
28 |
29 | elif argument == "usernote_toggle":
30 | if sys.argv[i+2] == "False":
31 | userNote_toggle = False
32 |
33 | analysis = dataviewer.DataViewer(file_dir, file_base, hide_DC_tags, userNote_toggle)
34 |
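35 | # Example command-line invocation using the arguments above:
36 | #   python dataviewer_example.py usernote_toggle False hide_dc EA,ER
37 | # This hides the user notes and, given the hide_DC_tags variable above,
38 | # hides DC markers only on the EA and ER plots.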
--------------------------------------------------------------------------------
/py/examples/docs/Spyder_datarealigner_guide_en.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/py/examples/docs/Spyder_datarealigner_guide_en.docx
--------------------------------------------------------------------------------
/py/examples/ecgHR_detector_example/README.md:
--------------------------------------------------------------------------------
1 | ## ECG HR Detector
2 | ### About
3 | The ECG HR Detector is designed to take in raw ECG data and determine the HR from this data. After detecting the HR, it will plot the data against another HR signal for comparison. Additionally, the detected HR file is saved.
4 |
5 | ### Quickstart Usage
6 | Sample data has been provided and can be used with the ECG HR Detector with the following command:
7 |
8 | ```py ecgHR_detector.py -ecg cytonHR2_newTime.csv -ecgCol ' EXG Channel 0' -ecgt ' Timestamp' -hr ebhr2_HR.csv -hrCol HR -hrt LocalTimestamp```
9 |
10 | #### Explanation of Arguments in Example
11 | - ecg: Provides the file with the ECG Data
12 | - ecgCol: The column name in the ECG file containing the ECG Data
13 | - ecgt: The column name in the ECG file containing timestamps
14 | - hr: Provides the filename with HR data
15 | - hrCol: Provides the column name in the HR file with the HR data
16 | - hrt: Provides the column name in the HR file containing timestamps
17 |
18 | For a full list of available arguments and their usages, run the ecgHR_detector with the -h argument.
19 |
20 | ### Usage Notes
21 | - Ensure that the timestamps between the ECG file and the HR file have already been aligned. ECG HR detection will still work on unaligned data, but the comparison plot will not be readable. If the data needs to be aligned, see the timestamp converter example.
22 | - The output file will not have a consistent frequency. Timestamps with HR data will be provided, but the spacing between them will not be consistent.
--------------------------------------------------------------------------------
/py/examples/ecgHR_detector_example/SampleDataforECG_HR_Detector.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/py/examples/ecgHR_detector_example/SampleDataforECG_HR_Detector.zip
--------------------------------------------------------------------------------
/py/examples/ecgHR_detector_example/ecgHR_detector_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mar 13 2024
3 | An example of how to use the ECG HR detector functions in code.
4 | CLI usage is also supported and documentation can be found by using -h
5 | """
6 |
7 | from emotibit.ecgHR_detector import detect_hr
8 |
9 | """
10 | This example calls the detect_hr function using a Cyton file that has had
11 | its timestamps aligned and an EmotiBit HR file to compare the detection results against.
12 |
13 | For more information see the README
14 | """
15 |
16 | def main():
17 |
18 |     hr = detect_hr("cytonHR2_newTime.csv",  # ECG data file
19 |                    " EXG Channel 0",        # ECG data column
20 |                    " Timestamp",            # ECG timestamp column
21 |                    "ebhr2_HR.csv",          # HR file to compare against
22 |                    "HR",                    # HR data column
23 |                    "LocalTimestamp",        # HR timestamp column
24 |                    250,                     # ECG sampling rate in Hz (the Cyton records at 250 Hz)
25 |                    "ecgHR.csv",             # output file for the detected HR
26 |                    "detectedHR.png")        # output comparison plot
27 | print(hr)
28 |
29 | if __name__ == "__main__":
30 | main()
--------------------------------------------------------------------------------
/py/examples/full_experiment_example/README.md:
--------------------------------------------------------------------------------
1 | # HR_Scorer_Full_Experiment_Example
2 | This repository contains several different scripts for working with biometric signals from EmotiBit (and other devices) to create derivative metrics and to compare signals to each other to see how similar they are.
3 |
4 | This README describes the full process of conducting an experiment between two devices all the way to scoring how similar the heart rate readings from the devices are to each other.
5 |
6 | ## The Bigger Picture
7 | ### What are we trying to do?
8 |
9 | We are trying to create a way to easily test the performance of different biometric algorithms. We want an environment where we can quickly test the performance of algorithms in a way that lets us directly compare them to each other.
10 |
11 | ### How are we trying to do it?
12 |
13 | We have created a pipeline that, through realignment and resampling, allows us to compare two heart rate signals. Based on analysis of the different metrics we have tried and of the data itself, we report multiple metrics that we believe help explain the similarities between the two HR signals.
14 |
15 | We have also created a framework for designing tests so that they are replicable and versioned, ensuring that the data we use can evolve along with the processes that we create.
16 |
17 | ### Where do datasets/tests get stored?
18 | We have created different tests (ways of collecting data); the artifacts of these tests are datasets that were collected with those tests. The tests can be found in [this repository.](https://github.com/EmotiBit/Biometric_Validation_Methods)
19 |
20 | These tests all carry a version number, making it easy to understand when changes happened and how they do or do not affect the data. The methodology for versioning can be found [here.](https://github.com/EmotiBit/Biometric_Validation_Methods)
21 |
22 | Test results are stored in a table that shows statistics for each algorithm on the test side by side, published as part of a release; an example can be seen [here.](https://github.com/EmotiBit/Biometric_Validation_Methods/releases/tag/sit-stand-sit_v0.0.0)
23 |
24 | ### How does what we are doing help us do what we are trying to do?
25 |
26 | This allows us to have a basic structure and start establishing the processes by which we will be able to continue to test algorithms.
27 |
28 | ## Data Collection
29 | The first step is data collection. Collection should be done on two devices at the same time, tapping the devices against each other at the beginning and the end of the session so that the data from the devices can be properly aligned.
30 |
31 | You can find a detailed description of performing data collection [here](../tapdetector_example/README.md#completing-the-tapping-procedure).
32 |
33 | At the end of your data collection process, you should have one set of acceleration and heart data from each device, in .csv format. Ensure that any parsing or other preprocessing necessary to use the data is completed before moving to the next step.
34 |
35 | ## Tap Detection
36 | The second step is tap detection. The taps that you performed in the data collection step need to be detected and their timestamps recorded so that they can be used in a later step to align the data between devices.
37 |
38 | Tap detection is done with the tap detector script and the files collected during your data collection. Tap detector directions can be found [here](../tapdetector_example/README.md).
39 |
40 | At the end of this step, you should have two tap files. In each file, there should be a column of timestamps marking when taps happened. There should be the same number of timestamps as taps that you performed, and the number of taps in each file should be the same.
41 |
42 | ## Timestamp Conversions
43 | Now that you have your raw data and the tap data, you can realign the timestamps of one of your data sources to the other source. To do this you will use the tap files and the raw data files.
44 |
45 | You can find detailed instructions for timestamp conversion [here](../timestamp_converter_example/README.md).
46 |
47 | At the end of this step, you should now have one of your raw data sources rewritten with new timestamps so that it is lined up with your other data source.
48 |
49 | ## ECG HR Detection
50 | This step only applies if your data is ECG data that has not had HR detected from it yet. If all of your data is already HR data, you can skip this step.
51 |
52 | If you have ECG data that needs to have HR detected from it, you can do so using the ecgHR_detector. You can find detailed instructions on using it [here](../ecgHR_detector_example/README.md).
53 |
54 | At the end of this step, you should now have HR data with timestamps for any ECG data you had.
55 |
56 | ## HR Scoring
57 | In this final step, data is resampled to a consistent rate and the two files are then compared. You will need two HR data files for this step.
58 |
59 | The process for using the hr_scorer is shown [here](../hr_scorer_example/README.MD).
60 |
61 | At the end of this step you will have multiple metrics for comparing the similarity of the two HR files.
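62 |
63 | ## Putting the Pipeline Together in Code
64 | Each step above also has a Python API, shown in the individual examples. The sketch below chains the ECG HR detection and HR scoring steps. The file names, column names, 250 Hz ECG rate, and 100 Hz resampling rate are assumptions borrowed from the other examples in this repository, not fixed values.
65 |
66 | ```
67 | from emotibit.ecgHR_detector import detect_hr
68 | from emotibit.hr_scorer import resample, score
69 |
70 | # Detect HR from the (already tap-aligned) ECG file; this also writes the
71 | # detected HR to "ecgHR.csv" and a validation plot to "detectedHR.png".
72 | detect_hr("cytonHR2_newTime.csv", " EXG Channel 0", " Timestamp",
73 |           "ebhr2_HR.csv", "HR", "LocalTimestamp",
74 |           250, "ecgHR.csv", "detectedHR.png")
75 |
76 | # Resample both HR files to a common rate, then score their similarity.
77 | hr1, hr2 = resample("ebhr2_HR.csv", "LocalTimestamp", "ecgHR.csv", "Timestamp", 100)
78 | slope, intercept, r, rho, tau, p, std_err = score(hr1, "HR", hr2, "HR",
79 |                                                   None, "EmotiBit", "Cyton")
80 | print("Slope:", slope, "Intercept:", intercept, "R:", r, "P:", p)
81 | ```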
--------------------------------------------------------------------------------
/py/examples/hr_scorer_example/README.MD:
--------------------------------------------------------------------------------
1 | ## HR Scorer
2 | ### About
3 | The HR Scorer is designed to take in two heart rate signals and provide a measure of how similar they are.
4 |
5 | To do this, the script resamples both files at the given rate and then conducts a linear regression, using data one as the dependent variable and data two as the independent variable.
6 |
7 | The HR Scorer provides the following metrics for evaluation of signal similarity:
8 | - Slope: The slope of the line created in the linear regression. An ideal situation would see a slope of 1.
9 | - Intercept: The intercept of the line created in the linear regression. An ideal situation would see an intercept of 0.
10 | - R: The correlation coefficient between the two heart rates. If the heart rates have exactly the same values at every timestamp, this value will be 1.
11 | - P: The p-value associated with R.
12 | - err: The standard error value.
13 |
14 | ### Quickstart Usage
15 | Sample files have been provided in the .zip folder. The example file will work as long as those files are in the same directory as the example file. Here is how the HR scorer can be used with the sample data via CLI:
16 |
17 | ```py hr_scorer.py -hr1 ebhr6_HR_trim.csv -t1 LocalTimestamp -d1 HR -hr2 ecgHR6.csv -t2 Timestamp -d2 HR -f 100 -n1 EmotiBit -n2 Cyton```
18 |
19 | #### Explanation of Arguments in Example:
20 | - hr1: Provides the path to the file of the HR signal that is being considered the dependent variable in the analysis.
21 | - t1: Provides the column name in the first file that contains the timestamps of the HR data.
22 | - d1: Provides the column name in the first file that contains the HR data.
23 | - hr2: Provides the path to the file of the HR signal that is being considered the independent variable in the analysis.
24 | - t2: Provides the column name in the second file that contains the timestamps of the HR data.
25 | - d2: Provides the name of the column in the second file that contains the HR data.
26 | - f: Provides the frequency to resample to (in hertz).
27 | - n1: Provides a name for the dependent variable.
28 | - n2: Provides a name for the independent variable.
29 |
30 | ### Format of Files
31 | The HR scorer requires that two files are provided. The files must conform to these requirements:
32 |
33 | 1. There is a column of HR data
34 | 1. There is a corresponding column of timestamps (should be the same length as the HR column)
35 |
36 | ### Output
37 | The values of the scoring are shown in the terminal and two plots are saved/opened for viewing to validate results.
38 |
39 | ### Notes on Scoring Methodology
40 | To score the files, they are first resampled to the specified rate. This rate should be the sampling rate of the slower device to avoid creating precision that does not truly exist.
41 |
42 | Then, a linear regression is completed between the two sets of resampled data and statistics are reported.
43 |
44 | It should be noted that due to the way that correlation coefficients are calculated, data with low variance tends to have lower R values than data with high variance. (For example, if your HR data is someone sitting still for several minutes as opposed to standing up and sitting down during the recording, then your R value will likely be lower.) Keep this scoring methodology in mind when scoring your own data to ensure that the results you are getting correctly answer the questions you are asking.
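45 |
46 | As a toy illustration of these metrics (plain `scipy` with made-up numbers, shown only to illustrate what the statistics mean, not the HR Scorer's internal code), a linear regression between two already-resampled HR series looks like this:
47 |
48 | ```
49 | import numpy as np
50 | from scipy import stats
51 |
52 | # Two hypothetical resampled HR series sharing the same timestamps
53 | hr1 = np.array([60.0, 62.0, 65.0, 70.0, 68.0])  # dependent variable
54 | hr2 = np.array([61.0, 61.5, 66.0, 69.0, 69.5])  # independent variable
55 |
56 | # Identical signals would yield slope = 1, intercept = 0, and R = 1
57 | result = stats.linregress(hr2, hr1)
58 | print(result.slope, result.intercept, result.rvalue, result.pvalue, result.stderr)
59 | ```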
--------------------------------------------------------------------------------
/py/examples/hr_scorer_example/SampleDataForHRScorer.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/py/examples/hr_scorer_example/SampleDataForHRScorer.zip
--------------------------------------------------------------------------------
/py/examples/hr_scorer_example/hr_scorer_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mar 25 2024
3 | An example of how to use the HR scorer functions in code.
4 | CLI usage is also supported and documentation can be found by using -h
5 | """
6 |
7 | from emotibit.hr_scorer import resample, score
8 | import pandas as pd
9 |
10 | """
11 | In this example detected HR from ECG data and EmotiBit HR data are compared and scored
12 | """
13 |
14 | def main():
15 |
16 |     # first, resample the data; the function reads both files and resamples them to 100 Hz
17 | ebResampled, cyResampled = resample("ebhr6_HR_trim.csv", "LocalTimestamp", "ecgHR6.csv", "Timestamp", 100)
18 | # then, score them
19 | slope, intercept, r, rho, tau, p, std_err = score(ebResampled, "HR", cyResampled, "HR", None, "EmotiBit", "Cyton")
20 | # and print results
21 | print("Slope: ", slope, "\nIntercept: ", intercept, "\nR: ", r, "\nP: ", p, "\nerr: ", std_err)
22 |
23 |
24 | if __name__ == "__main__":
25 | main()
--------------------------------------------------------------------------------
/py/examples/hyperscanning_viewer/hyperscanning_viewer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | hyperscanning_viewer.py
4 | Plots EmotiBit data from multiple people and multiple data files per person.
5 | Change usage parameter in specified section to match desired inputs and run.
6 |
7 | Utilize the pan, zoom
8 | and home/reset buttons to traverse data.
9 |
10 | The following keystrokes provide
11 | additional functionality:
12 | 'c': Creates a [c]omment entry for storage in an output note file. Note that
13 |     the timestamp corresponds to the x-position clicked before hitting 'c'.
14 | 'a': [a]uto-rescales the y-limits. At present a mouse click is
15 | required after pressing 'a' to update the QT plot engine.
16 | 't': [t]ransposes the subplots, swapping plot rows and columns. Note this
17 | presently breaks the home/reset button.
18 | 'r': [r]esets the figure.
19 | 'e': Toggles titles and labels for [e]ach subplot
20 |
21 | ToDo:
22 | - Remove click requirement for 'a' functionality
23 | - Separate the example code from the functions
24 | - Fix home functionality after 't'
25 | - Add vertical lines at comment times and some way to see comment
26 | - Consider making click after 'c' select time
27 | - Add vertical line at click on all plots
28 |
29 | Created on Fri May 24 15:37:34 2023
30 |
31 | @author: produceconsumerobot
32 | """
33 | import pathlib
34 | import pandas as pd
35 | import matplotlib.pyplot as plt
36 | import tkinter
37 | import tkinter.simpledialog as simpledialog
38 | import numpy as np
39 | import os
40 | import csv
41 |
42 | try:
43 | import IPython
44 | IPython.get_ipython().magic("matplotlib qt")
45 | except:
46 | plt.ion()
47 |
48 | # **** Set usage parameters here ****
49 |
50 | fig_size = [15, 12]
51 | typetags_in_cols = False
52 | label_each_subplot = False
53 | output_note_typetag = 'analysis_notes'
54 |
55 | # select the directory where the output will be stored.
56 | output_dir = r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data'
57 |
58 | signals = {
59 | 'typetags': ['TH','T1','D0','AX','EA'],
60 | 'marker_styles': ['-','-','*','-','-']
61 | }
62 |
63 | #update the following dictionary where all the data can be found
64 | # Currently, keep all the files from a single person under 1 directory. sub-directories are allowed
65 | database = [
66 | {
67 | 'name':"Mike",
68 | 'root_path':r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Mike',
69 | },
70 | {
71 | 'name':"Bob",
72 | 'root_path':r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Bob',
73 | },
74 | {
75 | 'name':"Jared",
76 | 'root_path':r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Jared',
77 | },
78 | {
79 | 'name':"John",
80 | 'root_path':r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/John',
81 | },
82 | {
83 | 'name':"Diane",
84 | 'root_path':r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Diane',
85 | }
86 | ]
87 |
88 | # **** END usage parameters ****
89 |
90 | # change the output notes file name if required
91 | output_file_path = os.path.join(output_dir, output_note_typetag + '.csv')
92 |
93 | if (typetags_in_cols):
94 | num_rows = len(database)
95 | num_cols = len(signals['typetags'])
96 | else:
97 | num_rows = len(signals['typetags'])
98 | num_cols = len(database)
99 |
100 | fig, axes = plt.subplots(num_rows,num_cols, sharex=True) # creates number of subplots equal to entries in the database
101 | xlims = []
102 |
103 | #%% Read all files into dataframes
104 | x_axis_col = 'LslMarkerSourceTimestamp'
105 | #x_axis_col = 'LocalTimestamp'
106 | for db_i in range(len(database)):
107 | database[db_i]['data'] = {}
108 | for typetag in signals['typetags']:
109 | print('-->' + database[db_i]['name'])
110 | basepath = pathlib.Path(database[db_i]['root_path'])
111 | file_list = list(basepath.rglob("*" + typetag + "*"))
112 | filepath_list = []
113 | for file in file_list:
114 | print(file.stem)
115 | filepath_list.append(str(file.resolve()))
116 | for f in range(len(filepath_list)):
117 | filepath = filepath_list[f]
118 | temp = pd.read_csv(filepath)
119 | if (f == 0):
120 | database[db_i]['data'][typetag] = pd.DataFrame()
121 | if x_axis_col in temp.columns:
122 | if(0 in temp[x_axis_col].unique()):
123 | print(filepath.split('//')[-1] + ": " + x_axis_col +' timestamp missing. discarding.')
124 | else:
125 | print(filepath.split('//')[-1] + ': good file')
126 |                         database[db_i]['data'][typetag] = pd.concat([database[db_i]['data'][typetag], temp])
127 | else:
128 | print(filepath.split('//')[-1] + ": " + x_axis_col +' not present. discarding.')
129 |
130 | print('data loading complete...')
131 | global_x_loc = 0
132 | global_comment = ""
133 | global_subplot_clicked = 0
134 | user_note_headers = list(database[0]['data'][typetag].columns)
135 | user_note_headers[-1] = output_note_typetag
136 | notes_df = pd.DataFrame(columns=user_note_headers)
137 | notes_file = pathlib.Path(output_file_path)
138 | if not notes_file.is_file():
139 | notes_df.to_csv(output_file_path, mode='a', index=False)
140 |
141 | def auto_y_lim():
142 | for m in range(num_rows):
143 | for n in range(num_cols):
144 | if (typetags_in_cols):
145 | typetag = signals['typetags'][n]
146 | db_i = m
147 | else:
148 | typetag = signals['typetags'][m]
149 | db_i = n
150 | print('subplot {0},{1}'.format(m, n))
151 | x_lims = axes[m][n].get_xlim()
152 | print('x_lims = ', x_lims)
153 |
154 | # Find the indexes of the x limits
155 | x_data_arr = np.array(database[db_i]['data'][typetag][x_axis_col])
156 | x_inds = np.where((x_data_arr > x_lims[0]) & (x_data_arr < x_lims[1]))[0]
157 | if (len(x_inds) > 1): # Don't try to plot missing data
158 | x_inds = np.sort(x_inds)
159 | x_inds = range(x_inds[0], x_inds[len(x_inds) - 1])
160 | print('x_inds = ', x_inds[0], x_inds[len(x_inds) - 1])
161 |
162 | #print('len(data) = ', len(database[db_i]['data'][typetag][typetag]))
163 | y_data_arr = np.array(database[db_i]['data'][typetag][typetag])
164 | y_lims = [
165 | min(y_data_arr[x_inds]),
166 | max(y_data_arr[x_inds])
167 | ]
168 | print('y_lims = ', y_lims[0], y_lims[1])
169 |
170 | #axes[m][n].set_ylim(y_lims[0], y_lims[1])
171 | axes[m][n].update({'ylim': [y_lims[0], y_lims[1]]})
172 | fig.canvas.blit(fig.bbox)
173 |
174 | def plot_data():
175 | #%% plot data
176 | global fig, axes, xlims
177 | new_xlims = True
178 | if (len(xlims) == 2):
179 | # save the xlims when replotting (for changing plot rows/cols)
180 | new_xlims = False
181 | xlims = axes[0][0].get_xlim()
182 | print('xlims = ', xlims)
183 | print('new_xlims = ', new_xlims)
184 |     plt.close(fig) # ToDo: consider more elegant ways to preserve
185 | fig, axes = plt.subplots(num_rows,num_cols, sharex=True) # creates number of subplots equal to entries in the database
186 | plt.get_current_fig_manager().toolbar.pan()
187 | fig.set_size_inches(fig_size)
188 |
189 | fig.canvas.mpl_connect('key_press_event', on_key)
190 | plt.connect('button_press_event', on_click)
191 | fig.suptitle( 'EmotiBit Hyperscanning Data')
192 | for m in range(num_rows):
193 | for n in range(num_cols):
194 | if (typetags_in_cols):
195 | typetag = signals['typetags'][n]
196 | marker_style = signals['marker_styles'][n]
197 | db_i = m
198 | plot_title = typetag
199 | plot_xlabel = database[db_i]['name']
200 | else:
201 | typetag = signals['typetags'][m]
202 | marker_style = signals['marker_styles'][m]
203 | db_i = n
204 | plot_title = database[db_i]['name']
205 | plot_xlabel = typetag
206 | if(x_axis_col in database[db_i]['data'][typetag].columns):
207 | #axes[m].title.set_text(database[db_i]['name'])
208 | line, = axes[m][n].plot(database[db_i]['data'][typetag][x_axis_col], database[db_i]['data'][typetag][typetag], marker_style, label=database[db_i]['name'])
209 | #axes[m].legend()
210 | if (label_each_subplot or n == num_cols - 1):
211 | axes[m][n].yaxis.set_label_position("right")
212 | axes[m][n].set_ylabel(plot_xlabel)
213 | if (label_each_subplot or m == 0):
214 | axes[m][n].set_title(plot_title)
215 | if (new_xlims):
216 | xlims = axes[m][n].get_xlim()
217 | else:
218 | axes[m][n].set_xlim(xlims[0], xlims[1])
219 | auto_y_lim()
220 | plt.show()
221 |
222 | #%% callback functions
223 | def on_click(event):
224 | global global_subplot_clicked
225 | global global_x_loc
226 | print('x-axis: ' + str(event.xdata))
227 | global_x_loc = event.xdata
228 |     for i in range(len(axes)):
229 |         if event.inaxes in list(axes[i]):  # axes is a 2D array; check row i for the clicked subplot
230 |             print('you clicked {0}/{1} subplot'.format(i, len(axes) - 1))
231 |             global_subplot_clicked = i
232 |
233 | def on_key(event):
234 | print('Key press:\'%s\'' %(event.key))
235 | if event.key == 'c': # create comment
236 | root = tkinter.Tk()
237 | root.withdraw()
238 | w = simpledialog.askstring("Title", "Enter the comment to annotate the last mouse click")
239 | if w != None:
240 | print(str(global_x_loc) + ":" + w)
241 | df_row = list(database[global_subplot_clicked]['data'][typetag].iloc[(database[global_subplot_clicked]['data'][typetag][x_axis_col] - global_x_loc).abs().argsort()[0],:])
242 | df_row[-1] = w
243 | with open (output_file_path, 'a', newline='') as f:
244 | writer = csv.writer(f)
245 | writer.writerow(df_row)
246 |
247 | if event.key == 'a': # auto y-lim
248 | auto_y_lim()
249 |
250 | if event.key == 't': # toggle rows & cols in plot
251 | global typetags_in_cols, num_rows, num_cols
252 | typetags_in_cols = not typetags_in_cols
253 | print('typetags_in_cols = ', typetags_in_cols)
254 | if (typetags_in_cols):
255 | num_rows = len(database)
256 | num_cols = len(signals['typetags'])
257 | else:
258 | num_rows = len(signals['typetags'])
259 | num_cols = len(database)
260 | plot_data()
261 | if event.key == 'r': # reset the plot and start over
262 | # this is a hack because 't' breaks home/reset
263 | global xlims
264 | xlims = []
265 | plot_data()
266 | if event.key == 'e': # Adds titles and labels to [e]ach subplot
267 | global label_each_subplot
268 | label_each_subplot = not label_each_subplot
269 | plot_data()
270 |
271 | plot_data()
272 |
273 |
274 |
275 |
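276 | # To plot an additional person, add another dict to the `database` list in
277 | # the usage-parameters section above, e.g. (hypothetical name and path):
278 | #   {'name': 'Alice', 'root_path': r'G:/path/to/Data/Alice'},
279 | # keeping all of that person's files under a single root_path directory
280 | # (sub-directories are allowed).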
--------------------------------------------------------------------------------
/py/examples/lsl_stream_viewer/README.md:
--------------------------------------------------------------------------------
1 | # LSL Stream Viewer Installation Instructions
2 | These instructions are for installing and using the [LSL Stream Viewer](https://github.com/intheon/stream_viewer).
3 | Getting the install dependencies right was a bit tricky, so the exact steps used are included here:
4 | - Install `Anaconda`
5 | - Open `Anaconda Prompt` as admin
6 | - Run the following commands in the `Anaconda Prompt`:
7 | ```
8 | conda create --clone base -n LSL-Stream-Viewer
9 | conda activate LSL-Stream-Viewer
10 | conda update --all
11 | conda update -n LSL-Stream-Viewer -c defaults conda
12 | conda install --force-reinstall pyqt
13 | conda install --force-reinstall qt
14 | pip install git+https://github.com/intheon/stream_viewer.git
15 | pip install stream_viewer
16 | pip install pandas==1.5.3
17 | ```
18 |
19 | # Running the LSL Stream Viewer
20 | - Open `EmotiBit Oscilloscope`
21 | - Connect to an EmotiBit and verify data is streaming
22 | - Open the `Output List` dropdown at the top right of the window
23 | - Select `LSL`
24 | - Open `Anaconda Prompt` as admin and run:
25 | ```
26 | python -m stream_viewer.applications.main -s "[PATH_TO_INI_FILE]lsl_viewer_PPG.ini"
27 | ```
28 | - You can specify different .ini files to view different data streams. NOTE: viewing more than 6 or so streams at once in Stream Viewer tends to run VERY slowly, which is why there are several .ini files for viewing all of the streams output by EmotiBit.
29 |
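30 | For example, to view the EDA-related streams instead, point the same command at the EDA configuration file (same placeholder path as above):
31 | ```
32 | python -m stream_viewer.applications.main -s "[PATH_TO_INI_FILE]lsl_viewer_EDA.ini"
33 | ```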
--------------------------------------------------------------------------------
/py/examples/lsl_stream_viewer/lsl_viewer_ACC_GYRO.ini:
--------------------------------------------------------------------------------
1 | [RendererDocksMain]
2 | LineVis%7CACC_X%7C0\dockWidgetArea=2
3 | LineVis%7CACC_X%7C0\size=@Size(870 165)
4 | LineVis%7CACC_X%7C0\pos=@Point(417 21)
5 | LineVis%7CACC_X%7C0\floating=false
6 | LineVis%7CACC_Y%7C0\dockWidgetArea=2
7 | LineVis%7CACC_Y%7C0\size=@Size(870 164)
8 | LineVis%7CACC_Y%7C0\pos=@Point(417 190)
9 | LineVis%7CACC_Y%7C0\floating=false
10 | LineVis%7CACC_Z%7C0\dockWidgetArea=2
11 | LineVis%7CACC_Z%7C0\size=@Size(870 165)
12 | LineVis%7CACC_Z%7C0\pos=@Point(417 358)
13 | LineVis%7CACC_Z%7C0\floating=false
14 | LineVis%7CGYRO_X%7C0\dockWidgetArea=2
15 | LineVis%7CGYRO_X%7C0\size=@Size(870 164)
16 | LineVis%7CGYRO_X%7C0\pos=@Point(417 527)
17 | LineVis%7CGYRO_X%7C0\floating=false
18 | LineVis%7CGYRO_Y%7C0\dockWidgetArea=2
19 | LineVis%7CGYRO_Y%7C0\size=@Size(870 165)
20 | LineVis%7CGYRO_Y%7C0\pos=@Point(417 695)
21 | LineVis%7CGYRO_Y%7C0\floating=false
22 | LineVis%7CGYRO_Z%7C0\dockWidgetArea=2
23 | LineVis%7CGYRO_Z%7C0\size=@Size(870 164)
24 | LineVis%7CGYRO_Z%7C0\pos=@Point(417 864)
25 | LineVis%7CGYRO_Z%7C0\floating=false
26 |
27 | [LineVis%7CACC_X%7C0]
28 | renderer=LineVis
29 | data_sources\0\class=LSLDataSource
30 | data_sources\0\identifier="{\"name\": \"ACC_X\", \"type\": \"AccelerometerX\"}"
31 | upper_limit=5
32 | lower_limit=-5
33 | highpass_cutoff=0
34 | plot_mode=Scroll
35 | duration=5
36 | auto_scale=none
37 | marker_scale=1
38 | font_size=10
39 | color_set=husl
40 | bg_color=black
41 | show_chan_labels=false
42 | draw_mode=line_strip
43 | columns=1
44 | vertical_markers=true
45 | stagger_markers=false
46 | x_offset=0.06
47 | y_offset=0
48 | width=0.94
49 | height=1
50 |
51 | [LineVis%7CACC_Y%7C0]
52 | renderer=LineVis
53 | data_sources\0\class=LSLDataSource
54 | data_sources\0\identifier="{\"name\": \"ACC_Y\", \"type\": \"AccelerometerY\"}"
55 | upper_limit=5
56 | lower_limit=-5
57 | highpass_cutoff=0
58 | plot_mode=Scroll
59 | duration=5
60 | auto_scale=none
61 | marker_scale=1
62 | font_size=10
63 | color_set=husl
64 | bg_color=black
65 | show_chan_labels=false
66 | draw_mode=line_strip
67 | columns=1
68 | vertical_markers=true
69 | stagger_markers=false
70 | x_offset=0.06
71 | y_offset=0
72 | width=0.94
73 | height=1
74 |
75 | [LineVis%7CACC_Z%7C0]
76 | renderer=LineVis
77 | data_sources\0\class=LSLDataSource
78 | data_sources\0\identifier="{\"name\": \"ACC_Z\", \"type\": \"AccelerometerZ\"}"
79 | upper_limit=5
80 | lower_limit=-5
81 | highpass_cutoff=0
82 | plot_mode=Scroll
83 | duration=5
84 | auto_scale=none
85 | marker_scale=1
86 | font_size=10
87 | color_set=husl
88 | bg_color=black
89 | show_chan_labels=false
90 | draw_mode=line_strip
91 | columns=1
92 | vertical_markers=true
93 | stagger_markers=false
94 | x_offset=0.06
95 | y_offset=0
96 | width=0.94
97 | height=1
98 |
99 | [LineVis%7CGYRO_X%7C0]
100 | renderer=LineVis
101 | data_sources\0\class=LSLDataSource
102 | data_sources\0\identifier="{\"name\": \"GYRO_X\", \"type\": \"GyroscopeX\"}"
103 | upper_limit=1000
104 | lower_limit=-1000
105 | highpass_cutoff=0
106 | plot_mode=Scroll
107 | duration=5
108 | auto_scale=none
109 | marker_scale=1
110 | font_size=10
111 | color_set=husl
112 | bg_color=black
113 | show_chan_labels=false
114 | draw_mode=line_strip
115 | columns=1
116 | vertical_markers=true
117 | stagger_markers=false
118 | x_offset=0.06
119 | y_offset=0
120 | width=0.94
121 | height=1
122 |
123 | [LineVis%7CGYRO_Y%7C0]
124 | renderer=LineVis
125 | data_sources\0\class=LSLDataSource
126 | data_sources\0\identifier="{\"name\": \"GYRO_Y\", \"type\": \"GyroscopeY\"}"
127 | upper_limit=1000
128 | lower_limit=-1000
129 | highpass_cutoff=0
130 | plot_mode=Scroll
131 | duration=5
132 | auto_scale=none
133 | marker_scale=1
134 | font_size=10
135 | color_set=husl
136 | bg_color=black
137 | show_chan_labels=false
138 | draw_mode=line_strip
139 | columns=1
140 | vertical_markers=true
141 | stagger_markers=false
142 | x_offset=0.06
143 | y_offset=0
144 | width=0.94
145 | height=1
146 |
147 | [LineVis%7CGYRO_Z%7C0]
148 | renderer=LineVis
149 | data_sources\0\class=LSLDataSource
150 | data_sources\0\identifier="{\"name\": \"GYRO_Z\", \"type\": \"GyroscopeZ\"}"
151 | upper_limit=1000
152 | lower_limit=-1000
153 | highpass_cutoff=0
154 | plot_mode=Scroll
155 | duration=5
156 | auto_scale=none
157 | marker_scale=1
158 | font_size=10
159 | color_set=husl
160 | bg_color=black
161 | show_chan_labels=false
162 | draw_mode=line_strip
163 | columns=1
164 | vertical_markers=true
165 | stagger_markers=false
166 | x_offset=0.06
167 | y_offset=0
168 | width=0.94
169 | height=1
170 |
--------------------------------------------------------------------------------
/py/examples/lsl_stream_viewer/lsl_viewer_EDA.ini:
--------------------------------------------------------------------------------
1 | [RendererDocksMain]
2 | LineVis%7CEDA%7C0\dockWidgetArea=2
3 | LineVis%7CEDA%7C0\size=@Size(870 289)
4 | LineVis%7CEDA%7C0\pos=@Point(417 739)
5 | LineVis%7CEDA%7C0\floating=false
6 | BarPG%7CSCR_AMP%7C0\dockWidgetArea=2
7 | BarPG%7CSCR_AMP%7C0\size=@Size(870 235)
8 | BarPG%7CSCR_AMP%7C0\pos=@Point(417 21)
9 | BarPG%7CSCR_AMP%7C0\floating=false
10 | BarPG%7CSCR_RIS%7C0\dockWidgetArea=2
11 | BarPG%7CSCR_RIS%7C0\size=@Size(870 236)
12 | BarPG%7CSCR_RIS%7C0\pos=@Point(417 499)
13 | BarPG%7CSCR_RIS%7C0\floating=false
14 | BarPG%7CSCR_FREQ%7C0\dockWidgetArea=2
15 | BarPG%7CSCR_FREQ%7C0\size=@Size(870 235)
16 | BarPG%7CSCR_FREQ%7C0\pos=@Point(417 260)
17 | BarPG%7CSCR_FREQ%7C0\floating=false
18 |
19 | [LineVis%7CEDA%7C0]
20 | renderer=LineVis
21 | data_sources\0\class=LSLDataSource
22 | data_sources\0\identifier="{\"name\": \"EDA\", \"type\": \"EDA\"}"
23 | upper_limit=1
24 | lower_limit=0
25 | highpass_cutoff=0
26 | plot_mode=Scroll
27 | duration=5
28 | auto_scale=By-Channel
29 | marker_scale=1
30 | font_size=10
31 | color_set=husl
32 | bg_color=black
33 | show_chan_labels=false
34 | draw_mode=line_strip
35 | columns=1
36 | vertical_markers=true
37 | stagger_markers=false
38 | x_offset=0.06
39 | y_offset=0
40 | width=0.94
41 | height=1
42 |
43 | [BarPG%7CSCR_AMP%7C0]
44 | renderer=BarPG
45 | data_sources\0\class=LSLDataSource
46 | data_sources\0\identifier="{\"name\": \"SCR_AMP\", \"type\": \"SCRAmplitude\"}"
47 | upper_limit=0.01
48 | lower_limit=0
49 | highpass_cutoff=0
50 | plot_mode=Scrolling
51 | duration=2
52 | auto_scale=none
53 | bar_width=6
54 |
55 | [BarPG%7CSCR_RIS%7C0]
56 | renderer=BarPG
57 | data_sources\0\class=LSLDataSource
58 | data_sources\0\identifier="{\"name\": \"SCR_RIS\", \"type\": \"SCRRiseTime\"}"
59 | upper_limit=1.5
60 | lower_limit=0
61 | highpass_cutoff=0
62 | plot_mode=Scrolling
63 | duration=2
64 | auto_scale=none
65 | bar_width=6
66 |
67 | [BarPG%7CSCR_FREQ%7C0]
68 | renderer=BarPG
69 | data_sources\0\class=LSLDataSource
70 | data_sources\0\identifier="{\"name\": \"SCR_FREQ\", \"type\": \"SCRFrequency\"}"
71 | upper_limit=20
72 | lower_limit=0
73 | highpass_cutoff=0
74 | plot_mode=Scrolling
75 | duration=2
76 | auto_scale=none
77 | bar_width=6
78 |
--------------------------------------------------------------------------------
/py/examples/lsl_stream_viewer/lsl_viewer_MAG_TEMP.ini:
--------------------------------------------------------------------------------
1 | [RendererDocksMain]
2 | LineVis%7CMAG_X%7C0\dockWidgetArea=2
3 | LineVis%7CMAG_X%7C0\size=@Size(870 173)
4 | LineVis%7CMAG_X%7C0\pos=@Point(417 21)
5 | LineVis%7CMAG_X%7C0\floating=false
6 | LineVis%7CMAG_Y%7C0\dockWidgetArea=2
7 | LineVis%7CMAG_Y%7C0\size=@Size(870 178)
8 | LineVis%7CMAG_Y%7C0\pos=@Point(417 198)
9 | LineVis%7CMAG_Y%7C0\floating=false
10 | LineVis%7CMAG_Z%7C0\dockWidgetArea=2
11 | LineVis%7CMAG_Z%7C0\size=@Size(870 252)
12 | LineVis%7CMAG_Z%7C0\pos=@Point(417 380)
13 | LineVis%7CMAG_Z%7C0\floating=false
14 | LineVis%7CTEMP1%7C0\dockWidgetArea=2
15 | LineVis%7CTEMP1%7C0\size=@Size(870 198)
16 | LineVis%7CTEMP1%7C0\pos=@Point(417 636)
17 | LineVis%7CTEMP1%7C0\floating=false
18 | LineVis%7CTHERM%7C0\dockWidgetArea=2
19 | LineVis%7CTHERM%7C0\size=@Size(870 190)
20 | LineVis%7CTHERM%7C0\pos=@Point(417 838)
21 | LineVis%7CTHERM%7C0\floating=false
22 |
23 | [LineVis%7CMAG_X%7C0]
24 | renderer=LineVis
25 | data_sources\0\class=LSLDataSource
26 | data_sources\0\identifier="{\"name\": \"MAG_X\", \"type\": \"MagnetometerX\"}"
27 | upper_limit=1
28 | lower_limit=0
29 | highpass_cutoff=0
30 | plot_mode=Sweep
31 | duration=5
32 | auto_scale=By-Channel
33 | marker_scale=1
34 | font_size=10
35 | color_set=husl
36 | bg_color=black
37 | show_chan_labels=false
38 | draw_mode=line_strip
39 | columns=1
40 | vertical_markers=true
41 | stagger_markers=false
42 | x_offset=0.06
43 | y_offset=0
44 | width=0.94
45 | height=1
46 |
47 | [LineVis%7CMAG_Y%7C0]
48 | renderer=LineVis
49 | data_sources\0\class=LSLDataSource
50 | data_sources\0\identifier="{\"name\": \"MAG_Y\", \"type\": \"MagnetometerY\"}"
51 | upper_limit=1
52 | lower_limit=0
53 | highpass_cutoff=0
54 | plot_mode=Scroll
55 | duration=5
56 | auto_scale=By-Channel
57 | marker_scale=1
58 | font_size=10
59 | color_set=husl
60 | bg_color=black
61 | show_chan_labels=false
62 | draw_mode=line_strip
63 | columns=1
64 | vertical_markers=true
65 | stagger_markers=false
66 | x_offset=0.06
67 | y_offset=0
68 | width=0.94
69 | height=1
70 |
71 | [LineVis%7CMAG_Z%7C0]
72 | renderer=LineVis
73 | data_sources\0\class=LSLDataSource
74 | data_sources\0\identifier="{\"name\": \"MAG_Z\", \"type\": \"MagnetometerZ\"}"
75 | upper_limit=1
76 | lower_limit=0
77 | highpass_cutoff=0
78 | plot_mode=Scroll
79 | duration=5
80 | auto_scale=By-Channel
81 | marker_scale=1
82 | font_size=10
83 | color_set=husl
84 | bg_color=black
85 | show_chan_labels=false
86 | draw_mode=line_strip
87 | columns=1
88 | vertical_markers=true
89 | stagger_markers=false
90 | x_offset=0.06
91 | y_offset=0
92 | width=0.94
93 | height=1
94 |
95 | [LineVis%7CTEMP1%7C0]
96 | renderer=LineVis
97 | data_sources\0\class=LSLDataSource
98 | data_sources\0\identifier="{\"name\": \"TEMP1\", \"type\": \"Temperature\"}"
99 | upper_limit=1
100 | lower_limit=0
101 | highpass_cutoff=0
102 | plot_mode=Scroll
103 | duration=5
104 | auto_scale=By-Channel
105 | marker_scale=1
106 | font_size=10
107 | color_set=husl
108 | bg_color=black
109 | show_chan_labels=false
110 | draw_mode=line_strip
111 | columns=1
112 | vertical_markers=true
113 | stagger_markers=false
114 | x_offset=0.06
115 | y_offset=0
116 | width=0.94
117 | height=1
118 |
119 | [LineVis%7CTHERM%7C0]
120 | renderer=LineVis
121 | data_sources\0\class=LSLDataSource
122 | data_sources\0\identifier="{\"name\": \"THERM\", \"type\": \"Thermopile\"}"
123 | upper_limit=1
124 | lower_limit=0
125 | highpass_cutoff=0
126 | plot_mode=Scroll
127 | duration=5
128 | auto_scale=By-Channel
129 | marker_scale=1
130 | font_size=10
131 | color_set=husl
132 | bg_color=black
133 | show_chan_labels=false
134 | draw_mode=line_strip
135 | columns=1
136 | vertical_markers=true
137 | stagger_markers=false
138 | x_offset=0.06
139 | y_offset=0
140 | width=0.94
141 | height=1
142 |
--------------------------------------------------------------------------------
/py/examples/lsl_stream_viewer/lsl_viewer_PPG.ini:
--------------------------------------------------------------------------------
1 | [RendererDocksMain]
2 | LineVis%7CPPG_RED%7C0\dockWidgetArea=2
3 | LineVis%7CPPG_RED%7C0\size=@Size(870 149)
4 | LineVis%7CPPG_RED%7C0\pos=@Point(387 451)
5 | LineVis%7CPPG_RED%7C0\floating=false
6 | LineVis%7CPPG_IR%7C0\dockWidgetArea=2
7 | LineVis%7CPPG_IR%7C0\size=@Size(870 148)
8 | LineVis%7CPPG_IR%7C0\pos=@Point(387 299)
9 | LineVis%7CPPG_IR%7C0\floating=false
10 | LineVis%7CPPG_GRN%7C0\dockWidgetArea=2
11 | LineVis%7CPPG_GRN%7C0\size=@Size(870 149)
12 | LineVis%7CPPG_GRN%7C0\pos=@Point(387 146)
13 | LineVis%7CPPG_GRN%7C0\floating=false
14 | BarPG%7CHR%7C0\dockWidgetArea=2
15 | BarPG%7CHR%7C0\size=@Size(870 121)
16 | BarPG%7CHR%7C0\pos=@Point(387 21)
17 | BarPG%7CHR%7C0\floating=false
18 |
19 | [LineVis%7CPPG_RED%7C0]
20 | renderer=LineVis
21 | data_sources\0\class=LSLDataSource
22 | data_sources\0\identifier="{\"name\": \"PPG_RED\", \"type\": \"PPGRed\"}"
23 | upper_limit=1
24 | lower_limit=0
25 | highpass_cutoff=0
26 | plot_mode=Scroll
27 | duration=5
28 | auto_scale=By-Channel
29 | marker_scale=1
30 | font_size=10
31 | color_set=husl
32 | bg_color=black
33 | show_chan_labels=false
34 | draw_mode=line_strip
35 | columns=1
36 | vertical_markers=true
37 | stagger_markers=false
38 | x_offset=0.06
39 | y_offset=0
40 | width=0.94
41 | height=1
42 |
43 | [LineVis%7CPPG_IR%7C0]
44 | renderer=LineVis
45 | data_sources\0\class=LSLDataSource
46 | data_sources\0\identifier="{\"name\": \"PPG_IR\", \"type\": \"PPGInfrared\"}"
47 | upper_limit=1
48 | lower_limit=0
49 | highpass_cutoff=0
50 | plot_mode=Scroll
51 | duration=5
52 | auto_scale=By-Channel
53 | marker_scale=1
54 | font_size=10
55 | color_set=husl
56 | bg_color=black
57 | show_chan_labels=false
58 | draw_mode=line_strip
59 | columns=1
60 | vertical_markers=true
61 | stagger_markers=false
62 | x_offset=0.06
63 | y_offset=0
64 | width=0.94
65 | height=1
66 |
67 | [LineVis%7CPPG_GRN%7C0]
68 | renderer=LineVis
69 | data_sources\0\class=LSLDataSource
70 | data_sources\0\identifier="{\"name\": \"PPG_GRN\", \"type\": \"PPGGreen\"}"
71 | upper_limit=1
72 | lower_limit=0
73 | highpass_cutoff=0
74 | plot_mode=Scroll
75 | duration=5
76 | auto_scale=By-Channel
77 | marker_scale=1
78 | font_size=10
79 | color_set=husl
80 | bg_color=black
81 | show_chan_labels=false
82 | draw_mode=line_strip
83 | columns=1
84 | vertical_markers=true
85 | stagger_markers=false
86 | x_offset=0.06
87 | y_offset=0
88 | width=0.94
89 | height=1
90 |
91 | [BarPG%7CHR%7C0]
92 | renderer=BarPG
93 | data_sources\0\class=LSLDataSource
94 | data_sources\0\identifier="{\"name\": \"HR\", \"type\": \"HeartRate\"}"
95 | upper_limit=180
96 | lower_limit=0
97 | highpass_cutoff=0
98 | plot_mode=Scrolling
99 | duration=2
100 | auto_scale=none
101 | bar_width=6
102 |
103 | [MainWindow]
104 | fullScreen=false
105 | maximized=false
106 | size=@Size(1257 600)
107 | pos=@Point(1713 295)
108 |
109 | [StreamStatus]
110 | dockWidgetArea=1
111 | size=@Size(383 579)
112 | pos=@Point(0 21)
113 | floating=false
114 |
--------------------------------------------------------------------------------
/py/examples/periodizer_example/coincidence_sum_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Example using emotibit.signal.periodize to assess the coincidence of multiple
4 | aperiodic signals and write a metric quantifying that coincidence to file.
5 |
6 | @author: consu
7 | """
8 |
9 | import emotibit.signal as ebsig
10 | import numpy as np
11 | import pandas as pd
12 | import scipy.signal as scisig
13 | import matplotlib.pyplot as plt
14 |
15 | try:
16 | import IPython
17 | IPython.get_ipython().magic("matplotlib qt")
18 | except:
19 | plt.ion()
20 |
21 | def butter_lowpass_filter(data, fc, fs, order=4):
22 | # Source: https://github.com/guillaume-chevalier/filtering-stft-and-laplace-transform
23 | w = fc / (fs / 2) # Normalize the frequency
24 |     b, a = scisig.butter(order, w, 'low')
25 | y = scisig.filtfilt(b, a, data)
26 | return y
27 |
28 |
29 | # **** Enter parameters here ****
30 | # Removing data directories with super short recordings and no LSL Markers
31 | data_dirs = [
32 | #r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Mike/2023-04-16_11-51-49-659678',
33 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Mike/2023-04-16_15-42-05-608122',
34 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Mike/2023-04-16_16-20-45-696359',
35 | #r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Bob/2023-04-16_11-50-07-982348',
36 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Bob/2023-04-16_15-42-28-096912',
37 | #r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Jared/2023-04-16_11-47-34-166828',
38 | #r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Jared/2023-04-16_11-47-48-309342',
39 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Jared/2023-04-16_15-53-49-307046',
40 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Jared/2023-04-16_16-21-18-728199',
41 | #r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/John/2023-04-16_11-50-45-628347',
42 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/John/2023-04-16_15-43-36-324819',
43 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/John/2023-04-16_16-21-22-226004',
44 | #r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Diane/2023-04-16_11-49-17-193487',
45 | r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Diane/2023-04-16_15-46-43-622974',
46 | ]
47 |
48 | people = [
49 | {
50 | 'name':'Mike',
51 | 'plot_color':'r'
52 | },
53 | {
54 | 'name':'Bob',
55 | 'plot_color':'b'
56 | },
57 | {
58 | 'name':'Jared',
59 | 'plot_color':'g'
60 | },
61 | {
62 | 'name':'John',
63 | 'plot_color':'k'
64 | },
65 | {
66 | 'name':'Diane',
67 | 'plot_color':'m'
68 | }
69 | ]
70 | groups = [
71 | {
72 | 'names':['Mike'],
73 | 'plot_color':'r'
74 | },
75 | {
76 | 'names':['Mike', 'Bob'],
77 | 'plot_color':'b'
78 | },
79 | {
80 | 'names':['Mike', 'Bob', 'Jared', 'John', 'Diane'],
81 | 'plot_color':'g'
82 | }
83 | ]
84 |
85 | #data_dir = r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Mike/'
86 | #data_file_base = '2023-04-16_15-42-05-608122'
87 | multiflow_output_dir = 'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/'
88 | data_typetag = 'D0'
89 | ref_typetag = 'EA'
90 | fs = 15
91 | # cutoff and threshold determined to result in a ~1 minute window
92 | #cutoff_freq = 0.33
93 | win_len = 120 # seconds
94 | #cutoff_freq = 0.05
95 | #threshold = cutoff_freq / 7.5
96 | threshold = 1 / win_len / fs
97 | #output_typetag = 'D0_filt' + str(cutoff_freq) + 'Hz' + '_th' + str(threshold)
98 | output_typetag = 'D0_win' + str(win_len) + 'sec'
99 | t_col_name = "LslMarkerSourceTimestamp"
100 | t_start = 405670
101 | t_end = 410600
102 | # **** End parameters ****
103 |
104 | # Global variables
105 | t_starts = []
106 | t_ends = []
107 | # End global variables
108 |
109 | # Find beginning and end times of all files
110 | if t_start < 0 or t_end < 0:
111 | for temp_dir in data_dirs:
112 | data_dir, data_file_base = temp_dir.rsplit('/', 1)
113 | ref_file_path = data_dir + '/' + data_file_base + '/' + data_file_base + '_' + ref_typetag + '.csv'
114 | temp = pd.read_csv(ref_file_path)
115 | t_starts.append(temp.loc[0, t_col_name])
116 | t_ends.append(temp.loc[len(temp) - 1, t_col_name])
117 |
118 | if t_start < 0:
119 | t_starts = np.array(t_starts)
120 | t_start = min(t_starts[t_starts > 0])
121 | if t_end < 0:
122 | t_ends = np.array(t_ends)
123 | t_end = max(t_ends[t_ends > 0])
124 |
125 | # Cycle through all data files to create periodized data
126 | per_data = []
127 | for temp_dir in data_dirs:
128 | data_dir, person, data_file_base = temp_dir.rsplit('/', 2)
129 | plot_color = ''
130 | person_ind = -1
131 | temp_ind = -1
132 | for p in people:
133 | temp_ind = temp_ind + 1
134 | if p['name'] == person:
135 | person_ind = temp_ind
136 | plot_color = p['plot_color']
137 | if person_ind >= 0:
138 | data_file_path = data_dir + '/' + person + '/' + data_file_base+ '/' + data_file_base + '_' + data_typetag + '.csv'
139 | data = pd.read_csv(data_file_path)
140 |
141 | per_data.append(ebsig.periodize(data,t_col_name,fs,t_start,0,t_end))
142 | # ToDo save individual processed files
143 |
144 |
145 | plt.clf()
146 | fig1, axes1 = plt.subplots(len(people) + 1,1,sharex=True)
147 | fig1.suptitle('Pedal Presses (win_len=' + str(win_len) + 'sec)', fontsize=16)
148 | #axes1.clear()
149 | coin_sum = np.array([])
150 | # Cycle through all data files to create coincidence sum
151 | for i in range(0, len(per_data)):
152 | #data = per_data[i][data_typetag]
153 | #press = np.diff(data)
154 | #output_df = ebsig.periodize(data,t_col_name,fs,t_start,0,t_end)
155 | temp_dir = data_dirs[i]
156 | data_dir, person, data_file_base = temp_dir.rsplit('/', 2)
157 |
158 | person_ind = -1
159 | temp_ind = -1
160 | for p in people:
161 | temp_ind = temp_ind + 1
162 | if p['name'] == person:
163 | person_ind = temp_ind
164 | plot_color = p['plot_color']
165 |
166 | output_df = per_data[i]
167 | temp2 = output_df[data_typetag]
168 | #axes1[person_ind].plot(temp2, color='gold')
169 | axes1[person_ind].plot(output_df[t_col_name], temp2, color='gold')
170 | #win_m = int(fs / cutoff_freq / 2) * 2 + 1 # create a window size that's not a mult of 2
171 |     win_m = int(win_len / 2) * 2 + 1 # create an odd window length (not a multiple of 2)
172 | temp2 = np.convolve(temp2, np.ones(win_m) / win_m)
173 | temp2 = temp2[range(int(win_m / 2), len(temp2) - int(win_m / 2))]
174 | #temp2 = butter_lowpass_filter(temp2, cutoff_freq, fs, order=1)
175 | #axes1[person_ind].plot( temp2, color='orange')
176 | axes1[person_ind].plot(output_df[t_col_name], temp2, color='orange')
177 | temp2 = (temp2 > threshold )* 1
178 | #axes1[person_ind].plot(temp2, color=plot_color, label=person)
179 | axes1[person_ind].plot(output_df[t_col_name], temp2, color=plot_color)
180 | axes1[person_ind].set_ylabel(person)
181 | #output_df[data_typetag] = temp2
182 | if (len(coin_sum) == 0):
183 | coin_sum = temp2
184 | else:
185 | coin_sum = coin_sum + temp2
186 |
187 | #output_file_path = data_dir + data_file_base + '/' + data_file_base + '_' + output_typetag + '.csv'
188 | #output_df.to_csv(output_file_path, index=False)
189 |
190 | axes1[len(people)].plot(per_data[0][t_col_name], coin_sum, 'gray')
191 | axes1[len(people)].set_ylim(0, len(people))
192 | axes1[len(people)].set_ylabel('Multi-Flow')
193 |
194 | multiflow_data = per_data[0]
195 | multiflow_data[data_typetag] = coin_sum
196 | output_file_path = multiflow_output_dir + '/' + 'MultiFlow_All' + '_' + output_typetag + '.csv'
197 | multiflow_data.to_csv(output_file_path, index=False)
198 |
199 |
200 |
201 |
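202 | # Pipeline summary: each person's D0 pedal presses are periodized onto a
203 | # common 15 Hz time base, smoothed with a moving-average window, and
204 | # thresholded to a binary signal; summing these binary signals across people
205 | # gives the coincidence metric written to the MultiFlow_All output file.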
--------------------------------------------------------------------------------
/py/examples/periodizer_example/periodizer_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Basic example using emotibit.signal.periodize to transform an aperiodic signal
4 | `D0` into a periodic signal in order to perform filtering and other DSP operations
5 |
6 | @author: consu
7 | """
8 |
9 | import emotibit.signal as ebsig
10 | import pandas as pd
11 | import scipy.signal as scisig
12 | import matplotlib.pyplot as plt
13 |
14 | try:
15 | import IPython
16 | IPython.get_ipython().magic("matplotlib qt")
17 | except:
18 | plt.ion()
19 |
20 | def butter_lowpass_filter(data, fc, fs, order=4):
21 | # Source: https://github.com/guillaume-chevalier/filtering-stft-and-laplace-transform
22 | w = fc / (fs / 2) # Normalize the frequency
23 |     b, a = scisig.butter(order, w, 'low')
24 | y = scisig.filtfilt(b, a, data)
25 | return y
26 |
27 | data_dir = r'G:/.shortcut-targets-by-id/1KogPeL5zzT7nFPtEZ5wjIY4poPyVxgWN/EmotiBit Test Data/XenboX/XenboX at TRI 2023-04-16/Data/Mike/'
28 | data_file_base = '2023-04-16_15-42-05-608122'
29 | data_typetag = 'D0'
30 | ref_typetag = 'EA'
31 | fs = 15
32 | cutoff_freq = 0.33
33 | threshold = cutoff_freq / 2
34 | output_typetag = 'D0_filt' + str(cutoff_freq) + 'Hz' + '_th' + str(threshold)
35 | t_col_name = "LslMarkerSourceTimestamp"
36 |
37 |
38 | data_file_path = data_dir + data_file_base + '/' + data_file_base + '_' + data_typetag + '.csv'
39 | ref_file_path = data_dir + data_file_base + '/' + data_file_base + '_' + ref_typetag + '.csv'
40 | data = pd.read_csv(data_file_path)
41 | temp = pd.read_csv(ref_file_path)
42 | t_start = temp.loc[0, t_col_name]
43 | t_end = temp.loc[len(temp) - 1, t_col_name]
44 |
45 | output_df = ebsig.periodize(data,t_col_name,fs,t_start,0,t_end)
46 |
47 | temp2 = output_df[data_typetag]
48 | plt.plot(temp2)
49 | temp2 = butter_lowpass_filter(temp2, cutoff_freq, fs, order=1)
50 | plt.plot(temp2)
51 | temp2 = temp2 > (threshold)
52 | plt.plot(temp2)
53 | output_df[data_typetag] = temp2 * 1
54 |
55 | output_file_path = data_dir + data_file_base + '/' + data_file_base + '_' + output_typetag + '.csv'
56 | output_df.to_csv(output_file_path, index=False)
57 |
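58 | # Pipeline summary: periodize() resamples the aperiodic D0 signal onto a
59 | # regular 15 Hz grid spanning the EA reference file's time range, the
60 | # Butterworth low-pass smooths the resulting pulse train, and thresholding
61 | # converts it back to a binary on/off signal before writing the output CSV.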
--------------------------------------------------------------------------------
/py/examples/tapdetector_example/ExampleDataForTapDetector.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/py/examples/tapdetector_example/ExampleDataForTapDetector.zip
--------------------------------------------------------------------------------
/py/examples/tapdetector_example/README.md:
--------------------------------------------------------------------------------
1 | # Tap Detector
2 |
3 | ## About
4 | The tap detector is designed to take input from two different sources of data and to "align" them by detecting taps between the two devices. For example, two devices recording at the same time can be tapped against each other. Then, the resulting data files from both devices can be used with the tap detector to find the corresponding taps in each file so that their timestamps can be aligned. Data smoothing is done using a Hann filter, whose window is specified by the user for each source of data.
5 |
6 | Within this directory, there are three examples:
7 |
8 | ```tapdetector_example.py``` which shows how to use the tapDetector in full.
9 |
10 | ```tapdetector_extractdata_example.py``` which shows how to use the individual ```extract_data()``` function included.
11 |
12 | ```tapdetector_loaddata_example.py``` which shows how to use the individual ```load_data()``` function included.
13 |
14 | The tapdetector was written and tested using a specific anaconda environment, see the quick start example for more information on finding and using this environment.
15 |
16 | ## Usage
17 |
18 | ### Quick start example using sample data
19 | There is sample data to use with the tapDetector provided in ```ExampleDataForTapDetector.zip```. This section will show you how to use that data in the tapDetector.
20 |
21 | After extracting the data from the .zip file, ensure that all the sample data files are in your working directory. Then, ensure that you have activated the ```EmotiBit-pyenv.yml``` anaconda environment, which can be found [in EmotiBit_Biometric_Lib/py/anaconda-environments/](../../anaconda-environments).
22 |
23 | Once you have extracted the sample data and activated your anaconda environment, you can run the tapdetector with the sample data using the following command. Ensure that your current working directory is where the sample data files are located; we recommend making this somewhere that is **not** within the EmotiBit repository, to avoid clutter and conflicting file names:
24 |
25 | ```python tapdetector.py -sf .\emotibit5_AX.csv .\emotibit5_AY.csv .\emotibit5_AZ.csv -sd 3 -st LocalTimestamp -sa AX AY AZ -o taps5```
26 |
27 | Note: Not all Python installations on all platforms use the ```python``` command; if ```python``` does not work on your system, use ```py``` or whatever the appropriate command is for your system.
28 |
29 | An explanation of arguments in this example:
30 | - ```-sf .\emotibit5_AX.csv .\emotibit5_AY.csv .\emotibit5_AZ.csv``` is used to specify the names of the files where the data can be found.
31 | - ```-sd 3``` is used to specify that there are three dimensions of data.
32 | - ```-st LocalTimestamp``` is used to indicate the name of the timestamp column.
33 | - ```-sa AX AY AZ``` is used to provide the names of the columns of the data in each of the files provided.
34 | - ```-o taps5``` is used to give a filename to the validation plot. An extension is not given as it is automatically appended.
35 |
36 | The above command was tuned to work well with the files provided. After running the tapdetector, you will have output files in your working directory: a .png showing the tap detection and a .csv file recording the tap information.
37 |
38 | ### CLI Options
39 |
40 | To access the full list of command line options available when using the tapDetector, run the tapDetector using the -h flag.
41 |
42 | ### Notes on data compatibility
43 |
44 | To be more accommodating to different devices, the tapdetector allows different input sources to have their data spread across different numbers of files. All files from one source must share the same name for their timestamp column.
45 |
46 | For the best outcome, pad your data with 30 seconds between sets of taps so the data can return to baseline. At least 2 sets of taps are recommended.
47 |
48 | Additionally, ensure that you have already performed any processing necessary on the data. Some devices may record data in a format that can be used immediately, but some devices (such as EmotiBit) require a parsing/processing step. The tap detector requires that the input data has acceleration data and timestamps available, and that the first row in each file contains the headers of the data.
49 |
50 | This section details how to handle different situations with the tapdetector:
51 |
52 | ### All my dimensions of data are in one file
53 | - Provide **exactly** one filename
54 | - Provide **exactly** one timestamp column name
55 | - Provide the correct number of dimensions
56 | - Provide a matching number of data column names
57 |
58 | Example: ```-sf .\cyton.csv -sd 3 -st " Timestamp" -sa " Accel Channel 0" " Accel Channel 1" " Accel Channel 2"```
59 |
60 | In this situation, the ```cyton.csv``` file holds all of the columns of data, ``` Accel Channel 0```, ``` Accel Channel 1```, ``` Accel Channel 2``` and ``` Timestamp```.
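
If you are calling the tap detector from Python, the same situation is expressed by repeating the filename in each ```(file, timestamp column, data column)``` tuple, following the format used in ```tapdetector_example.py``` (a sketch, not a tested command):

```python
# One file holding all three dimensions: repeat the filename per column.
# The leading spaces in the Cyton column names are intentional.
source_list = [
    (r".\cyton.csv", " Timestamp", " Accel Channel 0"),
    (r".\cyton.csv", " Timestamp", " Accel Channel 1"),
    (r".\cyton.csv", " Timestamp", " Accel Channel 2"),
]
```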
61 |
62 | ### All my dimensions of data are spread across multiple files
63 | - Provide the correct number of dimensions
64 | - Provide the filename of every dimension*
65 | - Provide the data column name of every dimension
66 | - Provide **exactly** one timestamp column name
67 |
68 | *This means that you need to explicitly say which filename is needed for each column, even if this means providing the same filename more than once.
69 |
70 | Example: ```-sf .\emotibit_AX.csv .\emotibit_AY.csv .\emotibit_AZ.csv -sd 3 -st LocalTimestamp -sa AX AY AZ```
71 |
72 | In this situation, the ```emotibit_AX.csv``` file holds the ```AX``` column, the ```emotibit_AY.csv``` file holds the ```AY``` column, and the ```emotibit_AZ.csv``` file holds the ```AZ``` column. All files contain the ```LocalTimestamp``` column, which holds the timestamps of the data.
73 |
74 | ### I have two files, but 3 columns of data
75 | This is handled the same way as the situation above, "All my dimensions of data are spread across multiple files". Here is an explicit example of what a command would look like in this situation:
76 |
77 | Example: ```-sf .\emotibit_AX_AY.csv .\emotibit_AX_AY.csv .\emotibit_AZ.csv -sd 3 -st LocalTimestamp -sa AX AY AZ```
78 |
79 | In this situation, the ```AX``` and ```AY``` columns are both held in the ```emotibit_AX_AY.csv``` file and the ```AZ``` column is held in the ```emotibit_AZ.csv``` file.
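
The corresponding in-code ```source_list``` repeats the shared file once per column it contributes, mirroring the repeated filename in the CLI example above (a sketch using the same hypothetical filenames):

```python
source_list = [
    (r".\emotibit_AX_AY.csv", "LocalTimestamp", "AX"),
    (r".\emotibit_AX_AY.csv", "LocalTimestamp", "AY"),
    (r".\emotibit_AZ.csv", "LocalTimestamp", "AZ"),
]
```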
80 |
81 | ## Completing Your Own Experiment with the Tap Detector
82 | This section gives an overview of a full experiment using the tapdetector, including how to properly collect data.
83 |
84 | Data should be collected from two sources simultaneously. The following is a set of generic steps for collecting data from any two devices for use in the tap detector; you may need to take additional steps depending on the devices you are using and the best practices for those devices.
85 |
86 | 1. **Set up your devices** - Ensure that you have two devices to record data with that are properly set up and ready to be used for data collection.
87 | 1. **Open recording software(s)** - Start the data collection software for each device. Ensure that both devices are properly connected to their respective data collection software and are properly configured.
88 | 1. **Begin data recording** - Begin recording data on both devices. It is recommended to begin recording on both devices simultaneously, as this makes it easier to validate that the files line up properly, but it is not strictly required.
89 | 1. **Get devices into position** - Pick up both devices and hold them still, allowing the data to settle to a baseline for at least 30 seconds. Do not make any sudden movements during this time.
90 | 1. **Perform the taps** - Quickly and firmly hit the devices against each other 3 times, making contact approximately once per second. The taps need to be strong enough to make a noticeable "spike" in the accelerometer data. The more pronounced and clear the taps are as compared to the baseline data and other noise that may be surrounding the taps, the better the tap detector will be able to identify the taps. **WARNING: While hitting the devices together strongly is important, take care to not damage the devices. For example, tap a connector on one board against the side of another board, avoid hitting chips or other electronics on either board to prevent damage.**
91 | 1. **Return to Baseline** - Allow the data to return to baseline again by holding the devices still for another 30 seconds. Steps 4 and 5 may be repeated as necessary to ensure sufficient data collection. It is recommended to collect at least 2 sets of taps.
92 | 1. **End data recording** - End the recording on both devices. Perform any processing necessary on the data. Some devices record data in a format that can be used immediately, but others (such as EmotiBit) require a parsing/processing step. The tap detector requires that the input data has acceleration data and timestamps available.
93 |
94 | Now that you have recorded your data, you can use it in the tapdetector as shown in the sections above.
--------------------------------------------------------------------------------
/py/examples/tapdetector_example/tapdetector_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon Jan 29 2024
3 | An example of how to use the tap detector functions in code.
4 | CLI usage is also supported and documentation can be found in README
5 | """
6 |
7 | from tapdetector import detectTaps
8 |
9 | """
10 | In this example EmotiBit files are used.
11 | Emotibit stores the 3 dimensions of acceleration data in 3 separate files.
12 | This file will give an example of how to use the tap detector in this situation
13 |
14 | Additional information about using the tap detector can be found using CLI usage
15 | via the -h flag when running tapdetector.py:
16 | python tapdetector.py -h
17 | """
18 |
19 |
20 | def main():
21 |
22 | source_list = [
23 | (".\emotibit4_AX.csv", "LocalTimestamp", "AX"),
24 | (".\emotibit4_AY.csv", "LocalTimestamp", "AY"),
25 | (".\emotibit4_AZ.csv", "LocalTimestamp", "AZ"),
26 | ]
27 |
28 | detectTaps(source_list=source_list,
29 | time_window=[20, 40],
30 | heightone=0.25,
31 | window=1,
32 | output_file="taps",)
33 |
34 |
35 | if __name__ == "__main__":
36 | main()
37 |
--------------------------------------------------------------------------------
/py/examples/tapdetector_example/tapdetector_extractdata_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 |
4 | @author: pam
5 |
6 | This example demonstrates how the extract_data() function found in the tapdetector can be used.
7 | The function is used to extract and organize data from a single file, meaning that it both
8 | reads in the file and extracts the desired information from it.
9 |
10 | The result of calling the extract_data() function is a structured array containing the requested data.
11 | For more information about the usage of extract_data(), see the function definition in tapdetector.py
12 | """
13 | from tapdetector import extract_data
14 |
15 | # Example usage:
16 | file_path = r'C:\Users\pam\CFL_Data\2023-09-29_11-20-15-348181_AX.csv'
17 | timestamp_header = 'LocalTimestamp'
18 | column_name = 'AX'
19 |
20 | result = extract_data(file_path, timestamp_header, column_name)
21 | print(result)
--------------------------------------------------------------------------------
/py/examples/tapdetector_example/tapdetector_loaddata_example.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 |
5 | @author: pam
6 |
7 | This example shows how to use the load_data() function found in the tapdetector.
8 | The load_data() function loads the data from a .csv file, and separates the timestamp column from the rest of the data.
9 | The result of calling the function is a numpy array of the timestamps and a pandas data frame of the rest of the data.
10 |
11 | For additional information about the function, see its definition within tapdetector.py
12 | """
13 | from tapdetector import load_data
14 |
15 | # Specify the file path and timestamp header
16 | file_path = r'C:\Users\pam\CFL_Data\2023-09-29_11-20-15-348181_AX.csv'
17 | timestamp_header = 'LocalTimestamp'
18 |
19 | # Call the load_data function
20 | timestamps, data_values = load_data(file_path, timestamp_header)
21 |
22 | # Print or use the loaded data as needed
23 | print("Timestamps:", timestamps)
24 | print("Data Values:", data_values)
25 |
--------------------------------------------------------------------------------
/py/examples/timestamp_converter_example/README.md:
--------------------------------------------------------------------------------
1 | ## Timestamp Converter
2 | ### About
3 | The timestamp converter takes in a file to convert, along with the detected taps from that file and from the file you wish to align it to, and realigns the timestamps of the file accordingly.
4 |
5 | For example, if you have recorded data on an EmotiBit and a Cyton board simultaneously, you can then use the tapDetector to find the locations of the taps in either file, then use the output from the tapDetector to realign the timestamps in the Cyton file so that the timestamps are lined up with the Emotibit data, allowing for easy comparison across files.
6 |
7 | For more information about tapping devices, see the tapDetector example.
8 |
9 | ### Quickstart Usage
10 | Sample files have been provided; here is how the timestamp converter CLI can be used with the sample data:
11 |
12 | ```py timestamp_converter.py -tf1 sourceOneTaps.csv -dc1 LocalTimeStamp -tf2 sourceTwoTaps.csv -dc2 LocalTimeStamp -f cytonhr2.txt -fc ' Timestamp' -o ConvertedFile.csv```
13 |
14 | #### Explanation of Arguments in Example:
15 | - tf1: Provides the path to the first file containing tap information.
16 | - dc1: Provides the column name with the timestamps of the taps in the first file.
17 | - tf2: Provides the path to the second file containing tap information.
18 | - dc2: Provides the column name with the timestamps of the taps in the second file.
19 | - f: Provides the name of the file to convert.
20 | - fc: Provides the column name, in the file to convert, that should be converted.
21 | - o: Provides the name of the output (converted) file.
22 |
23 | For a full list of available arguments and their usages, run the timestamp converter with the -h flag.
24 |
25 | ### Format of Tap Files
26 | As noted, the timestamp converter requires two tap files to be provided. The first tap file should contain the taps from the time domain that you want to convert to, and the second tap file should contain the taps from the time domain you want to convert from.
27 |
28 | For example, if you wanted to align a Cyton file to EmotiBit data, the first tap file would hold the tap information from the EmotiBit, and the second tap file would hold the tap information from the Cyton.
29 |
30 | There are 3 requirements for the tapFiles:
31 | 1. They contain a column of timestamps where taps occurred.
32 | 1. There are the same number of taps in both files.
33 | 1. The total number of taps in each file is even.
34 |
35 | For more information on performing taps and tap detection, see the tapDetector example.
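
Before running the converter, a quick check of the requirements above can save a failed run. A minimal sketch, assuming pandas and the tap-file column names from the quickstart:

```python
import pandas as pd

taps_one = pd.read_csv("sourceOneTaps.csv")["LocalTimeStamp"]
taps_two = pd.read_csv("sourceTwoTaps.csv")["LocalTimeStamp"]

# Requirements 2 and 3: equal tap counts, and an even number of taps per file
assert len(taps_one) == len(taps_two), "tap files must contain the same number of taps"
assert len(taps_one) % 2 == 0, "each tap file must contain an even number of taps"
```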
--------------------------------------------------------------------------------
/py/examples/timestamp_converter_example/SampleDataForTimestampConverter.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EmotiBit/EmotiBit_Biometric_Lib/a7112ed157307df68ddab43fbb5a0a5291c109fd/py/examples/timestamp_converter_example/SampleDataForTimestampConverter.zip
--------------------------------------------------------------------------------
/py/examples/timestamp_converter_example/timestamp_conveter_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mar 13 2024
3 | An example of how to use the timestamp converter functions in code.
4 | CLI usage is also supported and documentation can be found by using -h
5 | """
6 |
7 | from emotibit.timestamp_converter import convert_time_stamps
8 | import pandas as pd
9 |
10 | """
11 | In this example a Cyton file has its timestamps converted to line up with an
12 | Emotibit's timestamps.
13 |
14 | Before using the timestamp converter, the files were run through the tapDetector,
15 | which produced the sourceOneTaps and the sourceTwoTaps files. For more usage information,
16 | see the README.
17 | """
18 |
19 | def main():
20 |
21 | # read the tap files and the file to convert
22 | # reads in the column from each source and converts it to a list
23 | tapsOne = pd.read_csv("sourceOneTaps.csv")['LocalTimeStamp'].to_list()
24 | tapsTwo = pd.read_csv("sourceTwoTaps.csv")['LocalTimeStamp'].to_list()
25 | fileToConvert = pd.read_csv("cytonhr2.txt")
26 |
27 |     # programmatically determine the halfway point through the tap files
28 | halfOftapsOne = int(len(tapsOne) / 2)
29 | halfOftapsTwo = int(len(tapsTwo) / 2)
30 |
31 | # convert the file
32 | df = convert_time_stamps(tapsOne[:halfOftapsOne],
33 | tapsOne[halfOftapsOne:],
34 | tapsTwo[:halfOftapsTwo],
35 | tapsTwo[halfOftapsTwo:],
36 | fileToConvert,
37 | ' Timestamp',
38 | 'ConvertedFile.csv')
39 | print(df)
40 |
41 | if __name__ == "__main__":
42 | main()
--------------------------------------------------------------------------------
/py/examples/validation_examples/brainproducts_validation_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Sep 23 11:49:41 2022
4 |
5 | @author: consu
6 | """
7 |
8 | import pandas as pd
9 | import pyxdf
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | import tool as bt
13 | import scipy as scp
14 | import statistics as stats
15 | import emotibit.utils as ebu
16 |
17 | try:
18 | import IPython
19 | IPython.get_ipython().magic("matplotlib qt")
20 | except Exception:
21 | plt.ion()
22 |
23 | import matplotlib
24 | matplotlib.rcParams['pdf.fonttype'] = 42
25 | matplotlib.rcParams['ps.fonttype'] = 42
26 | axes_font_size = 20
27 | label_font_size = 28
28 | plt.rcParams.update({'font.size': axes_font_size})
29 | plt.rcParams.update({'axes.labelsize': label_font_size})
30 | plt.rcParams.update({'axes.labelweight': 'bold'})
31 | plt.rcParams.update({'figure.autolayout': False})
32 | fig_size = [[[0, 0] for _ in range(3)] for _ in range(2)]  # independent sublists; list multiplication would alias them
33 | fig_size[1][1] = [15, 5.5]
34 | fig_size[1][2] = [15, 12]
35 |
36 | auto_save_figs = True
37 | fig_save_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share\Science\Manuscripts\Measurement - Sensors\figures"
38 |
39 | #fs_la = 250
40 | fs_la = {}
41 | fs_la["EA"] = 250
42 | fs_la["AX"] = 250
43 | fs_la["AY"] = 250
44 | fs_la["AZ"] = 250
45 | fs_la["ECG"] = 100
46 | fs_eb = {}
47 | fs_eb["EA"] = 15
48 | fs_eb["AX"] = 25
49 | fs_eb["AY"] = 25
50 | fs_eb["AZ"] = 25
51 | fs_eb["PI"] = 100
52 |
53 | # ECG + EDA + Accel files
54 | # liveamp_file_name = r'C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share\Science\data\measurement sensors\2022-09-28\2022-09-28_09-53.xdf'
55 | # file_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share\Science\data\measurement sensors\2022-09-28"
56 | # file_base = r"2022-09-28_09-52-10-439680"
57 | # liveamp_ind = {}
58 | # liveamp_ind["EA"] = [0, 0]
59 | # liveamp_ind["AX"] = [0, 1]
60 | # liveamp_ind["AY"] = [0, 2]
61 | # liveamp_ind["AZ"] = [0, 3]
62 | # liveamp_ind["ECG"] = [1, 0]
63 | # ylim_edr_eb = []
64 | # ylim_edr_la = []
65 |
66 | # EDA + Accel files
67 | liveamp_file_name = r'C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share\Science\data\measurement sensors\2022-09-21\20220921-2336.xdf'
68 | file_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share\Science\data\measurement sensors\2022-09-21"
69 | file_base = r"2022-09-21_23-34-20-381606"
70 | liveamp_ind = {}
71 | liveamp_ind["EA"] = [0, 0]
72 | liveamp_ind["AX"] = [0, 2]
73 | liveamp_ind["AY"] = [0, 3]
74 | liveamp_ind["AZ"] = [0, 4]
75 | ylim_edr_eb = [-.13, .13]
76 | ylim_edr_la = [-.75, .75]
77 |
78 | fig_save_dir = fig_save_dir + "/" + file_base
79 |
80 | trim_durations = [30, 30]
81 | simple_xdf_plot = 0
82 |
83 |
84 | timestamp_id = "LslMarkerSourceTimestamp"
85 |
86 | eda_bandpass = [0.1, 5]
87 | eb_eda_amp = 5
88 | ecg_bandpass = [5, 49]
89 | ppg_bandpass = [1, 49]
90 |
91 | def trim_data(data, timestamps, fs, trim_durations = [0, 0]):
92 |     # Trim trim_durations[0] seconds from the start and trim_durations[1] seconds from the end of the data
93 | out_data = data[int(trim_durations[0]*fs) : int(len(timestamps)-trim_durations[1]*fs) - 1]
94 | out_timestamps = timestamps[int(trim_durations[0]*fs) : int(len(timestamps)-trim_durations[1]*fs) - 1]
95 | return out_data, out_timestamps
96 |
97 |
98 | def regression_plots(x, y, title = "", xlabel = "", ylabel = "", ylim = []):
99 |     # Overlays the two resampled signals (top panel) and plots a linear regression between them after outlier removal (bottom panel)
100 | plot_data_eb = x["data"]
101 | plot_timestamps_eb = x["timestamps"]
102 | plot_data_la = y["data"]
103 | plot_timestamps_la = y["timestamps"]
104 |
105 |     # Find overlapping timestamps and resample data for regression
106 | shared_time = []
107 | shared_time.append(max(plot_timestamps_la[0], plot_timestamps_eb[0]))
108 | shared_time.append(min(plot_timestamps_la[len(plot_timestamps_la)-1],
109 | plot_timestamps_eb[len(plot_timestamps_eb)-1]))
110 | shared_ind_la = ((plot_timestamps_la > shared_time[0]) & (plot_timestamps_la < shared_time[1]))
111 | shared_ind_eb = ((plot_timestamps_eb > shared_time[0]) & (plot_timestamps_eb < shared_time[1]))
112 |
113 | n_resamp = 0
114 | if (sum(shared_ind_la) < sum(shared_ind_eb)):
115 | n_resamp = sum(shared_ind_la)
116 | plot_timestamp_rs = plot_timestamps_la[shared_ind_la]
117 | else:
118 | n_resamp = sum(shared_ind_eb)
119 | plot_timestamp_rs = plot_timestamps_eb[shared_ind_eb]
120 |
121 | plot_data_rs_la = scp.signal.resample(plot_data_la[shared_ind_la], n_resamp)
122 | plot_data_rs_eb = scp.signal.resample(plot_data_eb[shared_ind_eb], n_resamp)
123 |
124 | # Plot resampled overlapping data
125 | fig, ax = plt.subplots(2)
126 | if "ylim" in x.keys() and "ylim" in y.keys():
127 | # setup twin axes
128 | ax2 = ax[0].twinx()
129 | else:
130 | ax2 = ax[0]
131 |
132 | ax[0].plot(plot_timestamp_rs, plot_data_rs_la, color="red", label=y["label"])
133 | if ("ylim" in x.keys()):
134 | ax[0].set_ylim(y["ylim"])
135 | ax2.plot(plot_timestamp_rs, plot_data_rs_eb, color="black", label=x["label"])
136 | if ("ylim" in y.keys()):
137 | ax2.set_ylim(x["ylim"])
138 | ax[0].legend()
139 | ax2.legend(loc='lower right')
140 | ax[0].legend(loc='best')
141 | ax[0].set_xlabel(xlabel)
142 | ax[0].set_ylabel(ylabel)
143 | if (len(ylim) == 2):
144 | ax[0].set_ylim(ylim)
145 | ax2.set_ylim(ylim)
146 | plt.show()
147 |
148 | # ax1.plot(plot_timestamps_la, plot_data_la, color="red")
149 | # ax1.set_ylabel("Brain Products", color="red", fontsize=14)
150 | # ax2.plot(plot_timestamps_eb, plot_data_eb, color="black")
151 | # ax2.set_ylabel("EmotiBit", color="black", fontsize=14)
152 | # ax1.set_xlabel("Time (seconds)")
153 | # plt.show()
154 |
155 | # Remove outliers
156 | lims = [stats.mean(plot_data_rs_eb) - np.std(plot_data_rs_eb)*3,
157 | stats.mean(plot_data_rs_eb) + np.std(plot_data_rs_eb)*3]
158 | not_outliers = (plot_data_rs_eb > lims[0]) & (plot_data_rs_eb < lims[1])
159 | lims = [stats.mean(plot_data_rs_la) - np.std(plot_data_rs_la)*3,
160 | stats.mean(plot_data_rs_la) + np.std(plot_data_rs_la)*3]
161 | not_outliers = not_outliers & (plot_data_rs_la > lims[0]) & (plot_data_rs_la < lims[1])
162 | plot_data_rs_out_eb = plot_data_rs_eb[not_outliers]
163 | plot_data_rs_out_la = plot_data_rs_la[not_outliers]
164 |
165 | # Plot regression
166 | # fig, ax1 = plt.subplots()
167 | ax[1].scatter(plot_data_rs_out_eb, plot_data_rs_out_la)
168 | xlim_data = ax[1].get_xlim()
169 | xlim_diff = (xlim_data[1] - xlim_data[0]) * 0.4
170 | xlim_plt = [xlim_data[0] - xlim_diff, xlim_data[1] + xlim_diff]
171 | slope, intercept, r, p, std_err = scp.stats.linregress(plot_data_rs_out_eb, plot_data_rs_out_la)
172 | ax[1].plot(xlim_plt, [xlim_plt[0] * slope + intercept, xlim_plt[1] * slope + intercept])
173 |
174 | ax[1].set_xlabel(x["label"])
175 | ax[1].set_ylabel(y["label"])
176 | ax[1].text(xlim_data[1], xlim_data[0]* slope + intercept,
177 | "slope = {:.2f}".format(slope) + "\ny-cept = {:.2f}".format(intercept)
178 | + "\nr = {:.2f}".format(r) + "\np = {:.2f}".format(p))
179 | if (len(ylim) == 2):
180 | ax[0].set_ylim(ylim)
181 | fig.suptitle(title)
182 | fig.set_size_inches(fig_size[1][2])
183 |
184 |
185 | liveamp_data, header = pyxdf.load_xdf(liveamp_file_name)
186 |
187 | if (simple_xdf_plot):
188 | stream_counter = 0
189 | fig, ax1 = plt.subplots()
190 | for stream in liveamp_data:
191 | series_counter = 0
192 | for n in range(len(stream['time_series'][0])):
193 | y = stream['time_series'][:, n]
194 | plt.plot(stream['time_stamps'], y, label="" + str(stream_counter) + ":" + str(series_counter))
195 | series_counter += 1
196 | stream_counter += 1
197 |
198 | plt.show()
199 | plt.legend()
200 | quit()
201 |
202 |
203 | #### Script Begin ####
204 |
205 | #######################################################
206 | # Accelerometer
207 | #######################################################
208 |
209 | # setup twin axes
210 | fig, ax1 = plt.subplots(1)
211 | ax2 = ax1.twinx()
212 | # setup data containers
213 | plot_data_la = []
214 | plot_timestamps_la = []
215 | plot_data_eb = []
216 | plot_timestamps_eb = []
217 | # setup TypeTags
218 | type_tags = ["AX", "AY", "AZ"]
219 |
220 | for i in range(len(type_tags)):
221 | #ax2.append(ax1[i].twinx())
222 | #ax1[i].title.set_text(type_tags[i])
223 | type_tag = type_tags[i]
224 |
225 |
226 | # Plot AX data from Brain Products
227 | stream = liveamp_data[liveamp_ind[type_tag][0]]
228 | ts = stream['time_series']
229 | if (i == 0):
230 | plot_data_la = np.square(ts[:, liveamp_ind[type_tag][1]] / 1000)
231 | plot_timestamps_la = stream['time_stamps']
232 | else:
233 | plot_data_la += np.square(ts[:, liveamp_ind[type_tag][1]] / 1000)
234 |
235 | # Plot emotibit Accel X
236 | data = []
237 | t = 0
238 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + type_tag + '.csv'
239 | print(file_path)
240 | data.append(pd.read_csv(file_path))
241 | if (i == 0):
242 | plot_data_eb = np.square(data[t][type_tag])
243 | plot_timestamps_eb = data[t][timestamp_id].to_numpy()
244 | else:
245 | plot_data_eb += np.square(data[t][type_tag])
246 |
247 | plot_data_eb = np.sqrt(plot_data_eb)
248 | plot_data_la = np.sqrt(plot_data_la)
249 |
250 | # Trim beginning / end of data to remove junk & sync taps
251 | plot_data_eb, plot_timestamps_eb = trim_data(plot_data_eb, plot_timestamps_eb, fs_eb[type_tag], trim_durations)
252 | plot_data_la, plot_timestamps_la = trim_data(plot_data_la, plot_timestamps_la, fs_la[type_tag], trim_durations)
253 |
254 |
255 | ax1.plot(plot_timestamps_la, plot_data_la, color="red")
256 | ax1.set_ylabel("Brain Products", color="red", fontsize=label_font_size)
257 | ax2.plot(plot_timestamps_eb, plot_data_eb, color="black")
258 | ax2.set_ylabel("EmotiBit", color="black", fontsize=label_font_size)
259 | ax1.set_xlabel("Time (seconds)")
260 | plt.show()
261 |
262 | reg_data_x = {}
263 | reg_data_x["label"] = "EmotiBit"
264 | reg_data_x["data"] = plot_data_eb
265 | reg_data_x["timestamps"] = plot_timestamps_eb
266 |
267 | reg_data_y = {}
268 | reg_data_y["label"] = "Brain Products"
269 | reg_data_y["data"] = plot_data_la
270 | reg_data_y["timestamps"] = plot_timestamps_la
271 |
272 | regression_plots(reg_data_x, reg_data_y, title="Accelerometer",
273 | xlabel="Time (seconds)", ylabel="Acceleration (G)",
274 | ylim=[.5, 1.5])
275 |
276 | fig = plt.gcf()
277 | fig.canvas.set_window_title("Accelerometer_BP_EB_regr")
278 | if auto_save_figs:
279 | ebu.save_fig(fig_save_dir)
280 |
281 | #######################################################
282 | # EDA
283 | #######################################################
284 |
285 | # setup twin axes
286 | fig, ax1 = plt.subplots()
287 | ax2 = ax1.twinx()
288 | # Plot EDA data from Brain Products
289 | type_tag = "EA"
290 | stream = liveamp_data[liveamp_ind[type_tag][0]]
291 | plot_timestamps_la = stream['time_stamps']
292 | plot_data_la = stream['time_series'][:, liveamp_ind[type_tag][1]] / 25000 # uS / 25000 mV for Brain Products
293 | plot_data_la, plot_timestamps_la = trim_data(plot_data_la, plot_timestamps_la, fs_la[type_tag], trim_durations)
294 | plt.plot(plot_timestamps_la, plot_data_la, color="red")
295 | ax2.set_ylabel("Brain Products", color="red", fontsize=label_font_size)
296 |
297 | # Plot EmotiBit EDA
298 | type_tag = "EA"
299 | data = []
300 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + type_tag + '.csv'
301 | print(file_path)
302 | data.append(pd.read_csv(file_path))
303 |
304 | t = 0
305 | plot_timestamps_eb = data[t][timestamp_id].to_numpy()
306 | plot_data_eb = data[t][type_tag]
307 | #plot_data_eb = plot_data_eb*eb_eda_amp
308 | # Trim beginning / end of data to remove junk & sync taps
309 | plot_data_eb, plot_timestamps_eb = trim_data(plot_data_eb, plot_timestamps_eb, fs_eb[type_tag], trim_durations)
310 | plt.plot(plot_timestamps_eb, plot_data_eb, color="black")
311 | ax1.set_ylabel("EmotiBit", color="black", fontsize=label_font_size)
312 | ax1.set_xlabel("Time (seconds)")
313 | plt.show()
314 |
315 | reg_data_x = {}
316 | reg_data_x["label"] = "EmotiBit"
317 | reg_data_x["data"] = plot_data_eb
318 | reg_data_x["timestamps"] = plot_timestamps_eb
319 | reg_data_x["ylim"] = [0.4, 1.1]
320 |
321 | reg_data_y = {}
322 | reg_data_y["label"] = "Brain Products"
323 | reg_data_y["data"] = plot_data_la
324 | reg_data_y["timestamps"] = plot_timestamps_la
325 | reg_data_y["ylim"] = [3.5, 5.6]
326 |
327 | regression_plots(reg_data_x, reg_data_y, title="EDA", xlabel="Time (seconds)", ylabel="EDA (µSiemens)")
328 |
329 | fig = plt.gcf()
330 | fig.canvas.set_window_title("EDA_BP_EB_regr")
331 | if auto_save_figs:
332 | ebu.save_fig(fig_save_dir)
333 |
334 | # Bandpass filter EDA
335 | plot_data_eb = bt.band_filter(plot_data_eb, np.array(eda_bandpass), fs_eb[type_tag], order=4)
336 | plot_data_la = bt.band_filter(plot_data_la, np.array(eda_bandpass), fs_la[type_tag], order=4)
337 | # Remove filter artifact (3x duration of bandpass[0])
338 | reg_data_x["data"], reg_data_x["timestamps"] = trim_data(plot_data_eb, plot_timestamps_eb, fs_eb[type_tag], [4/eda_bandpass[0], 4/eda_bandpass[0]])
339 | if len(ylim_edr_eb) > 1:
340 | reg_data_x["ylim"] = ylim_edr_eb
341 | else:
342 | reg_data_x.pop("ylim")
343 | reg_data_y["data"], reg_data_y["timestamps"] = trim_data(plot_data_la, plot_timestamps_la, fs_la[type_tag], [4/eda_bandpass[0], 4/eda_bandpass[0]])
344 | if len(ylim_edr_la) > 1:
345 | reg_data_y["ylim"] = ylim_edr_la
346 | else:
347 | reg_data_y.pop("ylim")
348 |
349 | regression_plots(reg_data_x, reg_data_y,
350 | title="EDR (EDA Filtered {:.1f}".format(eda_bandpass[0]) + "-{:.1f}Hz)".format(eda_bandpass[1])
351 | , xlabel="Time (seconds)", ylabel="EDR (µSiemens)")
352 |
353 | fig = plt.gcf()
354 | fig.canvas.set_window_title("EDA_filt_BP_EB_regr")
355 | if auto_save_figs:
356 | ebu.save_fig(fig_save_dir)
357 |
358 | #######################################################
359 | # Heart Rate
360 | #######################################################
361 | if "ECG" in liveamp_ind.keys():
362 | # setup twin axes
363 | fig, ax1 = plt.subplots()
364 | ax2 = ax1.twinx()
365 | # Brain Products
366 | type_tag = "ECG"
367 | stream = liveamp_data[liveamp_ind[type_tag][0]]
368 | plot_timestamps_la = stream['time_stamps']
369 | plot_data_la = -stream['time_series'][:, liveamp_ind[type_tag][1]] / 25000 # uS / 25000 mV for Brain Products
370 | plot_data_la, plot_timestamps_la = trim_data(plot_data_la, plot_timestamps_la, fs_la[type_tag], trim_durations)
371 | #plt.plot(plot_timestamps_la, plot_data_la, color="red")
372 |
373 | # Bandpass filter
374 | plot_data_la = bt.band_filter(plot_data_la, np.array(ecg_bandpass), fs_la[type_tag], order=4)
375 | plot_data_la, plot_timestamps_la = trim_data(plot_data_la, plot_timestamps_la, fs_la[type_tag], [5/ecg_bandpass[0], 5/ecg_bandpass[0]])
376 | ax1.plot(plot_timestamps_la, plot_data_la, color="red")
377 | ax1.set_ylabel("Brain Products (ECG)", color="red", fontsize=label_font_size)
378 | # Detect Peaks
379 | peaks_la, _ = scp.signal.find_peaks(plot_data_la, 0.015)
380 | ax1.plot(plot_timestamps_la[peaks_la], plot_data_la[peaks_la], "*", color="red")
381 |
382 | # EmotiBit
383 | type_tag = "PI"
384 | data = []
385 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + type_tag + '.csv'
386 | print(file_path)
387 | data.append(pd.read_csv(file_path))
388 | t = 0
389 | plot_timestamps_eb = data[t][timestamp_id].to_numpy()
390 | plot_data_eb = -data[t][type_tag]
391 | plot_data_eb, plot_timestamps_eb = trim_data(plot_data_eb, plot_timestamps_eb, fs_eb[type_tag], trim_durations)
392 | #plt.plot(plot_timestamps_eb, plot_data_eb, color="black")
393 | # Bandpass filter
394 | plot_data_eb = bt.band_filter(plot_data_eb, np.array(ppg_bandpass), fs_eb[type_tag], order=4)
395 | plot_data_eb, plot_timestamps_eb = trim_data(plot_data_eb, plot_timestamps_eb, fs_eb[type_tag], [5/ppg_bandpass[0], 5/ppg_bandpass[0]])
396 | ax2.plot(plot_timestamps_eb, plot_data_eb, color="black")
397 | # Detect Peaks
398 | peaks_eb, _ = scp.signal.find_peaks(plot_data_eb, 75, None, fs_eb[type_tag] / 2)
399 | ax2.plot(plot_timestamps_eb[peaks_eb], plot_data_eb[peaks_eb], "*", color="black")
400 | ax2.set_ylabel("EmotiBit (PPG)", color="black", fontsize=label_font_size)
401 | ax1.set_xlabel("Time (seconds)")
402 | ax1.set_xlim([51150.103932117956, 51154.201822153096]) # zoom in on peaks
403 | plt.show()
404 |
405 | # setup twin axes
406 | # Calculate HR
407 | # Brain Products
408 | type_tag = "ECG"
409 | ibis_la = np.diff(peaks_la)/fs_la[type_tag]
410 | hr_la = 1 / ibis_la * 60
411 | plot_data_la = hr_la
412 | plot_timestamps_la = plot_timestamps_la[peaks_la[0:len(peaks_la)-1]]
413 |
414 | # EmotiBit
415 | type_tag = "PI"
416 | ibis_eb = np.diff(peaks_eb)/fs_eb[type_tag]
417 | hr_eb = 1 / ibis_eb * 60
418 | plot_data_eb = hr_eb
419 | plot_timestamps_eb = plot_timestamps_eb[peaks_eb[0:len(peaks_eb)-1]]
420 | plt.show()
421 |
422 | fig = plt.gcf()
423 | fig.set_size_inches(fig_size[1][1])
424 | fig.canvas.set_window_title("ECG_PPG_BP_EB")
425 | if auto_save_figs:
426 | ebu.save_fig(fig_save_dir)
427 |
428 | reg_data_x = {}
429 | reg_data_x["label"] = "EmotiBit"
430 | reg_data_x["data"] = hr_eb
431 | reg_data_x["timestamps"] = plot_timestamps_eb
432 |
433 | reg_data_y = {}
434 | reg_data_y["label"] = "Brain Products HR"
435 | reg_data_y["data"] = hr_la
436 | reg_data_y["timestamps"] = plot_timestamps_la
437 |
438 | regression_plots(reg_data_x, reg_data_y,
439 | title="Heart Rate", xlabel="Time (seconds)", ylabel="Heart Rate (BPM)")
440 |
441 | fig = plt.gcf()
442 | fig.canvas.set_window_title("HR_BP_EB_regr")
443 | if auto_save_figs:
444 | ebu.save_fig(fig_save_dir)
--------------------------------------------------------------------------------
/py/examples/wav_reading_example/ReadMe.md:
--------------------------------------------------------------------------------
1 | # wav_reading_example
2 | 
3 | This example currently demonstrates converting a formatted date/time string (e.g. ```09.05.2019 13:50:53```) into an epoch timestamp using Python's ```time``` module.
--------------------------------------------------------------------------------
/py/examples/wav_reading_example/wav_reading_example.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed May 22 21:41:48 2019
4 |
5 | @author: produceconsumerobot
6 | """
7 |
8 | import time
9 |
10 | date_time = '09.05.2019 13:50:53'
11 | pattern = '%d.%m.%Y %H:%M:%S'
12 | epoch = int(time.mktime(time.strptime(date_time, pattern)))
13 | print(epoch)
14 |
--------------------------------------------------------------------------------
/py/testing/battery_level_noise_check.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Jul 14 06:16:03 2022
4 |
5 | @author: consu
6 | """
7 |
8 | import numpy as np
9 | import pandas as pd
10 | import emotibit.info as info
11 | import emotibit.utils as utils
12 |
13 | data_type_tags = ["AX","AY","AZ","EA"]
14 | data_min_threshes = [-7, -7, -7, 0]
15 | battery_type_tag = "BV"
16 |
17 | file_dir = r"C:\priv\local\EmotiBit_Data\ESP32 Test Data 2022-06-27"
18 |
19 | file_base_names = [
20 |     "2022-07-07_13-00-39-966600",
21 |     "2022-07-07_13-12-07-855649",
22 |     "2022-07-07_15-22-06-628480",
23 |     "2022-07-07_20-11-30-434677",
24 |     "2022-07-08_10-16-32-406912",
25 |     "2022-07-08_18-49-43-185783",
26 |     "2022-07-11_19-10-29-573358",
27 |     "2022-07-12_05-26-20-841536",
28 |     "2022-07-12_16-41-40-227799",
29 |     "2022-07-13_10-41-48-900345",
30 |     "2022-07-15_04-38-31-568274",
31 |     "2022-07-15_04-38-33-566755",
32 |     "2022-07-18_16-32-31-833549",
33 |     "2022-07-18_16-37-26-372411",
34 |     "2022-07-18_18-09-54-431729",
35 |     "2022-07-19_19-17-42-312824",
36 |     "2022-07-19_19-18-28-537625",
37 |     "2022-07-19_19-19-16-295558"
38 | 
39 | ]
40 |
41 | timestamp_header = "LocalTimestamp"
42 |
43 | print_info = True
44 | print_user_notes = True
45 |
46 | all_results = []
47 |
48 | for f in range(len(file_base_names)):
49 | file_base = file_base_names[f]
50 | print("\n")
51 | print(file_base)
52 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + battery_type_tag + '.csv'
53 |     battery_data = pd.read_csv(file_path)
54 |
55 | total_runtime = round((battery_data[timestamp_header][len(battery_data[timestamp_header]) - 1] - battery_data[timestamp_header][0])/60/60,2)
56 | clean_runtime = total_runtime
57 | first_clip_time = -1
58 | wonkout_type_tag = ""
59 |     noise_battery = -1
60 | final_battery = battery_data[battery_type_tag][len(battery_data[battery_type_tag]) - 1]
61 | final_batt_time = battery_data[timestamp_header][len(battery_data[timestamp_header]) - 1]
62 | starting_battery = battery_data[battery_type_tag][1]
63 | first_timestamp = battery_data[timestamp_header][1]
64 |
65 | for d in range(len(data_type_tags)):
66 | data_type_tag = data_type_tags[d]
67 | data_min_thresh = data_min_threshes[d]
68 |
69 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + data_type_tag + '.csv'
70 |         data = pd.read_csv(file_path)
71 | final_data_time = data[timestamp_header][len(data[timestamp_header]) - 1]
72 |
73 | # Check for missing parsed data
74 | if (abs(final_data_time - final_batt_time) > 5):
75 | print("\n")
76 | print("ERROR: data file durations don't match")
77 | print(battery_type_tag + ": " + str(final_batt_time))
78 | print(data_type_tag + ": " + str(final_data_time))
79 | exit(-1)
80 |
81 | clip_indexes = np.where(data[data_type_tag] < data_min_thresh)
82 | if (len(clip_indexes[0]) > 0):
83 | if (first_clip_time == -1):
84 | first_clip_time = data[timestamp_header][clip_indexes[0][0]]
85 | wonkout_type_tag = data_type_tag
86 | else:
87 | if (first_clip_time > data[timestamp_header][clip_indexes[0][0]]):
88 | first_clip_time = data[timestamp_header][clip_indexes[0][0]]
89 | wonkout_type_tag = data_type_tag
90 |
91 | clean_runtime = round((first_clip_time - first_timestamp)/60/60,2)
92 |
93 | battery_indexes = np.where(battery_data[timestamp_header] > first_clip_time)
94 | if (len(battery_indexes[0]) > 0):
95 | noise_battery = battery_data[battery_type_tag][battery_indexes[0][0]]
96 |
97 | print("Total runtime (h) = " + str(total_runtime))
98 | print("First wonkout time = " + str(first_clip_time))
99 | print("Runtime at first wonkout (h) = " + str(clean_runtime))
100 | print("First wonkout battery level = " + str(noise_battery))
101 | print("Final battery level = " + str(final_battery))
102 | print("Starting battery level = " + str(starting_battery))
103 | print("Wonkout TypeTag = " + str(wonkout_type_tag))
104 | result = file_base \
105 | + ", " + str(total_runtime) \
106 | + ", " + str(first_clip_time) \
107 | + ", " + str(clean_runtime) \
108 | + ", " + str(noise_battery) \
109 | + ", " + str(final_battery) \
110 | + ", " + str(starting_battery) \
111 | + ", " + str(wonkout_type_tag)
112 | all_results.append(result)
113 | print(result)
114 |
115 | if (print_info):
116 | print("")
117 | print("************")
118 | print("Info:")
119 | info.print_info(file_dir, file_base_names)
120 |
121 | if (print_user_notes):
122 | print("")
123 | print("************")
124 | print("User Notes:")
125 | utils.print_user_notes(file_dir, file_base_names, ', ')
126 |
127 | print("")
128 | print("************")
129 | print("All Results:")
130 | for result in all_results:
131 | print(result)
132 |
133 |
134 |
135 |
136 |
137 |
--------------------------------------------------------------------------------
/py/testing/data_check.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Feb 19 14:56:37 2020
4 |
5 | @author: consu
6 | """
7 |
8 | import os
9 | import pandas as pd
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 |
13 | type_tags = ['EA']
14 | file_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share (1)\Conferences_Talks\2020-02-14 Duke CS101 Lab\data"
15 | file_base_names = [f.name for f in os.scandir(file_dir) if f.is_dir()]
16 | nbins = 100
17 | ylims = [0, 10]
18 | fig_size = [1700.0, 900.0]
19 |
20 | file_base = file_base_names[0]
21 |
22 | for type_tag in type_tags:
23 | fig = plt.figure(type_tag)
24 | fig.clf()
25 | fig_dpi = fig.get_dpi()
26 | fig.set_size_inches(fig_size[0]/float(fig_dpi),fig_size[1]/float(fig_dpi))
27 | fig, axs = plt.subplots(nrows=len(file_base_names), ncols=1, num=type_tag)
28 |     # plt.clf (a no-op without parentheses; calling plt.clf() here would clear the subplots just created)
29 | plt.subplots_adjust(left=.2)
30 | for f in range(len(file_base_names)):
31 | file_base = file_base_names[f]
32 |         eda_data = pd.read_csv(file_dir +'/' + file_base + '/' + file_base + '_' + type_tag + '.csv')
33 | ts_diff_epoch = np.diff(eda_data.EpochTimestamp)
34 | ts_diff_emotibit = np.diff(eda_data.EmotiBitTimestamp) / 1000
35 |
36 | plt.sca(axs[f])
37 | plt.plot(eda_data.EpochTimestamp, eda_data[type_tag])
38 | if (len(ylims) > 0):
39 | ylim = plt.ylim()
40 | plt.ylim(max(ylim[0], ylims[0]), min(ylim[1], ylims[1]))
41 | h = plt.ylabel(file_base)
42 | h.set_rotation(0)
43 | axs[f].yaxis.set_label_position("right")
44 | axs[f].yaxis.set_label_coords(-0.125,.65)
45 | axs[f].get_xaxis().set_ticks([])
46 | if (f == 0):
47 | plt.title(type_tag)
--------------------------------------------------------------------------------
/py/testing/dummy_data_check.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 |
5 | """
6 | Prints summary stats for EmotiBit triangle wave dummy data to assess for correctness
7 |
8 | Reports:
9 | Data range
10 | diff(data) range
11 | Signal period range (samples)
12 | Signal period range (EpochTimestamps)
13 | Signal freq range (EpochTimestamps)
14 |
15 | ToDo: Turn into a module
16 |
17 | Created on Wed Feb 19 14:56:37 2020
18 |
19 | @author: consu
20 | """
21 |
22 | import os
23 | import pandas as pd
24 | import matplotlib.pyplot as plt
25 | import numpy as np
26 | import sys
27 |
28 |
29 | type_tags = ['PR', 'PI', 'PG', 'EA', 'EL', 'ER', 'H0', 'AX', 'AY', 'AZ',
30 | 'GX', 'GY', 'GZ', 'MX', 'MY', 'MZ', 'TH', 'T0']
31 | file_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share (1)\EmotiBit Test Data\Beta Test Data\Acute\DummyData\2020-03-02_05-47-42-936329"
32 | file_base = "2020-03-02_05-47-42-936329"
33 | nbins = 100
34 | ylims = [0, 10]
35 | fig_size = [1700.0, 900.0]
36 | n_zfill = 9
37 | p_format = '{:.4f}'
38 | stats_filename = "DummyDataStats.txt"
39 |
40 | stats_file = open(file_dir +'/' + stats_filename, 'w+')
41 | print(stats_file.closed)
42 | print_locs = [sys.stdout, stats_file]
43 |
44 | for print_loc in print_locs:
45 | print(file_base, file = print_loc)
46 |
47 | # stats_file is already open for writing above; reopening it here is unnecessary
48 | print(stats_file.closed)
49 |
50 | for type_tag in type_tags:
51 |
52 |     data = pd.read_csv(file_dir +'/' + file_base + '_' + type_tag + '.csv')
53 |
54 | data_diff = np.diff(data[type_tag])
55 | trough_indices = data.index[data[type_tag] == min(data[type_tag])]
56 | sample_periods = np.diff(trough_indices)
57 | epoch_periods = np.diff(data.EpochTimestamp[trough_indices])
58 |
59 | for print_loc in print_locs:
60 | print(type_tag, file = print_loc)
61 | print((p_format.format(min(data[type_tag]))).rjust(n_zfill) + ", " +
62 | (p_format.format(max(data[type_tag]))).rjust(n_zfill) +
63 | " -- Data range", file = print_loc)
64 | print((p_format.format(min(data_diff))).rjust(n_zfill) + ", " +
65 | (p_format.format(max(data_diff))).rjust(n_zfill) +
66 | " -- Delta range", file = print_loc)
67 | print((p_format.format(min(sample_periods))).rjust(n_zfill) + ", " +
68 | (p_format.format(max(sample_periods))).rjust(n_zfill) +
69 | " -- Signal period range (samples)", file = print_loc)
70 | print((p_format.format(min(epoch_periods))).rjust(n_zfill) + ", " +
71 | (p_format.format(max(epoch_periods))).rjust(n_zfill) +
72 | " -- Signal period range (EpochTimestamps)", file = print_loc)
73 | print((p_format.format(min(1 / epoch_periods))).rjust(n_zfill) + ", " +
74 | (p_format.format(max(1 / epoch_periods))).rjust(n_zfill) +
75 | " -- Signal freq range (EpochTimestamps)", file = print_loc)
76 |
77 | # ToDo: print to a csv file?
78 |
79 | # ToDo: Add graphing to get at-a-glance assessment
80 |
81 |
82 | stats_file.close()
83 | print(stats_file.closed)
84 |
--------------------------------------------------------------------------------
/py/testing/timestamp_check.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Feb 18 22:19:31 2020
4 |
5 | @author: consu
6 | """
7 |
8 | import os
9 | import pandas as pd
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 |
13 | ### Add the type tags you want to test to the list below
14 | type_tags = ['PR', 'PI', 'PG', 'EA', 'EL', 'ER', 'H0', 'AX', 'AY', 'AZ',
15 | 'GX', 'GY', 'GZ', 'MX', 'MY', 'MZ', 'T0']
16 | ### Use the data parser to parse the file. Add the path to the folder below.
17 | ### Make sure the data is stored as /filename/filename_[type_tag].
18 | ### ex: C:\Users\nitin\Documents\EmotiBit\DataAnalysis\unControlledTest\2020-04-09_10-41-19-913922\2020-04-09_10-41-19-913922_PR.csv
19 | file_dir = r"C:\Users\nitin\Documents\EmotiBit\DataAnalysis\unControlledTest"
20 |
21 | file_base_names = ["2020-04-09_10-41-19-913922", "2020-04-09_10-41-19-913922"]
22 | nbins = 100
23 | fig_size = [1700.0, 900.0]
24 |
25 | for type_tag in type_tags:
26 | fig_name = type_tag + ' Timestamp'
27 | fig = plt.figure(fig_name)
28 | fig.clf()
29 | fig_dpi = fig.get_dpi()
30 | fig.set_size_inches(fig_size[0]/float(fig_dpi),fig_size[1]/float(fig_dpi))
31 | fig, axs = plt.subplots(nrows=len(file_base_names), ncols=2, num=fig_name)
32 |     # plt.clf (a no-op without parentheses; calling plt.clf() here would clear the subplots just created)
33 | plt.subplots_adjust(left=.3)
34 | for f in range(len(file_base_names)):
35 | file_base = file_base_names[f]
36 | file_path = file_dir + '\\' + file_base + '\\' + file_base + '_' + type_tag + '.csv'
37 | print(file_path)
38 |         eda_data = pd.read_csv(file_path)
39 | ts_diff_epoch = np.diff(eda_data.EpochTimestamp)
40 | ts_diff_emotibit = np.diff(eda_data.EmotiBitTimestamp) / 1000
41 |
42 | plt.sca(axs[f][0])
43 | h = plt.ylabel(file_base)
44 | h.set_rotation(0)
45 | axs[f][0].yaxis.set_label_position("left")
46 | axs[f][0].yaxis.set_label_coords(-0.35,.35)
47 |
48 | plt.plot(ts_diff_emotibit[1:ts_diff_emotibit.size])
49 | plt.plot(ts_diff_epoch[1:ts_diff_epoch.size])
50 | plt.xlim(0,60*10)
51 | plt.sca(axs[f][1])
52 |
53 |
54 | #counts, bins = np.histogram(ts_diff_emotibit)
55 | #axs[f][1].hist(bins[:-1], bins, weights=counts, label='EmotiBit')
56 | #counts, bins = np.histogram(ts_diff_epoch)
57 | #axs[f][1].hist(bins[:-1], bins, weights=counts, label='Epoch')
58 | x_range = [0, 0.15]
59 |         plt.xlim(x_range)
60 | axs[f][1].hist(ts_diff_emotibit, nbins, range=x_range, label='EmotiBit')
61 | axs[f][1].hist(ts_diff_epoch, nbins, range=x_range, label='Epoch')
62 | if(f == 0):
63 | axs[f][1].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=2, mode="expand", borderaxespad=0.)
64 | plt.title(fig_name)
65 |
66 | if (f < len(file_base_names) - 1):
67 | axs[f][0].get_xaxis().set_ticks([])
68 | axs[f][1].get_xaxis().set_ticks([])
69 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/py/testing/timestamp_file_move.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Wed Feb 19 13:23:33 2020
4 |
5 | @author: consu
6 | """
7 |
8 | import os
9 | import shutil
10 |
11 | type_tags = ['EA']
12 | file_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share (1)\Conferences_Talks\2020-02-14 Duke CS101 Lab\data"
13 | file_base_names = [f.name for f in os.scandir(file_dir) if f.is_dir()]
14 |
15 |
16 | for type_tag in type_tags:
17 | for f in range(len(file_base_names)):
18 | file_base = file_base_names[f]
19 | in_file_path = file_dir +'/' + file_base + '/' + file_base + '_' + type_tag + '.csv'
20 | out_file_path = file_dir +'/' + file_base + '/' + file_base + '_' + type_tag + '_norm.csv'
21 |
22 | source = file_dir + '/' + file_base + '/' + file_base + '_' + type_tag + '.csv'
23 | destination = file_dir + '/' + file_base + '_' + type_tag + '.csv'
24 | print('Copying: ' + source + ' to ' + destination)
25 | shutil.copy(source, destination)
26 |
27 | source = file_dir + '/' + file_base + '/' + file_base + '_' + type_tag + '_norm.csv'
28 | destination = file_dir + '/' + file_base + '/' + file_base + '_' + type_tag + '.csv'
29 | print('Moving: ' + source + ' to ' + destination)
30 | shutil.move(source, destination)
31 | print('****')
32 |
33 |
34 |
--------------------------------------------------------------------------------
/py/testing/timestamp_normalize.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Feb 18 22:19:31 2020
4 |
5 | @author: consu
6 | """
7 |
8 | import os
9 | import matplotlib.pyplot as plt
10 | import csv
11 |
12 | type_tags = ['EA']
13 | file_dir = r"C:\priv\gd\Dropbox\CFL\EmotiBit\EmotiBit CFL Share (1)\Conferences_Talks\2020-02-14 Duke CS101 Lab\data"
14 | file_base_names = [f.name for f in os.scandir(file_dir) if f.is_dir()]
15 |
16 | for type_tag in type_tags:
17 | fig_name = type_tag + " original"
18 | fig = plt.figure(fig_name)
19 | fig.clf()
20 | fig, axs = plt.subplots(nrows=len(file_base_names), ncols=2, num=fig_name)
21 |     # plt.clf (a no-op without parentheses; calling plt.clf() here would clear the subplots just created)
22 | for f in range(len(file_base_names)):
23 | #for f in range(1):
24 | file_base = file_base_names[f]
25 | in_file_path = file_dir +'/' + file_base + '/' + file_base + '_' + type_tag + '.csv'
26 | out_file_path = file_dir +'/' + file_base + '/' + file_base + '_' + type_tag + '_norm.csv'
27 |
28 | start_timestamp = 0
29 | end_timestamp = 0
30 |
31 | # Read data to calculate average period
32 | with open(in_file_path, newline='') as csvfile:
33 | dataReader = csv.reader(csvfile, delimiter=',', quotechar='|')
34 | row_counter = 0
35 |
36 | for row in dataReader:
37 | if (row_counter < 2):
38 | print(row)
39 | print(row[0])
40 | if (row_counter == 1):
41 | start_timestamp = float(row[0])
42 | row_counter = row_counter + 1
43 | end_timestamp = row[0] # update end_timestamp to last read
44 | file_duration = float(end_timestamp) - start_timestamp
45 | sampling_period = file_duration / (row_counter - 2)
46 |
47 | print(end_timestamp)
48 | print('Total Duration: ' + str(file_duration))
49 | print('Sample Count: ' + str(row_counter - 1))
50 | print('Avg Period: ' + str(sampling_period))
51 | print('Avg Freq: ' + str(1 / sampling_period))
52 | print('****')
53 |
54 |         normalized_data = []
55 | # Read data and normalize timestamps
56 | with open(in_file_path, newline='') as csvfile:
57 | dataReader = csv.reader(csvfile, delimiter=',', quotechar='|')
58 |
59 | row_counter = 0
60 | for row in dataReader:
61 | if (row_counter > 1):
62 | row[0] = str(start_timestamp + sampling_period * (row_counter - 1))
63 |
64 | row_counter = row_counter + 1
65 | normalized_data.append(row)
66 |
67 | # Write normalized timestamp data
68 | with open(out_file_path,'w', newline='') as csvfile:
69 | wr = csv.writer(csvfile, dialect='excel')
70 | wr.writerows(normalized_data)
71 | # for row in normalized_data:
72 | # wr.writerows(row)
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------