├── .coveragerc ├── .gitignore ├── .readthedocs.yml ├── .travis.yml ├── LICENSE ├── README.md ├── changelog.log ├── docs ├── algorithmfunctioning.rst ├── apiref.rst ├── conf.py ├── development.rst ├── documents │ └── HumanistPaper.pdf ├── heartpy.analysis.rst ├── heartpy.datautils.rst ├── heartpy.exceptions.rst ├── heartpy.filtering.rst ├── heartpy.heartpy.rst ├── heartpy.peakdetection.rst ├── heartpy.preprocessing.rst ├── heartpy.rst ├── heartpy.visualizeutils.rst ├── heartrateanalysis.rst ├── images │ ├── CO2_RRbreath.jpg │ ├── ECG_PPG_Comparison.jpg │ ├── Figure_PeakDetection.jpeg │ ├── Figure_PeakDetection2.jpeg │ ├── bootstrapped_errors.jpg │ ├── bootstrapped_errors.psd │ ├── butterworth.jpeg │ ├── clipping.jpg │ ├── clipping_correct_vertical.jpg │ ├── fitresultsimg.jpg │ ├── hampelcorrect.jpg │ ├── highnoise.png │ ├── output1.jpeg │ ├── output2.jpeg │ ├── output_3.jpeg │ ├── output_4.jpg │ ├── peakdetection_rmssd.jpg │ ├── peakdetection_rmssd.psd │ ├── peaknorm.jpeg │ ├── peakthresholding.jpeg │ └── ppg_ecg.jpg ├── index.rst ├── modules.rst └── quickstart.rst ├── examples ├── 1_regular_PPG │ └── Analysing_a_PPG_signal.ipynb ├── 2_regular_ECG │ ├── Analysing_a_regular_ECG_signal.ipynb │ ├── e0103.csv │ ├── e0110.csv │ └── e0124.csv ├── 3_smartwatch_data │ ├── Analysing_Smartwatch_Data.ipynb │ └── raw_ppg.csv ├── 4_smartring_data │ ├── Analysing_Smart_Ring_Data.ipynb │ └── ring_data.csv ├── 5_noisy_ECG │ ├── 118e00.csv │ ├── 118e00_ann.csv │ ├── 118e06.csv │ ├── 118e06_ann.csv │ ├── 118e12.csv │ ├── 118e12_ann.csv │ ├── 118e24.csv │ ├── 118e24_ann.csv │ ├── 118e24_annotations.csv │ └── Analysing_Noisy_ECG.ipynb └── 6_colorblind_mode │ └── Colorblind_mode.ipynb ├── heartpy ├── __init__.py ├── analysis.py ├── config.py ├── data │ ├── data.csv │ ├── data.log │ ├── data.mat │ ├── data2.csv │ ├── data2.log │ ├── data2.mat │ ├── data3.csv │ └── data3.mat ├── datautils.py ├── exceptions.py ├── filtering.py ├── heartpy.py ├── peakdetection.py ├── preprocessing.py └── visualizeutils.py ├── images ├── clipping_correct.jpg ├── output1.jpeg └── output2.jpeg ├── run_tests.py ├── setup.py └── validation └── validation_rawdata.zip /.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | [run] 3 | branch = True 4 | 5 | [report] 6 | # Regexes for lines to exclude from consideration 7 | exclude_lines = 8 | # Have to re-enable the standard pragma 9 | pragma: no cover 10 | 11 | # Don't complain if tests don't hit defensive assertion code: 12 | raise AssertionError 13 | raise NotImplementedError 14 | raise LookupError 15 | raise BadSignalWarning 16 | raise IncorrectFileType 17 | raise ValueError 18 | except 19 | return None 20 | 21 | # Don't complain if non-runnable code isn't run: 22 | if __name__ == .__main__.: 23 | 24 | ignore_errors = True -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | *.pyc 3 | 4 | docs/build 5 | docs/make.bat 6 | docs/Makefile 7 | build/ 8 | dist/ 9 | heartpy.egg-info/ 10 | papers/ -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | 4 | # Build documentation in the docs/ directory with Sphinx 5 | sphinx: 6 | configuration: docs/conf.py 7 | 8 | python: 9 | setup_py_install: true 10 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "2.7" 5 | - "3.5" 6 | - "3.6" 7 | - "3.7" 8 | - "3.8" 9 | 10 | install: 11 | # Install the codecov pip dependency 12 | - pip install codecov 13 | # run install to get dependencies and heartpy installed 14 | - python setup.py install 15 | 16 | # Run the unit test 17 | script: 18 | - coverage run run_tests.py 19 | 20 | # Push the results back to codecov 21 | after_success: 22 | - codecov -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Paul van Gent 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # HeartPy - Python Heart Rate Analysis Toolkit 2 | 3 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1324311.svg)](https://doi.org/10.5281/zenodo.1324311) [![Build Status](https://travis-ci.org/paulvangentcom/heartrate_analysis_python.svg?branch=master)](https://travis-ci.org/paulvangentcom/heartrate_analysis_python) [![codecov](https://codecov.io/gh/paulvangentcom/heartrate_analysis_python/branch/master/graph/badge.svg)](https://codecov.io/gh/paulvangentcom/heartrate_analysis_python) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/heartpy) 4 | 5 | 6 | **Like HeartPy? Don't forget to leave a star!** 7 | 8 | 9 | # Structural update 10 | 11 | HeartPy V1.2 has landed! The structure of the package has been reworked to be in separate modules now in preparation of the next big update, which will feature many analysis expansions and the first steps towards a GUI for HeartPy. HeartPy has been growing steadily and had reached the point where it became cluttered and unwieldy to keep in a single file. The API remains unchanged. 12 | 13 | An 'Examples' folder has been added to the repo which will be expanded soon. Now there's two notebooks explaining how to analyse ppg signals from smartwatches and smart rings. 14 | 15 | Colorblind support has been added, see [this notebook in the examples folder](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/6_colorblind_mode/Colorblind_mode.ipynb) 16 | 17 | # Installation 18 | ``` 19 | python setup.py install 20 | ``` 21 | 22 | Alternatively, we're also on PIP: 23 | ``` 24 | python -m pip install heartpy 25 | ``` 26 | 27 | That's it! Note that Github always has the newest version. 28 | 29 | # Documentation 30 | 31 | The official documentation is online! [You can find the official documentation here](https://python-heart-rate-analysis-toolkit.readthedocs.io) 32 | 33 | # Python 2.7 34 | The module compiles and and runs fine on Python 2.7, **but** the some unit tests fail. 35 | 36 | # Tutorial notebooks are now available in Examples/ 37 | These show how to handle various analysis tasks with HeartPy, from smartwatch data, smart ring data, regular PPG, and regular (and very noisy) ECG. The notebooks sometimes don't render through the github engine, so either open them locally, or use an online viewer like [nbviewer](https://nbviewer.jupyter.org/). 38 | 39 | We recommend you follow the notebooks in order: 40 | - [1. Analysing a PPG signal](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/1_regular_PPG/Analysing_a_PPG_signal.ipynb), a notebook for starting out with HeartPy using built-in examples. 41 | - [2. Analysing an ECG signal](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/2_regular_ECG/Analysing_a_regular_ECG_signal.ipynb), a notebook for working with HeartPy and typical ECG data. 42 | - [3. Analysing smartwatch data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/3_smartwatch_data/Analysing_Smartwatch_Data.ipynb), a notebook on analysing low resolution PPG data from a smartwatch. 43 | - [4. Analysing smart ring data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/4_smartring_data/Analysing_Smart_Ring_Data.ipynb), a notebook on analysing smart ring PPG data. 44 | - [5. Analysing noisy ECG data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/5_noisy_ECG/Analysing_Noisy_ECG.ipynb), an advanced notebook on working with very noisy ECG data, using data from the MIT-BIH noise stress test dataset. 45 | - [6. Colorblind mode - How To and Styles](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/6_colorblind_mode/Colorblind_mode.ipynb) 46 | 47 | 48 | 49 | # More information 50 | **HeartPy**, the **Python Heart Rate Analysis Toolkit** is a module for heart rate analysis in Python. It started as pure-python implementation to analyse physiological data taken in naturalistic driving and cycling experiments. 51 | 52 | The module takes a discrete heart rate signal and outputs time-domain and frequency-domain measures often found in scientific literature: 53 | 54 | 55 | Time domain: 56 | * beats per minute, BPM 57 | * interbeat interval, IBI 58 | * standard deviation if intervals between adjacent beats, SDNN 59 | * standard deviation of successive differences between adjacent R-R intervals, SDSD 60 | * root mean square of successive differences between adjacend R-R intervals, RMSSD 61 | * proportion of differences between R-R intervals greater than 20ms, 50ms, pNN20, pNN50 62 | * median absolute deviation, MAD 63 | * Poincare analysis (SD1, SD2, S, SD1/SD2) 64 | * Poincare plotting 65 | 66 | Frequency domain (ranges per Shaffer and Ginsberg: https://doi.org/10.3389/fpubh.2017.00258) 67 | * very low frequency component (0.0033–0.04 Hz), VLF 68 | * low frequency component (0.04–0.15 Hz), LF 69 | * high frequency component (0.15–0.4 Hz), HF 70 | * lf/hf ratio, LF/HF 71 | 72 | **When using the package in your research, please cite**: 73 | 74 | van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). Analysing Noisy Driver Physiology Real-Time Using Off-the-Shelf Sensors: Heart Rate Analysis Software from the Taking the Fast Lane Project. Journal of Open Research Software, 7(1), 32. DOI: http://doi.org/10.5334/jors.241 75 | 76 | van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). HeartPy: A novel heart rate algorithm for the analysis of noisy signals. Transportation Research Part F: Traffic Psychology and Behaviour, 66, 368–378. https://doi.org/10.1016/j.trf.2019.09.015 77 | 78 | ## Documentation 79 | 80 | [You can find the official documentation here](https://python-heart-rate-analysis-toolkit.readthedocs.io) 81 | 82 | The module is also to some extent described in my tutorial series: 83 | 84 | * [Analyzing a Discrete Heart Rate Signal Using Python - Part 1](http://www.paulvangent.com/2016/03/15/analyzing-a-discrete-heart-rate-signal-using-python-part-1/) 85 | * [Analyzing a Discrete Heart Rate Signal Using Python - Part 2](http://www.paulvangent.com/2016/03/21/analyzing-a-discrete-heart-rate-signal-using-python-part-2/) 86 | * [Analyzing a Discrete Heart Rate Signal Using Python - Part 3](http://www.paulvangent.com/2016/03/30/analyzing-a-discrete-heart-rate-signal-using-python-part-3/) 87 | * Analyzing a Discrete Heart Rate Signal Using Python - Part 4: in development 88 | 89 | 90 | ## License 91 | The module is licensed under the [MIT License](https://opensource.org/licenses/MIT) 92 | 93 | ## Validation 94 | Initial results of the validation have been reported in [1, 2]. 95 | 96 | 97 | [1]van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2018). Heart Rate Analysis for Human Factors: Development and Validation of an Open Source Toolkit for Noisy Naturalistic Heart Rate Data. In Proceedings of the 6th HUMANIST Conference (pp. 173–178). 98 | 99 | [2] van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). HeartPy: A novel heart rate algorithm for the analysis of noisy signals. Transportation Research Part F: Traffic Psychology and Behaviour, 66, 368–378. https://doi.org/10.1016/j.trf.2019.09.015 100 | 101 | 102 | 103 | ## To-do 104 | 105 | The module is still in active development. See the changelog for past changes. The to-do for the coming months is: 106 | 107 | to do before V1.3 108 | - [X] Same but for PPG - morphology too variable, method unstable 109 | - [ ] Add 'strictness parameter' to affect how HeartPy evaluates peaks for acceptance/rejection 110 | - [ ] Add method to handle NaN data automatically 111 | - [ ] clean_rr method now removes incorrect values, update to allow for replacement by median of surrounding data points 112 | - [ ] add method that can fill in missing R-peaks, settable to search for either local optimum OR mean imputation. 113 | - [ ] Report validation performance on repo (published paper + key-points document once published) 114 | - [ ] Change backend structure in anticipation of GUI development 115 | - [ ] Develop GUI for HeartPy 116 | -------------------------------------------------------------------------------- /changelog.log: -------------------------------------------------------------------------------- 1 | V0.8.1 2 | - Added changelog to repository 3 | - Implemented clipping detection and interpolation functionality 4 | - Changed FFT calculation flag to default False, as in some cases the FFT takes very long to compute. Possible causes and fixes to be investigated 5 | - Pushed readthedocs.io documentation source structure to repository 6 | - Added encoding argument to get_data function, per the NumPy deprecation of not using encoding. For more info: https://docs.scipy.org/doc/numpy-1.14.0/release.html#encoding-argument-for-text-io-functions 7 | 8 | V0.8.2 9 | - RR_difference interval no longer taken into account when RR-intervals are not technically adjacent due to rejected peak presence in between 10 | - Moved matplotlib import statement so that it is no longer necessary unless calling the plot functionality, reduces need to install irrelevant dependencies when plotting functionality not needed 11 | - Added Hampel Filter with settable filtersize 12 | - Added method to suppress noisy segments called 'Hampel Corrector', called such as it's simply a Hampel Filter with large window size. Computationally on the expensive side so disabled by default, but very good at suppressing noisy segments without influencing peak positions in the rest of the signal. 13 | - Added breathing rate extraction method. Stores estimated breathing rate in measures['breathingrate'] 14 | - Made BPM threshold values settable 15 | - Added Periodogram- and Welch-based PSD estimation 16 | - Added support for edge case where clipping segment starts early in signal, meaning there is insufficient data to interpolate accurately. 17 | 18 | V1.0 19 | - Released Version 1.0 20 | - Added flag to disable scaling when interpolating clipping segments. Useful for data with large amplitude variations. 21 | - Added marking of rejected segments in plotter 22 | - Added automatic peak rejection when first peak occurs within 150ms, since the signal might start just after a peak, which creates slight inaccuracy. 23 | - Added segment rejection based on percentage of incorrect peaks. 24 | 25 | V1.0.1 26 | - Changed segmentwise rejection API to simplify plotting 27 | 28 | V1.1 29 | - We are now officially called HeartPy 30 | - Changed overall structure to get rid of global dicts, allows for modular or multithreaded use easier. 31 | - Changed docs to reflect changes 32 | 33 | V1.1.1 34 | - Updated doctests that failed due to rounding errors introduced by switching from FFT to Welch's PSD computation method as default option. 35 | - Added exceptions.py to implement custom exception handling, also for future versions. 36 | - Added custom exception BadSignalWarning to handle situations where no discernible heart rate is present in signal, replacing the default nondescript "empty list valid_ma[]" error with a verbose message. 37 | - Fixed function "fit_peaks()" not inheriting bpmmin and bpmmax variables from "process()", as outlined by Yurasmol in issue #5. 38 | 39 | V1.1.2 40 | - Added high-pass and band-pass Butterworth filters 41 | - Fixed case where no peak-peak differences over 20ms in a signal caused an exception 42 | - Fixed case where intermittent noisy signals resulted in exception when calculating breathing rate 43 | - Added scale_sections() function that uses local scaling rather than global 44 | - Added preprocess_ecg(data, sample_rate) function that attempts to preprocess ecg data. Note: doubles sampling rate of returned data. 45 | - Added highpass and bandpass filtering options. Called through filtersignal function with argument filtertype= lowpass/highpass/bandpass. 46 | - Changed way peak fitting works by adding extra peak validation step between fitting phases 47 | 48 | V1.1.3 49 | - Added functions to allow for continous measure output 50 | - Added make_windows() function to divide input data into evenly sized segments with settable windowsize and settable overlap 51 | - Added two functions to remove outliers from continous set: outliers_modified_z(), and outliers_iqr_method(). Both take a list of numpy array of one continous measure and remove outliers due to incorrectly analysed sections, if any outliers are persent. 52 | 53 | V1.1.4 54 | - Added wrapper function 'process_segmentwise()' that splits hrdata in sections (overlap between sections is settable), and analyses each section separately. Returns two dict objects with all outputs. 55 | - Changed rolling mean function to no longer flatten off the more it is raised, turned out to be more robust. 56 | - Removed peak validation step implemented in V1.1.2 -> after further testing it turned out to be detrimental to many cases. 57 | - Updated docs to reflect the changes to the codebase. 58 | 59 | V1.1.5 60 | - Adapted `make_windows()` to accept tail end of data. Updated `process_segmentise()` to make use of this. 61 | - Updated docs explaining the new functionality 62 | - Fixed error where 'fast' segmentwise method returned single instance in dict rather than sequence 63 | - Fixed error where 'fast' segmentwise method returned empty working_data object 64 | - Started properly structuring module. 65 | 66 | V1.1.6 67 | - moved nn20/nn50 temp containers to 'working_data' dict in stead of output measures (see issue #15). 68 | - fixed wrongly unpacked kwargs in process_segmentwise 69 | - deprecated doctests.txt for now - no longer functional and need updating. 70 | - process_segmentwise() now returns the indices of the slices made in the original data, these are appended to both the returned measures{} and working_data{}. Closes #14 71 | - updated process_segmentwise to allow control over last (likely incomplete) segment. Setting min_size to -1 now puts the tail end of the data into the last bin, making it larger than the others. Closes #16 72 | - fixed sample_rate not being passed to rolmean() when esitmating the breathing rate 73 | 74 | V1.1.7 75 | - added peak interpolation (high precision mode) method 'interpolate_peaks' that allows more accurate estimation of peak positions in signal of low sampling frequency 76 | - in segmentwise processing, fixed bug where the interquartile-range was also used when modified z-score approach was requested. 77 | - fixed mistake in argument order in process_segmentwise function docstring 78 | - implemented 'segment_plotter()' function. This will plot segments and save plots to folder after running 'process_segmentwise()'. 79 | - updated docs to include new functionality. 80 | 81 | V1.1.7a 82 | - hotfix for process_segmentwise issue where multiple copies of the same index range were placed in the output. 83 | 84 | V1.2 85 | - Changed organisation HeartPy, it is now split into multiple modules to keep the growing library ordered. This opens the way to the planned addition of a GUI. 86 | - Added examples that also function as doctests to all functions 87 | - Added extensive documentation docstrings to all functions 88 | - Added function load_exampledata() that loads the available example data files directly from github. 89 | - Added several jupyter notebooks in Examples folder, illustrating how to work with different types of data. 90 | - Added function to reject outliers in RR-list and compute measures based on cleaned list. See: clean_rr_intervals() 91 | - Added build and code coverage badges and support 92 | 93 | V1.2.4 94 | - Added colorblind mode 95 | - Added convolutional pipeline to filter very noisy ECG without distorting the QRS-complex 96 | - Added Savitzky-Golay signal smoothing filter (heartpy.smooth_signal). 97 | - Added baseline wander filter (heartpy.remove_baseline_wander()) based on a Notch filter. 98 | - Added Poincare analysis (written to measures{} dict) and plotting capabitity (heartpy.plot_poincare(working_data, measures) 99 | - API is now generated on the docs automatically. 100 | - Improved breathing rate estimation 101 | 102 | V1.2.5 103 | -------------------------------------------------------------------------------- /docs/algorithmfunctioning.rst: -------------------------------------------------------------------------------- 1 | .. _algorithm functioning: 2 | 3 | ********************* 4 | Algorithm functioning 5 | ********************* 6 | 7 | This section describes the details of the algorithm functionality. 8 | 9 | Pre-processing 10 | ============== 11 | Various options are available for pre-processing. These are described below 12 | 13 | 14 | Clipping detection and interpolation 15 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 16 | Whenever a measured property exceeds a sensor's sensitivity range, or when digitising an analog signal, clipping can occur. Clipping in this case means the peaks are flattened off because the signal continues outside the boundaries of the sensor you're using: 17 | 18 | .. image:: images/clipping.jpg 19 | :height: 300px 20 | :width: 300px 21 | :align: center 22 | 23 | Clipping functions by detecting (almost) flat parts of the signal near its maximum, preceded and followed by a steep angle on both ends. The 'missing' signal peak is interpolated using a cubic spline, which takes into account 100ms of data on both ends of the clipping portion of the signal. The reconstructed R-peak is overlaid on the original signal and used for further analysis. 24 | 25 | .. image:: images/clipping_correct_vertical.jpg 26 | :align: center 27 | 28 | 29 | Peak enhancement 30 | ~~~~~~~~~~~~~~~~ 31 | A peak enhancement function is available that attempts to normalise the amplitude, then increase R-peak amplitude relative to the rest of the signal. It only uses linear transformations, meaning absolute peak positions are not disturbed (in contrast to FIR filters). It runs a predefined number of iterations. Generally two iterations are sufficient. Be cautious not to over-iterate as this will start to suppress peaks of interest as well. 32 | 33 | .. code-block:: python 34 | 35 | import heartpy as hp 36 | 37 | enhanced = hp.enhance_peaks(data, iterations=2) 38 | 39 | .. image:: images/peaknorm.jpeg 40 | 41 | 42 | Butterworth filter 43 | ~~~~~~~~~~~~~~~~~~ 44 | A Butterworth filter implementation is available to remove high frequency noise. Note that this will disturb the absolute peak positions slightly, influencing the output measures. However, in cases of heavy noise this is the only way useful information can be extracted from the signal. 45 | 46 | .. code-block:: python 47 | 48 | import heartpy as hp 49 | 50 | filtered = hp.butter_lowpass_filter(data, cutoff=5, sample_rate=100.0, order=3) 51 | 52 | .. image:: images/butterworth.jpeg 53 | 54 | Filtering is generally not recommended unless there is high noise present in the signal. An extreme example is displayed below: 55 | 56 | .. image:: images/highnoise.png 57 | 58 | 59 | Hampel Correction 60 | ~~~~~~~~~~~~~~~~~ 61 | The Hampel Correction functions as an extended version of a Hampel Filter, with a larger window size than the standard datapoint + 3 datapoints on each side (=7). The downside is that it (the current implementation at least) takes significant processing time since a window median and median absolute deviation needs to be computed for each datapoint. 62 | 63 | In the current implementation, if called, a median filter is taken over a 1-sec window of the heart rate signal. The filter output is subsequently subtracted from the original signal. When doing so, the property of noise suppression arises: 64 | 65 | .. image:: images/hampelcorrect.jpg 66 | 67 | Note that the absolute peak positions will shift slightly when using this type of filter. With it, the output measures will start to deviate as error is induced. Generally the error is not high, but by default hampel filtering is disabled. It should only be used when encountering segments of heavy noise that the algorithm cannot handle properly. 68 | 69 | 70 | Peak detection 71 | ============== 72 | The peak detection phase attempts to accommodate amplitude variation and morphology changes of the PPG complexes by using an adaptive peak detection threshold (Fig 3, III), followed by several steps of outlier detection and rejection. To identify heartbeats, a moving average is calculated using a window of 0.75 seconds on both sides of each data point. The first and last 0.75 seconds of the signal are populated with the signal’s mean, no moving average is generated for these sections. Regions of interest (ROI) are marked between two points of intersection where the signal amplitude is larger than the moving average (Fig 3, I-II), which is a standard way of detecting peaks. R-peaks are marked at the maximum of each ROI. 73 | 74 | .. image:: images/fitresultsimg.jpg 75 | 76 | *Figure showing the process of peak extraction. A moving average is used as an intersection threshold (II). Candidate peaks are marked at the maximum between intersections (III). The moving average is adjusted stepwise to compensate for varying PPG waveform morphology (I).* 77 | 78 | During the peak detection phase, the algorithm adjusts the amplitude of the calculated threshold stepwise. To find the best fit, the standard deviation between successive differences (SDSD, see also 2.2) is minimised and the signal’s BPM is checked. This represents a fast method of approximating the optimal threshold by exploiting the relative regularity of the heart rate signal. As shown in the figure below, missing one R-peak (III.) already leads to a substantial increase in SDSD compared to the optimal fit (II.). Marking incorrect R-peaks also leads to an increase in SDSD (I.). The lowest SDSD value that is not zero, in combination with a likely BPM value, is selected as the best fit. The BPM must lie within a predetermined range (default: 40 <= BPM <= 180, range settable by user). 79 | 80 | The figure below displays how the SDSD relates to peak fitting. In essence the fitting function exploits the strong regularity expected in the heart rate signal. 81 | 82 | .. image:: images/Figure_PeakDetection2.jpeg 83 | 84 | *Figure showing how the SDSD responds strongly even to a single missed beat (bottom plot), and is lowest when all peaks are properly detected (middle plot).* 85 | 86 | Whenever clipping occurs, the algorithm detects this and will attempt to reconstruct the waveform by spline interpolation. This is discussed under `Clipping detection and interpolation`_ 87 | 88 | An optional 'high precision mode' is available that takes the signal surrounding each detected peak (+/- 100ms on both ends), and upsamples it to simulate a higher sampling rate for more accurate peak position estimation. By default it upsamples to 1000Hz to provide ms-accurate peak position estimations. 89 | 90 | 91 | Peak rejection 92 | ============== 93 | After the fitting phase, several incorrectly detected peaks may still remain due to various factors. These are tested and rejected based on a thresholded value for the RR-intervals in the section: 94 | 95 | .. image:: images/peakthresholding.jpeg 96 | 97 | Thresholds are computed based on the mean of the RR-intervals in the segments. Thresholds are determined as **RR_mean +/- (30% of RR_mean, with minimum value of 300)** (+ or - for upper and lower threshold, respectively). If the RR-interval exceeds one of the thresholds, it is ignored. 98 | 99 | 100 | Calculation of measures 101 | ======================= 102 | All measures are computed on the detected and accepted peaks in the segment. When RR-intervals are used in computation, only the intervals created by two adjacent, accepted, peaks are used. Whenever differences in RR-intervals are required (for example in the RMSSD), only intervals between two adjacens RR-intervals, which in turn are created by three adjacent, accepted, peaks are used. This ensures that any rejected peaks do not inject measurement error in the subsequent measure calculations. 103 | 104 | Time-series 105 | ~~~~~~~~~~~ 106 | Time series measurements are computed from detected peaks. The output measures are: 107 | 108 | - beats per minute (BPM) 109 | - interbeat interval (IBI) 110 | - standard deviation of RR intervals (SDNN) 111 | - standard deviation of successive differences (SDSD) 112 | - root mean square of successive differences (RMSSD) 113 | - proportion of successive differences above 20ms (pNN20) 114 | - proportion of successive differences above 50ms (pNN50) 115 | - median absolute deviation of RR intervals (MAD) 116 | 117 | 118 | Frequency Domain 119 | ~~~~~~~~~~~~~~~~ 120 | Frequency domain measures computed are: 121 | 122 | - low-frequency, frequency spectrum between 0.05-0.15Hz (LF) 123 | - high-frequency, frequency spectrum between 0.15-0.5Hz (HF) 124 | - the ration high frequency / low frequency (HF/LF) 125 | 126 | The measures are computed from the PSD (Power Spectral Density), which itself is estimated using either FFT-based, Periodogram-based, or Welch-based methods. The default is Welch's method. 127 | 128 | Estimating breathing rate 129 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 130 | One interesting property of the heart is that the frequency with which it beats is strongly influenced by breathing, through the autonomous nervous system. It is one of the reasons why deep breaths can calm nerves. We can also exploit this relationship to extract breathing rate from a segment of heart rate data. For example, using a dataset from [1]_ which contains both CO2 capnometry signals as well as PPG signals, we can see the relationship between breathing and the RR-intervals clearly. Below are plotted the CO2 capnometry signal (breathing signal measured at the nose), as well as the (upsampled) signal created by the RR-intervals: 131 | 132 | .. image:: images/CO2_RRbreath.jpg 133 | :align: center 134 | 135 | The problem is now reduced to one of frequency domain transformation. Breathing rate can be extracted using the toolkit. After calling the 'process' function, breathing rate (in Hz) is available in the dict{} object that is returned. 136 | 137 | .. code-block:: python 138 | 139 | import heartpy as hp 140 | 141 | data = hp.get_data('data.csv') 142 | fs = 100.0 143 | working_data, measures = hp.process(data, fs, report_time=True) 144 | print('breathing rate is: %s Hz' %measures['breathingrate']) 145 | 146 | This will result in: 147 | 148 | .. code-block:: python 149 | 150 | breathing rate is: 0.16109544905356424 Hz 151 | 152 | 153 | 154 | 155 | 156 | References 157 | ========== 158 | 159 | .. [1] W. Karlen, S. Raman, J. M. Ansermino, and G. A. Dumont, “Multiparameter respiratory rate estimation from the photoplethysmogram,” IEEE transactions on bio-medical engineering, vol. 60, no. 7, pp. 1946–53, 2013. DOI: 10.1109/TBME.2013.2246160 PMED: http://www.ncbi.nlm.nih.gov/pubmed/23399950 -------------------------------------------------------------------------------- /docs/apiref.rst: -------------------------------------------------------------------------------- 1 | ************* 2 | API Reference 3 | ************* 4 | 5 | .. toctree:: 6 | :maxdepth: 3 7 | :caption: . 8 | 9 | heartpy.heartpy 10 | 11 | heartpy.analysis 12 | heartpy.datautils 13 | heartpy.exceptions 14 | heartpy.filtering 15 | heartpy.peakdetection 16 | heartpy.preprocessing 17 | heartpy.visualizeutils -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 14 | # 15 | import os 16 | import sys 17 | sys.path.insert(0, os.path.abspath('..')) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = 'Python Heart Rate Analysis Toolkit' 23 | copyright = '2018, Paul van Gent' 24 | author = 'Paul van Gent' 25 | 26 | # The short X.Y version 27 | version = '1.2.5' 28 | # The full version, including alpha/beta/rc tags 29 | release = '1.2.5' 30 | 31 | 32 | # -- General configuration --------------------------------------------------- 33 | 34 | # If your documentation needs a minimal Sphinx version, state it here. 35 | # 36 | # needs_sphinx = '1.0' 37 | 38 | # Add any Sphinx extension module names here, as strings. They can be 39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 | # ones. 41 | extensions = [ 42 | 'sphinx.ext.autodoc', 43 | 'sphinx.ext.napoleon', 44 | 'sphinx.ext.viewcode', 45 | ] 46 | 47 | # Napoleon settings 48 | napoleon_numpy_docstring = True 49 | napoleon_include_init_with_doc = False 50 | napoleon_include_private_with_doc = False 51 | napoleon_include_special_with_doc = True 52 | napoleon_use_admonition_for_examples = True 53 | napoleon_use_admonition_for_notes = False 54 | napoleon_use_admonition_for_references = True 55 | napoleon_use_ivar = False 56 | napoleon_use_param = True 57 | napoleon_use_rtype = True 58 | 59 | # Add any paths that contain templates here, relative to this directory. 60 | templates_path = ['_templates'] 61 | 62 | # The suffix(es) of source filenames. 63 | # You can specify multiple suffix as a list of string: 64 | # 65 | # source_suffix = ['.rst', '.md'] 66 | source_suffix = '.rst' 67 | 68 | # The master toctree document. 69 | master_doc = 'index' 70 | 71 | # The language for content autogenerated by Sphinx. Refer to documentation 72 | # for a list of supported languages. 73 | # 74 | # This is also used if you do content translation via gettext catalogs. 75 | # Usually you set "language" from the command line for these cases. 76 | language = None 77 | 78 | # List of patterns, relative to source directory, that match files and 79 | # directories to ignore when looking for source files. 80 | # This pattern also affects html_static_path and html_extra_path . 81 | exclude_patterns = [] 82 | 83 | # The name of the Pygments (syntax highlighting) style to use. 84 | pygments_style = 'sphinx' 85 | 86 | 87 | # -- Options for HTML output ------------------------------------------------- 88 | 89 | # The theme to use for HTML and HTML Help pages. See the documentation for 90 | # a list of builtin themes. 91 | # 92 | html_theme = 'sphinx_rtd_theme' 93 | 94 | # Theme options are theme-specific and customize the look and feel of a theme 95 | # further. For a list of options available for each theme, see the 96 | # documentation. 97 | # 98 | # html_theme_options = {} 99 | 100 | # Add any paths that contain custom static files (such as style sheets) here, 101 | # relative to this directory. They are copied after the builtin static files, 102 | # so a file named "default.css" will overwrite the builtin "default.css". 103 | html_static_path = ['_static'] 104 | 105 | # Custom sidebar templates, must be a dictionary that maps document names 106 | # to template names. 107 | # 108 | # The default sidebars (for documents that don't match any pattern) are 109 | # defined by theme itself. Builtin themes are using these templates by 110 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 111 | # 'searchbox.html']``. 112 | # 113 | html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']} 114 | 115 | 116 | # -- Options for HTMLHelp output --------------------------------------------- 117 | 118 | # Output file base name for HTML help builder. 119 | htmlhelp_basename = 'PythonHeartRateAnalysisToolkitdoc' 120 | 121 | 122 | # -- Options for LaTeX output ------------------------------------------------ 123 | 124 | latex_elements = { 125 | # The paper size ('letterpaper' or 'a4paper'). 126 | # 127 | # 'papersize': 'letterpaper', 128 | 129 | # The font size ('10pt', '11pt' or '12pt'). 130 | # 131 | # 'pointsize': '10pt', 132 | 133 | # Additional stuff for the LaTeX preamble. 134 | # 135 | # 'preamble': '', 136 | 137 | # Latex figure (float) alignment 138 | # 139 | # 'figure_align': 'htbp', 140 | } 141 | 142 | # Grouping the document tree into LaTeX files. List of tuples 143 | # (source start file, target name, title, 144 | # author, documentclass [howto, manual, or own class]). 145 | latex_documents = [ 146 | (master_doc, 'PythonHeartRateAnalysisToolkit.tex', 'Python Heart Rate Analysis Toolkit Documentation', 147 | 'Paul van Gent', 'manual'), 148 | ] 149 | 150 | 151 | # -- Options for manual page output ------------------------------------------ 152 | 153 | # One entry per manual page. List of tuples 154 | # (source start file, name, description, authors, manual section). 155 | man_pages = [ 156 | (master_doc, 'pythonheartrateanalysistoolkit', 'Python Heart Rate Analysis Toolkit Documentation', 157 | [author], 1) 158 | ] 159 | 160 | 161 | # -- Options for Texinfo output ---------------------------------------------- 162 | 163 | # Grouping the document tree into Texinfo files. List of tuples 164 | # (source start file, target name, title, author, 165 | # dir menu entry, description, category) 166 | texinfo_documents = [ 167 | (master_doc, 'PythonHeartRateAnalysisToolkit', 'Python Heart Rate Analysis Toolkit Documentation', 168 | author, 'PythonHeartRateAnalysisToolkit', 'One line description of project.', 169 | 'Miscellaneous'), 170 | ] 171 | 172 | 173 | # -- Extension configuration ------------------------------------------------- -------------------------------------------------------------------------------- /docs/development.rst: -------------------------------------------------------------------------------- 1 | *********** 2 | Development 3 | *********** 4 | 5 | Release Notes 6 | ============= 7 | 8 | V0.8.1 9 | ~~~~~~ 10 | 11 | - Added changelog to repository 12 | - Implemented clipping detection and interpolation functionality 13 | - Changed FFT calculation flag to default False, as in some cases the FFT takes very long to compute. Possible causes and fixes to be investigated 14 | - Pushed readthedocs.io documentation source structure to repository 15 | - Added encoding argument to get_data function, per the NumPy deprecation of not using encoding. For more info: https://docs.scipy.org/doc/numpy-1.14.0/release.html#encoding-argument-for-text-io-functions 16 | 17 | V0.8.2 18 | ~~~~~~ 19 | 20 | - RR_difference interval no longer taken into account when RR-intervals are not technically adjacent due to rejected peak presence in between 21 | - Moved matplotlib import statement so that it is no longer necessary unless calling the plot functionality, reduces need to install irrelevant dependencies when plotting functionality not needed 22 | - Added Hampel Filter with settable filtersize 23 | - Added method to suppress noisy segments called 'Hampel Corrector', called such as it's simply a Hampel Filter with large window size. Computationally on the expensive side so disabled by default, but very good at suppressing noisy segments without influencing peak positions in the rest of the signal. 24 | - Added breathing rate extraction method. Stores estimated breathing rate in measures['breathingrate'] 25 | - Made BPM threshold values settable 26 | - Added Periodogram- and Welch-based PSD estimation 27 | - Added support for edge case where clipping segment starts early in signal, meaning there is insufficient data to interpolate accurately. 28 | 29 | V1.0 30 | ~~~~ 31 | - Released Version 1.0 32 | - Added flag to disable scaling when interpolating clipping segments. Useful for data with large amplitude variations. 33 | - Added marking of rejected segments in plotter 34 | - Added automatic peak rejection when first peak occurs within 150ms, since the signal might start just after a peak, which creates slight inaccuracy. 35 | - Added segment rejection based on percentage of incorrect peaks. 36 | 37 | V1.0.1 38 | ~~~~~~ 39 | - Changed segmentwise rejection API to simplify plotting 40 | 41 | V1.1 42 | ~~~~ 43 | - We are now officially called HeartPy 44 | - Changed overall structure to get rid of global dicts, allows for modular or multithreaded use easier. 45 | - Changed docs to reflect changes 46 | 47 | V1.1.2 48 | ~~~~~~ 49 | - Added high-pass and band-pass Butterworth filters 50 | - Fixed case where no peak-peak differences over 20ms in a signal caused an exception 51 | - Fixed case where intermittent noisy signals resulted in exception when calculating breathing rate 52 | - Added scale_sections() function that uses local scaling rather than global 53 | - Added preprocess_ecg(data, sample_rate) function that attempts to preprocess ecg data. Note: doubles sampling rate of returned data. 54 | - Added highpass and bandpass filtering options. Called through filtersignal function with argument filtertype= lowpass/highpass/bandpass. 55 | - Changed way peak fitting works by adding extra peak validation step between fitting phases 56 | 57 | V1.1.3 58 | ~~~~~~ 59 | - Added functions to allow for continous measure output 60 | - Added make_windows() function to divide input data into evenly sized segments with settable windowsize and settable overlap 61 | - Added two functions to remove outliers from continous set: outliers_modified_z(), and outliers_iqr_method(). Both take a list of numpy array of one continous measure and remove outliers due to incorrectly analysed sections, if any outliers are persent. 62 | 63 | V1.1.4 64 | ~~~~~~ 65 | - Added wrapper function 'process_segmentwise()' that splits hrdata in sections (overlap between sections is settable), and analyses each section separately. Returns two dict objects with all outputs. 66 | - Changed rolling mean function to no longer flatten off the more it is raised, turned out to be more robust. 67 | - Removed peak validation step implemented in V1.1.2 -> after further testing it turned out to be detrimental to many cases. 68 | - Updated docs to reflect the changes to the codebase. 69 | 70 | V1.1.5 71 | ~~~~~~ 72 | - Adapted `make_windows()` to accept tail end of data. Updated `process_segmentise()` to make use of this. 73 | - Updated docs explaining the new functionality 74 | - Fixed error where 'fast' segmentwise method returned single instance in dict rather than sequence 75 | - Fixed error where 'fast' segmentwise method returned empty working_data object 76 | - Started properly structuring module. 77 | 78 | V1.1.6 79 | ~~~~~~ 80 | - moved nn20/nn50 temp containers to 'working_data' dict in stead of output measures (see issue #15). 81 | - fixed wrongly unpacked kwargs in process_segmentwise 82 | - deprecated doctests.txt for now - no longer functional and need updating. 83 | - process_segmentwise() now returns the indices of the slices made in the original data, these are appended to both the returned measures{} and working_data{}. Closes #14 84 | - updated process_segmentwise to allow control over last (likely incomplete) segment. Setting min_size to -1 now puts the tail end of the data into the last bin, making it larger than the others. Closes #16 85 | - fixed sample_rate not being passed to rolmean() when esitmating the breathing rate 86 | 87 | V1.1.7 88 | ~~~~~~ 89 | - added peak interpolation (high precision mode) method 'interpolate_peaks' that allows more accurate estimation of peak positions in signal of low sampling frequency 90 | - in segmentwise processing, fixed bug where the interquartile-range was also used when modified z-score approach was requested. 91 | - fixed mistake in argument order in process_segmentwise function docstring 92 | - implemented 'segment_plotter()' function. This will plot segments and save plots to folder after running 'process_segmentwise()'. 93 | - updated docs to include new functionality. 94 | 95 | V1.1.7a 96 | ~~~~~~~ 97 | - hotfix for process_segmentwise issue where multiple copies of the same index range were placed in the output. 98 | 99 | V1.2 100 | ~~~~ 101 | - Changed organisation HeartPy, it is now split into multiple modules to keep the growing library ordered. This opens the way to the planned addition of a GUI. 102 | - Added examples that also function as doctests to all functions 103 | - Added extensive documentation docstrings to all functions 104 | - Added function load_exampledata() that loads the available example data files directly from github. 105 | - Added several jupyter notebooks in Examples folder, illustrating how to work with different types of data. 106 | - Added function to reject outliers in RR-list and compute measures based on cleaned list. See: clean_rr_intervals() 107 | 108 | 109 | Questions 110 | ========= 111 | contact me at P.vanGent@tudelft.nl -------------------------------------------------------------------------------- /docs/documents/HumanistPaper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/documents/HumanistPaper.pdf -------------------------------------------------------------------------------- /docs/heartpy.analysis.rst: -------------------------------------------------------------------------------- 1 | analysis 2 | ======== 3 | 4 | .. automodule:: heartpy.analysis 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/heartpy.datautils.rst: -------------------------------------------------------------------------------- 1 | datautils 2 | ========= 3 | 4 | .. automodule:: heartpy.datautils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/heartpy.exceptions.rst: -------------------------------------------------------------------------------- 1 | exceptions 2 | ========== 3 | 4 | .. automodule:: heartpy.exceptions 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/heartpy.filtering.rst: -------------------------------------------------------------------------------- 1 | Filtering 2 | ========= 3 | 4 | .. automodule:: heartpy.filtering 5 | :members: 6 | :undoc-members: 7 | :exclude-members: Intermediate -------------------------------------------------------------------------------- /docs/heartpy.heartpy.rst: -------------------------------------------------------------------------------- 1 | heartpy (main) 2 | ============== 3 | 4 | Main functions 5 | ~~~~~~~~~~~~~~ 6 | 7 | .. autofunction:: heartpy.process 8 | 9 | .. autofunction:: heartpy.process_segmentwise 10 | 11 | Visualisation 12 | ~~~~~~~~~~~~~ 13 | 14 | .. autofunction:: heartpy.plotter 15 | 16 | .. autofunction:: heartpy.segment_plotter 17 | 18 | Preprocessing functions 19 | ~~~~~~~~~~~~~~~~~~~~~~~ 20 | 21 | .. autofunction:: heartpy.enhance_peaks 22 | 23 | .. autofunction:: heartpy.enhance_ecg_peaks 24 | 25 | .. autofunction:: heartpy.flip_signal 26 | 27 | .. autofunction:: heartpy.remove_baseline_wander 28 | 29 | .. autofunction:: heartpy.scale_data 30 | 31 | .. autofunction:: heartpy.scale_sections 32 | 33 | Utilities 34 | ~~~~~~~~~ 35 | 36 | .. autofunction:: heartpy.get_data 37 | 38 | .. autofunction:: heartpy.load_exampledata 39 | 40 | .. autofunction:: heartpy.get_samplerate_mstimer 41 | 42 | .. autofunction:: heartpy.get_samplerate_datetime 43 | 44 | Filtering 45 | ~~~~~~~~~ 46 | 47 | .. autofunction:: heartpy.filter_signal 48 | 49 | .. autofunction:: heartpy.hampel_filter 50 | 51 | .. autofunction:: heartpy.hampel_correcter -------------------------------------------------------------------------------- /docs/heartpy.peakdetection.rst: -------------------------------------------------------------------------------- 1 | Peakdetection 2 | ============= 3 | 4 | .. automodule:: heartpy.peakdetection 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/heartpy.preprocessing.rst: -------------------------------------------------------------------------------- 1 | Preprocessing 2 | ============= 3 | 4 | .. automodule:: heartpy.preprocessing 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/heartpy.rst: -------------------------------------------------------------------------------- 1 | heartpy package 2 | =============== 3 | 4 | Submodules 5 | ---------- 6 | 7 | .. toctree:: 8 | 9 | heartpy.analysis 10 | heartpy.datautils 11 | heartpy.exceptions 12 | heartpy.filtering 13 | heartpy.heartpy 14 | heartpy.peakdetection 15 | heartpy.preprocessing 16 | heartpy.visualizeutils 17 | 18 | Module contents 19 | --------------- 20 | 21 | .. automodule:: heartpy 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | -------------------------------------------------------------------------------- /docs/heartpy.visualizeutils.rst: -------------------------------------------------------------------------------- 1 | visualizeutils 2 | ============== 3 | 4 | .. automodule:: heartpy.visualizeutils 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/heartrateanalysis.rst: -------------------------------------------------------------------------------- 1 | .. _heart rate analysis: 2 | 3 | ******************* 4 | Heart Rate Analysis 5 | ******************* 6 | 7 | A complete description of the algorithm can be found in: . 8 | 9 | 10 | Background 11 | ========== 12 | 13 | The Python Heart Rate Analysis Toolkit has been designed mainly with PPG signals in mind. The Raspberry Pi and the Arduino platforms have enabled more diverse data collection methods by providing affordable open hardware platforms. This is great for researchers, especially because traditional ECG may be considered to invasive or too disruptive for experiments. 14 | 15 | 16 | Measuring the heart rate signal 17 | =============================== 18 | 19 | Two often used ways of measuring the heart rate are the electrocardiogram (ECG) and the Photoplethysmogram (PPG). Many of the online available algorithms are designed for ECG measurements. Applying an ECG algorithm (like the famous Pan-Tompkins one [1]_) to PPG data does not necessarily make sense. Although both the ECG and PPG are measures for cardiac activity, they measure very different constructs to estimate it. 20 | 21 | The ECG measures the electrical activations that lead to the contraction of the heart muscle, using electrodes attached to the body, usually at the chest. The PPG uses a small optical sensor in conjunction with a light source to measure the discoloration of the skin as blood perfuses through it after each heartbeat. This measuring of electrical activation and pressure waves respectively, leads to very different signal and noise properties, that require specialised tools to process. This toolkit specialises in PPG data. 22 | 23 | | 24 | 25 | .. image:: images/ppg_ecg.jpg 26 | 27 | *Figure 1: a. and b. display the ECG and PPG waveform morphology, respectively. The ECG is divided into distinct waves (a, I-V), of which the R-wave (a, II) is used for heart beat extraction. With the PPG wave, the systolic peak (b, I) is used. The plot in c. shows the relationship between ECG and PPG signals.* 28 | 29 | Most notably in the ECG is the QRS-complex (Fig 1a, I-III), which represents the electrical activation that leads to the ventricles contracting and expelling blood from the heart muscle. The R-peak is the point of largest amplitude in the signal. When extracting heart beats, these peaks are marked in the ECG. Advantages of the ECG are that it provides a good signal/noise ratio, and the R-peak that is of interest generally has a large amplitude compared to the surrounding data points (Fig 1c). The main disadvantage is that the measurement of the ECG is invasive. It requires the attachment of wired electrodes to the chest of the participant, which can interfere with experimental tasks such as driving. 30 | 31 | The PPG measures the discoloration of the skin as blood perfuses through the capillaries and arteries after each heartbeat. The signal consists of the systolic peak (Fig 1-b, I), dicrotic notch (II), and the diastolic peak (III). When extracting heart beats, the systolic peaks (I) are used. PPG sensors offer a less invasive way of measuring heart rate data, which is one of their main advantages. Usually the sensors are placed at the fingertip, earlobe, or on the wrist using a bracelet. Contactless camera-based systems have recently been demonstrated [2]_, [3]_, [4]_. These offer non-intrusive ways of acquiring the PPG signal. PPG signals have the disadvantages of showing more noise, large amplitude variations, and the morphology of the peaks displays broader variation (Figure 2b, c). This complicates analysis of the signal, especially when using software designed for ECG, which the available open source tools generally are. 32 | 33 | .. image:: images/ECG_PPG_Comparison.jpg 34 | 35 | *Figure 2 – The ECG signal (a.) shows a strong QRS complex together with little amplitude variation. The PPG signal measured simultaneously while the patient is at rest in a hospital bed (b.) shows some amplitude variation but relatively stable morphology. When measuring PPG in a driving simulator using low-cost sensors (c.), strong amplitude and waveform morphology variation is visible.* 36 | 37 | | 38 | 39 | 40 | On the Accuracy of Peak Position 41 | ================================ 42 | When analysing heart rate, the main crux lies in the accuracy of the peak position labeling being used. When extracting instantaneous heart rate (BPM), accurate peak placement is not crucial. The BPM is an aggregate measure, which is calculated as the average beat-beat interval across the entire analysed signal (segment). This makes it quite robust to outliers. 43 | 44 | However, when extracting heart rate variability (HRV) measures, the peak positions are crucial. Take as an example two often used variability measures, the RMSSD (root mean square of successive differences) and the SDSD (standard deviation of successive differences). Given a segment of heart rate data as displayed in the figure below, the RMSSD is calculated as shown. The SDSD is the standard deviation between successive differences. 45 | 46 | .. image:: images/peakdetection_rmssd.jpg 47 | :align: center 48 | 49 | *Figure 3 - Image displaying the desired peak detection result, as well as the calculation of the RMSSD measure. The SDSD measure is the standard deviation between successive differences* 50 | 51 | | 52 | 53 | Now consider that two mistakes are possible: either a beat is not detected at all (missed), or a beat is placed at an incorrect time position (incorrectly placed). These will have an effect on the calculated HRV output measures, which are highly sensitive to outliers as they are designed to capture the slight natural variation between peak-peak intervals in the heart rate signal! 54 | 55 | To illustrate the problem we have run a few simulations. We took a sample of a heart rate signal which was annotated manually, and introduced two types of errors: 56 | 57 | - We randomly dropped n% of peaks from the signal, than re-ran the analysis considering only intervals between two peaks where no missing value occurred in between. 58 | - We introduced a random position error (0.1% - 10% of peak position, meaning between about 1ms and 100ms deviation) in n% of peaks. 59 | - The simulation ran bootstrapped for 10,000 iterations, with values n=[5, 10, 20]. 60 | 61 | Results show that the effect of incorrect beat placements **far outweigh** those of missing values. As described earlier, the instantaneous heart rate (BPM) is not sensitive to outliers, as is shown in the plots as well, where almost no discernible deviation is visible. 62 | 63 | .. image:: images/bootstrapped_errors.jpg 64 | :align: center 65 | 66 | *Figure 4 - Results for manually anotated measures (ground truth), and error induction of n% missed beats, as well as error induction on the detected position of n% beats (random error 0.1% - 10%, or 1-100ms).* 67 | 68 | 69 | Take into consideration that the scale for RMSSD doesn't typically exceed +/- 130, SDSD doesn't differ by much. This means that even a few incorrectly detected peaks are already introducing large measurement errors into the output variables. The algorithm described here is specifically designed to handle noisy PPG data from cheap sensors. The main design criteria was to minimise the number of incorrectly placed peaks as to minimise the error introduced into the output measures. 70 | 71 | More information on the functioning can be found in the rest of the documentation, as well as in the technical paper here [6]_. Information on the valiation can be found in [5]_. 72 | 73 | 74 | References 75 | ========== 76 | 77 | .. [1] Pan, J., & Tompkins, W. J. A simple real-time QRS detection algorithm. IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, BME-32(3), 230–236, 1985. https://doi.org/10.1109/IEMBS.1996.647473 78 | 79 | .. [2] Y. Sun, S. Hu, V. Azorin-Peris, R. Kalawsky, and S. Greenwald, “Noncontact imaging photoplethysmography to effectively access pulse rate variability,” J. Biomed. Opt., vol. 18, no. 6, p. 61205, 2012. 80 | 81 | .. [3] M. Lewandowska, J. Ruminsky, T. Kocejko, and J. Nowak, “Measuring Pulse Rate with a Webcam - a Non-contact Method for Evaluating Cardiac Activity,” in Proceedings of the Federated Conference on Computer Science and Information Systems, 2011, no. January, pp. 405–410. 82 | 83 | .. [4] F. Bousefsaf, C. Maaoui, and a. Pruski, “Remote detection of mental workload changes using cardiac parameters assessed with a low-cost webcam,” Comput. Biol. Med., vol. 53, pp. 1–10, 2014. 84 | 85 | .. [5] van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2018). “Heart Rate Analysis for Human Factors: Development and Validation of an Open Source Toolkit for Noisy Naturalistic Heart Rate Data.“ In proceedings of the Humanist 2018 conference, 2018, pp.173-17 86 | 87 | .. [6] van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2018). Analysing Noisy Driver Physiology Real-Time Using Off-the-Shelf Sensors: Heart rate analysis software from the Taking the Fast Lane Project. http://doi.org/10.13140/RG.2.2.24895.56485 88 | 89 | -------------------------------------------------------------------------------- /docs/images/CO2_RRbreath.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/CO2_RRbreath.jpg -------------------------------------------------------------------------------- /docs/images/ECG_PPG_Comparison.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/ECG_PPG_Comparison.jpg -------------------------------------------------------------------------------- /docs/images/Figure_PeakDetection.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/Figure_PeakDetection.jpeg -------------------------------------------------------------------------------- /docs/images/Figure_PeakDetection2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/Figure_PeakDetection2.jpeg -------------------------------------------------------------------------------- /docs/images/bootstrapped_errors.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/bootstrapped_errors.jpg -------------------------------------------------------------------------------- /docs/images/bootstrapped_errors.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/bootstrapped_errors.psd -------------------------------------------------------------------------------- /docs/images/butterworth.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/butterworth.jpeg -------------------------------------------------------------------------------- /docs/images/clipping.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/clipping.jpg -------------------------------------------------------------------------------- /docs/images/clipping_correct_vertical.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/clipping_correct_vertical.jpg -------------------------------------------------------------------------------- /docs/images/fitresultsimg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/fitresultsimg.jpg -------------------------------------------------------------------------------- /docs/images/hampelcorrect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/hampelcorrect.jpg -------------------------------------------------------------------------------- /docs/images/highnoise.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/highnoise.png -------------------------------------------------------------------------------- /docs/images/output1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/output1.jpeg -------------------------------------------------------------------------------- /docs/images/output2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/output2.jpeg -------------------------------------------------------------------------------- /docs/images/output_3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/output_3.jpeg -------------------------------------------------------------------------------- /docs/images/output_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/output_4.jpg -------------------------------------------------------------------------------- /docs/images/peakdetection_rmssd.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/peakdetection_rmssd.jpg -------------------------------------------------------------------------------- /docs/images/peakdetection_rmssd.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/peakdetection_rmssd.psd -------------------------------------------------------------------------------- /docs/images/peaknorm.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/peaknorm.jpeg -------------------------------------------------------------------------------- /docs/images/peakthresholding.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/peakthresholding.jpeg -------------------------------------------------------------------------------- /docs/images/ppg_ecg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/docs/images/ppg_ecg.jpg -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to HeartPy - Python Heart Rate Analysis Toolkit's documentation! 2 | ======================================================================== 3 | 4 | .. image:: images/output_4.jpg 5 | 6 | Welcome to the documentation of the HeartPy, Python Heart Rate Analysis Toolkit. The toolkit is designed to handle (noisy) PPG data collected with either PPG or camera sensors. 7 | 8 | * The toolkit was presented at the Humanist 2018 conference in The Hague (`see paper here `_ ). 9 | 10 | * A technical paper about the functionality `is available here `_ 11 | 12 | **Please cite one or both of these papers when using the toolkit in your research!** 13 | 14 | The documentation will help you get up to speed quickly. Follow the :ref:`quickstart` guide for a general overview of how to use the toolkit in only a few lines of code. For a more in-depth review of the module's functionality you can refer to the papers mentioned above, or the :ref:`heart rate analysis` overview. 15 | 16 | Example Notebooks are available! 17 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 | If you're looking for a few hands-on examples on how to get started with HeartPy, have a look at the links below! These notebooks show how to handle various analysis tasks with HeartPy, from smartwatch data, smart ring data, regular PPG, and regular (and very noisy) ECG. The notebooks sometimes don't render through the github engine, so either open them locally, or use an online viewer like [nbviewer](https://nbviewer.jupyter.org/). 19 | 20 | We recommend you follow the notebooks in order: 21 | - [1. Analysing a PPG signal](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/1_regular_PPG/Analysing_a_PPG_signal.ipynb), a notebook for starting out with HeartPy using built-in examples. 22 | - [2. Analysing an ECG signal](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/1_regular_ECG/Analysing_a_regular_ECG_signal.ipynb), a notebook for working with HeartPy and typical ECG data. 23 | - [3. Analysing smartwatch data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/smartwatch_data/Analysing_Smartwatch_Data.ipynb), a notebook on analysing low resolution PPG data from a smartwatch. 24 | - [4. Analysing smart ring data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/smartring_data/Analysing_Smart_Ring_Data.ipynb), a notebook on analysing smart ring PPG data. 25 | - [5. Analysing noisy ECG data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/noisy_ECG/Analysing_Noisy_ECG.ipynb), an advanced notebook on working with very noisy ECG data, using data from the MIT-BIH noise stress test dataset. 26 | 27 | 28 | Note on using it in scientific research 29 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 30 | Support is available at P.vanGent@tudelft.nl. When using the toolkit in your scientific work: please include me in the process. I can help you implement the toolkit, and the collaboration will also help improve the toolkit so that it can handle more types of data in the future. 31 | 32 | Index 33 | ====== 34 | 35 | .. toctree:: 36 | :maxdepth: 3 37 | :caption: . 38 | 39 | 40 | quickstart 41 | apiref 42 | heartrateanalysis 43 | algorithmfunctioning 44 | development 45 | 46 | 47 | -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | heartpy 2 | ======= 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | heartpy 8 | -------------------------------------------------------------------------------- /docs/quickstart.rst: -------------------------------------------------------------------------------- 1 | .. _quickstart: 2 | 3 | **************** 4 | Quickstart Guide 5 | **************** 6 | 7 | Installation 8 | ============ 9 | 10 | pip 11 | ~~~ 12 | :code:`python -m pip install heartpy` 13 | 14 | github 15 | ~~~~~~ 16 | `Download the latest release here `_ 17 | 18 | :code:`python setup.py install` 19 | 20 | 21 | Basic Example 22 | ============= 23 | Import the `HeartPy` module and load a file 24 | 25 | 26 | .. code-block:: python 27 | 28 | import heartpy as hp 29 | 30 | hrdata = hp.get_data('data.csv') 31 | 32 | 33 | This returns a :code:`numpy.ndarray`. 34 | 35 | Analysis requires the sampling rate for your data. If you know this *a priori*, supply it when calling the `process()` function, which returns a `dict{}` object containing all measures: 36 | 37 | .. code-block:: python 38 | 39 | import heartpy as hp 40 | 41 | #load example data 42 | data, _ = hp.load_exampledata(0) #this example set is sampled at 100Hz 43 | 44 | working_data, measures = hp.process(data, 100.0) 45 | 46 | 47 | **process(dataset, sample_rate, windowsize=0.75, report_time=False, 48 | calc_freq=False, freq_method='welch', interp_clipping=False, 49 | clipping_scale=False, interp_threshold=1020, hampel_correct=False, 50 | bpmmin=40, bpmmax=180, reject_segmentwise=False, 51 | high_precision=False, high_precision_fs=1000.0, 52 | measures = {}, working_data = {})** 53 | 54 | requires two arguments: 55 | 56 | * **dataset:** An 1-dimensional list, numpy array or array-like object containing the heart rate data; 57 | * **sample_rate**: The samplerate of the signal in Hz; 58 | 59 | Several optional arguments are available: 60 | 61 | * **windowsize:** _optional_ `windowsize` is the window size used for the calculation of the moving average. The windowsize is defined as `windowsize * samplerate`. Default windowsize=0.75. 62 | * **report_time:** _optional_ whether to report total processing time of process() loop. 63 | * **calc_fft:** _optional_ whether to calculate frequency domain measures. Default = false Note: can cause slowdowns in some cases. 64 | * **calc_freq:** _optional_ whether to calculate frequency domain measures. Default = false Note: can cause slowdowns in some cases. 65 | * **freq_method:** _optional_ method used to extract the frequency spectrum. Available: 'fft' (Fourier Analysis), 'periodogram', and 'welch' (Welch's method), Default = 'welch' 66 | * **interp_clipping:** if True, clipping parts of the signal are identified and the implied peak shape is interpolated. Default=False 67 | * **clipping_scale:** whether to scale the data priod to clipping detection. Can correct errors if signal amplitude has been affected after digitization (for example through filtering). Default = False 68 | * **interp_threshold**: the amplitude threshold beyond which will be checked for clipping. Recommended is to take this as the maximum value of the ADC with some margin for signal noise (default 1020, default ADC max 1024) 69 | * **hampel_correct:** whether to reduce noisy segments using large median filter. Disabled by default due to computational complexity, and generally it is not necessary. Default = false. 70 | * **bpmmin:** minimum value to see as likely for BPM when fitting peaks. Default = 40 71 | * **bpmmax:** maximum value to see as likely for BPM when fitting peaks. Default = 180 72 | * **reject_segmentwise:** whether to reject segments with more than 30% rejected beats. By default looks at segments of 10 beats at a time. Default = false. 73 | * **high_precision:** _optional_ boolean, whether to estimate peak positions by upsampling hr signal to sample rate as specified in _high_precision_fs_. Default = False 74 | * **high_precision_fs:** _optional_: the sample rate to which to upsample for ore accurate peak position estimation. Default = 1000 Hz, resulting in 1 ms peak position accuracy 75 | 76 | 77 | * **measures:** measures dict in which results are stored. Custom dictionary can be passed, otherwise one is created and returned. 78 | * **working_data:** working_data dict in which results are stored. Custom dictionary can be passed, otherwise one is created and returned. 79 | 80 | Two :code:`dict{}` objects are returned: one working data dict, and one containing all measures. Access as such: 81 | 82 | .. code-block:: python 83 | 84 | import heartpy as hp 85 | 86 | data = hp.load_exampledata(0) 87 | fs = 100.0 #example file 0 is sampled at 100.0 Hz 88 | 89 | working_data, measures = hp.process(data, fs, report_time=True) 90 | 91 | print(measures['bpm']) #returns BPM value 92 | print(measures['rmssd']) # returns RMSSD HRV measure 93 | 94 | #You can also use Pandas if you so desire 95 | import pandas as pd 96 | df = pd.read_csv("data.csv", names=['hr']) 97 | #note we need calc_freq if we want frequency-domain measures 98 | working_data, measures = hp.process(df['hr'].values, fs, calc_freq=True) 99 | print(measures['bpm']) 100 | print(measures['lf/hf']) 101 | 102 | 103 | Getting Data From Files 104 | ======================= 105 | The toolkit has functionality to open and parse delimited .csv and .txt files, as well as matlab .mat files. [Find the data here](https://github.com/paulvangentcom/heartrate_analysis_python/tree/master/heartpy/data) Opening a file is done by the :code:`get_data()` function: 106 | 107 | .. code-block:: python 108 | 109 | import heartpy as hp 110 | 111 | data = hp.get_data('data.csv') 112 | 113 | This returns a 1-dimensional :code:`numpy.ndarray` containing the heart rate data. 114 | 115 | :code:`get_data(filename, delim = ',', column_name = 'None')` requires one argument: 116 | 117 | * **filename:** absolute or relative path to a valid (delimited .csv/.txt or matlab .mat) file; 118 | 119 | Several optional arguments are available: 120 | 121 | * **delim** _optional_: when loading a delimited .csv or .txt file, this specifies the delimiter used. Default delim = ','; 122 | * **column_name** _optional_: In delimited files with header: specifying column_name will return data from that column. Not specifying column_name for delimited files will assume the file contains only numerical data, returning np.nan values where data is not numerical. For matlab files: column_name specifies the table name in the matlab file. 123 | 124 | 125 | Examples: 126 | 127 | .. code-block:: python 128 | 129 | import heartpy as hp 130 | 131 | #load data from a delimited file without header info 132 | headerless_data = hp.get_data('data.csv') 133 | 134 | #load data from column labeles 'hr' in a delimited file with header info 135 | headered_data = hp.get_data('data2.csv', column_name = 'hr') 136 | 137 | #load matlab file 138 | matlabdata = hp.get_data('data2.mat', column_name = 'hr') 139 | #note that the column_name here represents the table name in the matlab file 140 | 141 | 142 | Estimating Sample Rate 143 | ====================== 144 | The toolkit has a simple built-in sample-rate detection. It can handle ms-based timers and datetime-based timers. 145 | 146 | .. code-block:: python 147 | 148 | import heartpy as hp 149 | 150 | #if you have a ms-based timer: 151 | mstimer_data = hp.get_data('data2.csv', column_name='timer') 152 | fs = hp.get_samplerate_mstimer(mstimer_data) 153 | print(fs) 154 | 155 | #if you have a datetime-based timer: 156 | datetime_data = hp.get_data('data3.csv', column_name='datetime') 157 | fs = hp.get_samplerate_datetime(datetime_data, timeformat='%Y-%m-%d %H:%M:%S.%f') 158 | print(fs) 159 | 160 | 161 | :code:`get_samplerate_mstimer(timerdata)` requires one argument: 162 | 163 | * **timerdata:** a list, numpy array or array-like object containing ms-based timestamps (float or int). 164 | 165 | 166 | :code:`get_samplerate_datetime(datetimedata, timeformat = '%H:%M:%S.f')` requires one argument: 167 | 168 | * **datetimedata:** a list, numpy array or array-like object containing datetime-based timestamps (string); 169 | 170 | One optional argument is available: 171 | 172 | * **timeformat** _optional_: the format of the datetime-strings in your dataset. Default timeformat='%H:%M:%S.f', 24-hour based time including ms: 21:43:12.569. 173 | 174 | 175 | Plotting Results 176 | ================ 177 | A plotting function is included. It plots the original signal and overlays the detected peaks and the rejected peaks (if any were rejected). 178 | 179 | Example with the included `data.csv` example file (recorded at 100.0Hz): 180 | 181 | .. code-block:: python 182 | 183 | import heartpy as hp 184 | 185 | data = hp.get_data('data.csv') 186 | working_data, measures = hp.process(data, 100.0) 187 | hp.plotter(working_data, measures) 188 | 189 | This returns: 190 | 191 | .. image:: images/output1.jpeg 192 | 193 | :code:`plotter(working_data, measures, show = True, title = 'Heart Rate Signal Peak Detection')` has two required arguments: 194 | 195 | * **working_data** The working data :code:`dict{}` container returned by the :code:`process()` function. 196 | * **measures** The measures :code:`dict{}` container returned by the :code:`process()` function. 197 | 198 | Several optional arguments are available: 199 | 200 | * **show** _optional_: if set to True a plot is visualised, if set to False a matplotlib.pyplot object is returned. Default show = True; 201 | * **title** _optional_: Sets the title of the plot. If not specified, default title is used. 202 | 203 | **Examples:** 204 | 205 | .. code-block:: python 206 | 207 | import heartpy as hp 208 | hrdata = hp.get_data('data2.csv', column_name='hr') 209 | timerdata = hp.get_data('data2.csv', column_name='timer') 210 | 211 | working_data, measures = hp.process(hrdata, hp.get_samplerate_mstimer(timerdata)) 212 | 213 | #plot with different title 214 | hp.plotter(working_data, measures, title='Heart Beat Detection on Noisy Signal') 215 | 216 | 217 | .. image:: images/output2.jpeg 218 | 219 | Measures are only calculated for non-rejected peaks and intervals between two non-rejected peaks. Rejected detections do not influence the calculated measures. 220 | 221 | By default a plot is visualised when plotter() is called. The function returns a matplotlib.pyplot object if the argument show=False is passed: 222 | 223 | .. code-block:: python 224 | 225 | working_data, measures = hp.process(hrdata, hp.get_samplerate_mstimer(timerdata)) 226 | plot_object = hp.plotter(working_data, measures, show=False) 227 | 228 | This returns: 229 | 230 | .. code-block:: python 231 | 232 | 233 | 234 | Object can then be saved, appended to, or visualised: 235 | 236 | .. code-block:: python 237 | 238 | working_data, measures = hp.process(hrdata, hp.get_samplerate_mstimer(timerdata)) 239 | plot_object = hp.plotter(working_data, measures, show=False) 240 | 241 | plot_object.savefig('plot_1.jpg') #saves the plot as JPEG image. 242 | 243 | plot_object.show() #displays plot 244 | 245 | 246 | Plotting results of segmentwise analysis 247 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 248 | After calling `process_segmentwise()`, the returned working_data and measures contain analysis results on the segmented data. This can be visualised using the function `segment_plotter()`: 249 | 250 | :code:`segment_plotter(working_data, measures, title='Heart Rate Signal Peak Detection', path = '', start=0, end=None, step=1)`. The function has two required arguments: 251 | 252 | * **working_data** The working data :code:`dict{}` container returned by the :code:`process_segmentwise()` function. 253 | * **measures** The measures :code:`dict{}` container returned by the :code:`process_segmentwise()` function. 254 | 255 | Several optional arguments are available: 256 | 257 | * **title** _optional_: Sets the title of the plot. If not specified, default title is used. 258 | * **path** _optional_: Where to save the plots. Folder will be created if it doesn't exist. 259 | * **start** _optional_: segment index to start at, default = 0, beginning of segments. 260 | * **end** _optional_: plotting stops when this segment index is reached. Default=None, which is interpreted as meaning plot until end of segment list. 261 | * **step** _optional_: the stepsize of the plotting. Every step'th segment will be visualised. Default=1, meaning every segment. 262 | 263 | 264 | Getting heart rate over time 265 | ============================ 266 | There may be situations where you have a long heart rate signal, and want to compute how the heart rate measures change over time in the signal. HeartPy includes the `process_segmentwise` function that does just that! 267 | 268 | Usage works like this: 269 | 270 | .. code-block:: python 271 | 272 | working_data, measures = hp.process_segmentwise(data, sample_rate=100.0, segment_width = 40, segment_overlap = 0.25) 273 | 274 | 275 | What this will do is segment the data into sections of 40 seconds each. In this example each window will have an overlap with the previous window of 25%, meaning each iteration the 40 second window moves by 30 seconds. 276 | 277 | `process_segmentwist()` expects two arguments: 278 | - data: 1-d numpy array or list containing heart rate data 279 | - sample_rate: the sample rate with which the data is collected, in Hz 280 | 281 | Several optional arguments are possible: 282 | 283 | - **segment_width**: the width of the window used, in seconds. 284 | - **segment_overlap**: the fraction of overlap between adjacent windows: 0 <= segment_overlap < 1.0 285 | - **replace_outliers**: bool, whether to replace outliers in the computed measures with the median 286 | - **segment_min_size**: When segmenting, the tail end of the data if often shorter than the specified size in segment_width. The tail end is only included if it is longer than the `segment_min_size`. Default = 20. Setting this too low is not recommended as it may make peak fitting unstable, and it also doesn't make much sense from a biosignal analysis perspective to use very short data segments. 287 | - **outlier_method**: which outlier detection method to use. The interquartile-range ('iqr') or modified z-score ('z-score') methods are available as of now. Default: 'iqr' 288 | - **mode**: 'fast' or 'full'. The 'fast' method detects peaks over the entire signal, then segments and computes heart rate and heart rate variability measures. The 'full' method segments the data first, then runs the full analysis pipelin on each segment. For small numbers of segments (<10), there is not much difference and the fast method can actually be slower. The more segments there are, the larger the difference becomes. By default you should choose the 'fast' method. If there are problems with peak fitting, consider trying the 'full' method. 289 | - **kwargs*: you can pass all the arguments normally passed to the `process()` function at the end of the arguments here as well. These will be passed on and used in the analysis. Example: 290 | 291 | .. code-block:: python 292 | 293 | working_data, measures = hp.process_segmentwise(data, sample_rate=100.0, segment_width = 40, segment_overlap = 0.25, calc_freq=True, reject_segmentwise=True, report_time=True) 294 | 295 | 296 | In this example the last three arguments will be passed on the the `process()` function and used in the analysis. For a full list of arguments that `process()` supports, see the `Basic Example`_ 297 | 298 | 299 | Example Notebooks are available for further reading! 300 | ==================================================== 301 | If you're looking for a few hands-on examples on how to get started with HeartPy, have a look at the links below! These notebooks show how to handle various analysis tasks with HeartPy, from smartwatch data, smart ring data, regular PPG, and regular (and very noisy) ECG. The notebooks sometimes don't render through the github engine, so either open them locally, or use an online viewer like [nbviewer](https://nbviewer.jupyter.org/). 302 | 303 | We recommend you follow the notebooks in order: 304 | - [1. Analysing a PPG signal](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/1_regular_PPG/Analysing_a_PPG_signal.ipynb), a notebook for starting out with HeartPy using built-in examples. 305 | - [2. Analysing an ECG signal](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/2_regular_ECG/Analysing_a_regular_ECG_signal.ipynb), a notebook for working with HeartPy and typical ECG data. 306 | - [3. Analysing smartwatch data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/3_smartwatch_data/Analysing_Smartwatch_Data.ipynb), a notebook on analysing low resolution PPG data from a smartwatch. 307 | - [4. Analysing smart ring data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/4_smartring_data/Analysing_Smart_Ring_Data.ipynb), a notebook on analysing smart ring PPG data. 308 | - [5. Analysing noisy ECG data](https://github.com/paulvangentcom/heartrate_analysis_python/blob/master/examples/5_noisy_ECG/Analysing_Noisy_ECG.ipynb), an advanced notebook on working with very noisy ECG data, using data from the MIT-BIH noise stress test dataset. -------------------------------------------------------------------------------- /examples/5_noisy_ECG/118e00_ann.csv: -------------------------------------------------------------------------------- 1 | 73 2 | 270 3 | 602 4 | 863 5 | 1122 6 | 1388 7 | 1647 8 | 1903 9 | 2162 10 | 2407 11 | 2681 12 | 2945 13 | 3200 14 | 3458 15 | 3715 16 | 3974 17 | 4187 18 | 4479 19 | 4739 20 | 4997 21 | 5254 22 | 5534 23 | 5779 24 | 6032 25 | 6283 26 | 6472 27 | 6756 28 | 7010 29 | 7269 30 | 7526 31 | 7772 32 | 8023 33 | 8274 34 | 8526 35 | 8774 36 | 9025 37 | 9273 38 | 9520 39 | 9767 40 | 10019 41 | 10267 42 | 10514 43 | 10763 44 | 10944 45 | 11222 46 | 11480 47 | 11729 48 | 11977 49 | 12225 50 | 12477 51 | 12723 52 | 12969 53 | 13221 54 | 13445 55 | 13719 56 | 13973 57 | 14237 58 | 14496 59 | 14756 60 | 15018 61 | 15285 62 | 15547 63 | 15813 64 | 16077 65 | 16344 66 | 16610 67 | 16880 68 | 17150 69 | 17422 70 | 17691 71 | 17961 72 | 18230 73 | 18502 74 | 18772 75 | 19046 76 | 19324 77 | 19600 78 | 19791 79 | 20095 80 | 20380 81 | 20575 82 | 20891 83 | 21165 84 | 21442 85 | 21715 86 | 21995 87 | 22273 88 | 22553 89 | 22835 90 | 23115 91 | 23401 92 | 23685 93 | 23965 94 | 24247 95 | 24421 96 | 24808 97 | 25089 98 | 25375 99 | 25652 100 | 25925 101 | 26210 102 | 26491 103 | 26770 104 | 27049 105 | 27335 106 | 27611 107 | 27889 108 | 28170 109 | 28444 110 | 28723 111 | 28996 112 | 29275 113 | 29552 114 | 29830 115 | 30104 116 | 30383 117 | 30660 118 | 30934 119 | 31217 120 | 31489 121 | 31755 122 | 32023 123 | 32298 124 | 32567 125 | 32832 126 | 33104 127 | 33374 128 | 33643 129 | 33917 130 | 34197 131 | 34468 132 | 34744 133 | 35023 134 | 35302 135 | 35586 136 | 35862 137 | 36142 138 | 36422 139 | 36701 140 | 36981 141 | 37180 142 | 37486 143 | 37763 144 | 38041 145 | 38318 146 | 38593 147 | 38860 148 | 39137 149 | 39410 150 | 39682 151 | 39961 152 | 40237 153 | 40507 154 | 40782 155 | 41065 156 | 41331 157 | 41609 158 | 41883 159 | 42157 160 | 42426 161 | 42698 162 | 42973 163 | -------------------------------------------------------------------------------- /examples/5_noisy_ECG/118e06_ann.csv: -------------------------------------------------------------------------------- 1 | 73 2 | 270 3 | 602 4 | 863 5 | 1122 6 | 1388 7 | 1647 8 | 1903 9 | 2162 10 | 2407 11 | 2681 12 | 2945 13 | 3200 14 | 3458 15 | 3715 16 | 3974 17 | 4187 18 | 4479 19 | 4739 20 | 4997 21 | 5254 22 | 5534 23 | 5779 24 | 6032 25 | 6283 26 | 6472 27 | 6756 28 | 7010 29 | 7269 30 | 7526 31 | 7772 32 | 8023 33 | 8274 34 | 8526 35 | 8774 36 | 9025 37 | 9273 38 | 9520 39 | 9767 40 | 10019 41 | 10267 42 | 10514 43 | 10763 44 | 10944 45 | 11222 46 | 11480 47 | 11729 48 | 11977 49 | 12225 50 | 12477 51 | 12723 52 | 12969 53 | 13221 54 | 13445 55 | 13719 56 | 13973 57 | 14237 58 | 14496 59 | 14756 60 | 15018 61 | 15285 62 | 15547 63 | 15813 64 | 16077 65 | 16344 66 | 16610 67 | 16880 68 | 17150 69 | 17422 70 | 17691 71 | 17961 72 | 18230 73 | 18502 74 | 18772 75 | 19046 76 | 19324 77 | 19600 78 | 19791 79 | 20095 80 | 20380 81 | 20575 82 | 20891 83 | 21165 84 | 21442 85 | 21715 86 | 21995 87 | 22273 88 | 22553 89 | 22835 90 | 23115 91 | 23401 92 | 23685 93 | 23965 94 | 24247 95 | 24421 96 | 24808 97 | 25089 98 | 25375 99 | 25652 100 | 25925 101 | 26210 102 | 26491 103 | 26770 104 | 27049 105 | 27335 106 | 27611 107 | 27889 108 | 28170 109 | 28444 110 | 28723 111 | 28996 112 | 29275 113 | 29552 114 | 29830 115 | 30104 116 | 30383 117 | 30660 118 | 30934 119 | 31217 120 | 31489 121 | 31755 122 | 32023 123 | 32298 124 | 32567 125 | 32832 126 | 33104 127 | 33374 128 | 33643 129 | 33917 130 | 34197 131 | 34468 132 | 34744 133 | 35023 134 | 35302 135 | 35586 136 | 35862 137 | 36142 138 | 36422 139 | 36701 140 | 36981 141 | 37180 142 | 37486 143 | 37763 144 | 38041 145 | 38318 146 | 38593 147 | 38860 148 | 39137 149 | 39410 150 | 39682 151 | 39961 152 | 40237 153 | 40507 154 | 40782 155 | 41065 156 | 41331 157 | 41609 158 | 41883 159 | 42157 160 | 42426 161 | 42698 162 | 42973 163 | -------------------------------------------------------------------------------- /examples/5_noisy_ECG/118e12_ann.csv: -------------------------------------------------------------------------------- 1 | 73 2 | 270 3 | 602 4 | 863 5 | 1122 6 | 1388 7 | 1647 8 | 1903 9 | 2162 10 | 2407 11 | 2681 12 | 2945 13 | 3200 14 | 3458 15 | 3715 16 | 3974 17 | 4187 18 | 4479 19 | 4739 20 | 4997 21 | 5254 22 | 5534 23 | 5779 24 | 6032 25 | 6283 26 | 6472 27 | 6756 28 | 7010 29 | 7269 30 | 7526 31 | 7772 32 | 8023 33 | 8274 34 | 8526 35 | 8774 36 | 9025 37 | 9273 38 | 9520 39 | 9767 40 | 10019 41 | 10267 42 | 10514 43 | 10763 44 | 10944 45 | 11222 46 | 11480 47 | 11729 48 | 11977 49 | 12225 50 | 12477 51 | 12723 52 | 12969 53 | 13221 54 | 13445 55 | 13719 56 | 13973 57 | 14237 58 | 14496 59 | 14756 60 | 15018 61 | 15285 62 | 15547 63 | 15813 64 | 16077 65 | 16344 66 | 16610 67 | 16880 68 | 17150 69 | 17422 70 | 17691 71 | 17961 72 | 18230 73 | 18502 74 | 18772 75 | 19046 76 | 19324 77 | 19600 78 | 19791 79 | 20095 80 | 20380 81 | 20575 82 | 20891 83 | 21165 84 | 21442 85 | 21715 86 | 21995 87 | 22273 88 | 22553 89 | 22835 90 | 23115 91 | 23401 92 | 23685 93 | 23965 94 | 24247 95 | 24421 96 | 24808 97 | 25089 98 | 25375 99 | 25652 100 | 25925 101 | 26210 102 | 26491 103 | 26770 104 | 27049 105 | 27335 106 | 27611 107 | 27889 108 | 28170 109 | 28444 110 | 28723 111 | 28996 112 | 29275 113 | 29552 114 | 29830 115 | 30104 116 | 30383 117 | 30660 118 | 30934 119 | 31217 120 | 31489 121 | 31755 122 | 32023 123 | 32298 124 | 32567 125 | 32832 126 | 33104 127 | 33374 128 | 33643 129 | 33917 130 | 34197 131 | 34468 132 | 34744 133 | 35023 134 | 35302 135 | 35586 136 | 35862 137 | 36142 138 | 36422 139 | 36701 140 | 36981 141 | 37180 142 | 37486 143 | 37763 144 | 38041 145 | 38318 146 | 38593 147 | 38860 148 | 39137 149 | 39410 150 | 39682 151 | 39961 152 | 40237 153 | 40507 154 | 40782 155 | 41065 156 | 41331 157 | 41609 158 | 41883 159 | 42157 160 | 42426 161 | 42698 162 | 42973 163 | -------------------------------------------------------------------------------- /examples/5_noisy_ECG/118e24_ann.csv: -------------------------------------------------------------------------------- 1 | 229 2 | 483 3 | 740 4 | 989 5 | 1237 6 | 1485 7 | 1674 8 | 1949 9 | 2202 10 | 2450 11 | 2698 12 | 2946 13 | 3198 14 | 3454 15 | 3707 16 | 3965 17 | 4224 18 | 4478 19 | 4743 20 | 5003 21 | 5261 22 | 5518 23 | 5783 24 | 5956 25 | 6214 26 | 6529 27 | 6805 28 | 7068 29 | 7334 30 | 7601 31 | 7870 32 | 8135 33 | 8401 34 | 8665 35 | 8923 36 | 9185 37 | 9445 38 | 9708 39 | 9965 40 | 10222 41 | 10474 42 | 10740 43 | 10997 44 | 11260 45 | 11511 46 | 11776 47 | 12035 48 | 12294 49 | 12560 50 | 12824 51 | 13079 52 | 13330 53 | 13595 54 | 13848 55 | 14106 56 | 14357 57 | 14616 58 | 14864 59 | 15124 60 | 15381 61 | 15644 62 | 15895 63 | 16155 64 | 16413 65 | 16662 66 | 16941 67 | 17201 68 | 17457 69 | 17717 70 | 17888 71 | 18197 72 | 18503 73 | 18769 74 | 19030 75 | 19287 76 | 19555 77 | 19782 78 | 20073 79 | 20270 80 | 20602 81 | 20863 82 | 21122 83 | 21388 84 | 21647 85 | 21903 86 | 22162 87 | 22407 88 | 22681 89 | 22945 90 | 23200 91 | 23458 92 | 23715 93 | 23974 94 | 24187 95 | 24479 96 | 24739 97 | 24997 98 | 25254 99 | 25534 100 | 25779 101 | 26032 102 | 26283 103 | 26472 104 | 26756 105 | 27010 106 | 27269 107 | 27526 108 | 27772 109 | 28023 110 | 28274 111 | 28526 112 | 28774 113 | 29025 114 | 29273 115 | 29520 116 | 29767 117 | 30019 118 | 30267 119 | 30514 120 | 30763 121 | 30944 122 | 31222 123 | 31480 124 | 31729 125 | 31977 126 | 32225 127 | 32477 128 | 32723 129 | 32969 130 | 33221 131 | 33445 132 | 33719 133 | 33973 134 | 34237 135 | 34496 136 | 34756 137 | 35018 138 | 35285 139 | 35547 140 | 35813 141 | 36077 142 | 36344 143 | 36610 144 | 36880 145 | 37150 146 | 37422 147 | 37691 148 | 37961 149 | 38230 150 | 38502 151 | 38772 152 | 39046 153 | 39324 154 | 39600 155 | 39791 156 | 40095 157 | 40380 158 | 40575 159 | 40891 160 | 41165 161 | 41442 162 | 41715 163 | 41995 164 | 42273 165 | 42553 166 | 42835 167 | 43115 168 | -------------------------------------------------------------------------------- /heartpy/__init__.py: -------------------------------------------------------------------------------- 1 | from .heartpy import * 2 | 3 | 4 | __name__ = "HeartPy" 5 | __author__ = "Paul van Gent" 6 | __version__ = "Version 1.2.6" 7 | __license__ = "GNU General Public License V3.0" 8 | 9 | 10 | #module level docstring 11 | __doc__ = ''' 12 | Noise-resistant heart rate analysis module for Python 13 | ===================================================== 14 | 15 | HeartPy started out as a Python script to detect beats in 16 | PPG heart rate data streams. It has since grown into a module capable 17 | of pre-processing and analysing PPG and ECG data. 18 | 19 | 20 | References: 21 | ----------- 22 | van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2018). 23 | Heart Rate Analysis for Human Factors: Development and Validation of an 24 | Open Source Toolkit for Noisy Naturalistic Heart Rate Data. 25 | In Proceedings of the 6th HUMANIST Conference (pp. 173 to 178) 26 | 27 | van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). 28 | Analysing Noisy Driver Physiology Real-Time Using Off-the-Shelf Sensors: 29 | Heart Rate Analysis Software from the Taking the Fast Lane Project. 30 | Journal of Open Research Software, 7(1), 32. 31 | DOI: http://doi.org/10.5334/jors.241 32 | 33 | 34 | Documentation 35 | ------------- 36 | https://python-heart-rate-analysis-toolkit.readthedocs.io/en/latest/ 37 | ''' -------------------------------------------------------------------------------- /heartpy/config.py: -------------------------------------------------------------------------------- 1 | ''' 2 | config file for heartpy 3 | ''' 4 | 5 | 6 | #based on https://venngage.com/blog/color-blind-friendly-palette/#4 7 | 8 | __all__ = ['get_colorpalette_poincare'] 9 | 10 | def init(): # pragma: no cover 11 | global colorblind 12 | colorblind = False 13 | global colorblind_type 14 | colorblind_type = 'deuteranopia' 15 | global color_style 16 | color_style = 'default' 17 | 18 | 19 | def get_colorpalette_poincare(): 20 | '''returns color palettes for poincare plotting 21 | 22 | Function that returns color palettes for poincare plotting. 23 | Takes arguments from config settings globals. 24 | 25 | Parameters 26 | ---------- 27 | None 28 | 29 | Returns 30 | ------- 31 | color_palette : list 32 | list conntaining color palette for poincare plot, in order 33 | of scatterplot, SD1 line, SD2 line. 34 | 35 | Examples 36 | -------- 37 | >>> import heartpy as hp 38 | >>> hp.config.colorblind = False 39 | >>> palette = hp.config.get_colorpalette_poincare() 40 | >>> palette 41 | ['gray', 'blue', 'red'] 42 | 43 | >>> hp.config.colorblind = True 44 | >>> hp.config.colorblind_type = 'protanopia' 45 | >>> palette = hp.config.get_colorpalette_poincare() 46 | >>> palette 47 | ['#4C4C9B', '#EBAFBE', '#DCDCC7'] 48 | ''' 49 | 50 | #poincare color palettes 51 | #palette color order: ['scatter', 'SD1', 'SD2'] 52 | #identity line is always gray, ellipse always black 53 | poincare = {'regular': {'default': ['gray', 'blue', 'red'], 54 | 'retro': ['#63ACBE', '#601A4A', '#EE442F'], 55 | 'elegant': ['#ABC3C9', '#E0DCD3', '#CCBE9F'], 56 | 'corporate': ['#44749D', '#BDB8AD', '#EBE7E0'], 57 | 'zesty': ['#85C0F9', '#F5793A', '#A95AA1'] 58 | }, 59 | 'deuteranopia': {'default': ['#43439C', '#C7C78E', '#787811'], 60 | 'retro': ['#9E9CC2', '#383745', '#A17724'], 61 | 'elegant': ['#CAB8CB', '#F4D4D4', '#DCB69F'], 62 | 'corporate': ['#636D97', '#BDB6AB', '#EDE6DE'], 63 | 'zesty': ['#C59434', '#092C48', '#6F7498'] 64 | }, 65 | 'protanopia': {'default': ['#4C4C9B', '#EBAFBE', '#DCDCC7'], 66 | 'retro': ['#9C9EB5', '#2A385B', '#8B7F47'], 67 | 'elegant': ['#BEBCC5', '#E2DAD1', '#C9BD9E'], 68 | 'corporate': ['#636D97', '#BDB6AB', '#EDE6DE'], 69 | 'zesty': ['#AE9C45', '#052955', '#6073B1'] 70 | }, 71 | 'tritanopia': {'default': ['#959595', '#46DBFF', '#DE2253'], 72 | 'retro': ['#6AAECF', '#9E3C50', '#DE2253'], 73 | 'elegant': ['#E1BE91', '#CD913C', '#78500F'], 74 | 'corporate': ['#256077', '#F8EAEC', '#E3FAFF'], 75 | 'zesty': ['#9E3C50', '#CD913C', '#46DBFF'] 76 | } 77 | } 78 | 79 | if colorblind: 80 | return poincare[colorblind_type.lower()][color_style.lower()] 81 | else: 82 | return poincare['regular'][color_style.lower()] 83 | 84 | 85 | def get_colorpalette_plotter(): 86 | '''returns color palettes for regular plotting 87 | 88 | Function that returns color palettes for regular plotting coloring. 89 | Takes arguments from config settings globals. 90 | 91 | Parameters 92 | ---------- 93 | None 94 | 95 | Returns 96 | ------- 97 | color_palette : list 98 | list conntaining color palette for plotter function, in order 99 | of line color, accepted peaks color, rejected peaks color. 100 | 101 | Examples 102 | -------- 103 | >>> import heartpy as hp 104 | >>> hp.config.colorblind = False 105 | >>> palette = hp.config.get_colorpalette_plotter() 106 | >>> palette 107 | ['#7F7FFF', 'green', 'red'] 108 | 109 | >>> hp.config.colorblind = True 110 | >>> hp.config.colorblind_type = 'protanopia' 111 | >>> palette = hp.config.get_colorpalette_plotter() 112 | >>> palette 113 | ['#4C4C9B', '#EBAFBE', '#DCDCC7'] 114 | ''' 115 | 116 | #plotter color palettes 117 | #color order: ['line color', 'accepted peaks color', 'rejected peaks color'] 118 | plotter_colors = {'regular': {'default': ['#7F7FFF', 'green', 'red'], 119 | 'retro': ['#601A4A', '#63ACBE', '#EE442F'], 120 | 'elegant': ['#382119', '#70B8CA', '#CCBE9F'], 121 | 'corporate': ['#93A7BA', '#44749D', '#CAAB68'], 122 | 'zesty': ['#A95AA1', '#0F2080', '#F5793A'] 123 | }, 124 | 'deuteranopia': {'default': ['#43439C', '#C7C78E', '#787811'], 125 | 'retro': ['#383745', '#9E9CC2', '#A17724'], 126 | 'elegant': ['#342A1F', '#CAB8CB', '#DCB69F'], 127 | 'corporate': ['#5D6E9E', '#CDB1AD', '#DECBE3'], 128 | 'zesty': ['#C59434', '#092C48', '#6F7498'] 129 | }, 130 | 'protanopia': {'default': ['#4C4C9B', '#EBAFBE', '#DCDCC7'], 131 | 'retro': ['#9C9EB5', '#2A385B', '#8B7F47'], 132 | 'elegant': ['#2E2B21', '#C9BD9E', '#BEBCC5'], 133 | 'corporate': ['#636D97', '#BDB6AB', '#D1D0DE'], 134 | 'zesty': ['#AE9C45', '#052955', '#6073B1'] 135 | }, 136 | 'tritanopia': {'default': ['#959595', '#46DBFF', '#DE2253'], 137 | 'retro': ['#6AAECF', '#9E3C50', '#DE2253'], 138 | 'elegant': ['#E1BE91', '#78500F', '#CD913C'], 139 | 'corporate': ['#256077', '#9AEBFD', '#F59AA7'], 140 | 'zesty': ['#CD913C', '#46DBFF', '#9E3C50'] 141 | } 142 | } 143 | 144 | if colorblind: 145 | return plotter_colors[colorblind_type.lower()][color_style.lower()] 146 | else: 147 | return plotter_colors['regular'][color_style.lower()] -------------------------------------------------------------------------------- /heartpy/data/data.csv: -------------------------------------------------------------------------------- 1 | 530 2 | 518 3 | 506 4 | 494 5 | 483 6 | 472 7 | 462 8 | 454 9 | 446 10 | 440 11 | 434 12 | 430 13 | 428 14 | 430 15 | 430 16 | 432 17 | 436 18 | 439 19 | 444 20 | 450 21 | 454 22 | 459 23 | 465 24 | 470 25 | 475 26 | 481 27 | 487 28 | 490 29 | 494 30 | 497 31 | 500 32 | 502 33 | 504 34 | 505 35 | 507 36 | 506 37 | 504 38 | 504 39 | 503 40 | 502 41 | 500 42 | 499 43 | 498 44 | 496 45 | 495 46 | 493 47 | 491 48 | 488 49 | 488 50 | 487 51 | 490 52 | 491 53 | 500 54 | 515 55 | 538 56 | 567 57 | 603 58 | 640 59 | 677 60 | 714 61 | 745 62 | 772 63 | 788 64 | 795 65 | 795 66 | 783 67 | 765 68 | 740 69 | 712 70 | 677 71 | 641 72 | 605 73 | 568 74 | 535 75 | 503 76 | 474 77 | 450 78 | 430 79 | 414 80 | 401 81 | 393 82 | 388 83 | 385 84 | 385 85 | 388 86 | 393 87 | 401 88 | 412 89 | 426 90 | 440 91 | 455 92 | 473 93 | 491 94 | 509 95 | 523 96 | 537 97 | 549 98 | 558 99 | 564 100 | 565 101 | 565 102 | 561 103 | 556 104 | 548 105 | 540 106 | 531 107 | 519 108 | 508 109 | 497 110 | 486 111 | 475 112 | 465 113 | 457 114 | 450 115 | 444 116 | 438 117 | 435 118 | 432 119 | 431 120 | 431 121 | 432 122 | 434 123 | 438 124 | 441 125 | 445 126 | 450 127 | 454 128 | 460 129 | 466 130 | 471 131 | 477 132 | 483 133 | 487 134 | 491 135 | 493 136 | 496 137 | 500 138 | 501 139 | 502 140 | 504 141 | 505 142 | 504 143 | 503 144 | 502 145 | 501 146 | 499 147 | 498 148 | 496 149 | 495 150 | 492 151 | 492 152 | 490 153 | 490 154 | 490 155 | 497 156 | 510 157 | 530 158 | 559 159 | 593 160 | 630 161 | 667 162 | 703 163 | 735 164 | 759 165 | 774 166 | 782 167 | 781 168 | 771 169 | 753 170 | 728 171 | 700 172 | 668 173 | 632 174 | 597 175 | 562 176 | 528 177 | 497 178 | 470 179 | 446 180 | 426 181 | 411 182 | 399 183 | 389 184 | 385 185 | 382 186 | 383 187 | 386 188 | 391 189 | 398 190 | 407 191 | 420 192 | 433 193 | 450 194 | 469 195 | 487 196 | 504 197 | 521 198 | 537 199 | 550 200 | 560 201 | 569 202 | 573 203 | 571 204 | 569 205 | 563 206 | 556 207 | 544 208 | 533 209 | 521 210 | 509 211 | 496 212 | 485 213 | 475 214 | 466 215 | 456 216 | 449 217 | 442 218 | 436 219 | 433 220 | 430 221 | 428 222 | 429 223 | 428 224 | 431 225 | 436 226 | 441 227 | 447 228 | 452 229 | 457 230 | 462 231 | 466 232 | 471 233 | 478 234 | 483 235 | 487 236 | 491 237 | 494 238 | 497 239 | 499 240 | 501 241 | 503 242 | 504 243 | 505 244 | 504 245 | 504 246 | 503 247 | 501 248 | 500 249 | 498 250 | 497 251 | 496 252 | 498 253 | 502 254 | 512 255 | 530 256 | 554 257 | 585 258 | 618 259 | 651 260 | 684 261 | 715 262 | 739 263 | 756 264 | 767 265 | 768 266 | 762 267 | 749 268 | 728 269 | 704 270 | 673 271 | 641 272 | 608 273 | 574 274 | 540 275 | 509 276 | 480 277 | 455 278 | 433 279 | 416 280 | 403 281 | 391 282 | 385 283 | 379 284 | 378 285 | 378 286 | 382 287 | 390 288 | 399 289 | 413 290 | 428 291 | 446 292 | 464 293 | 482 294 | 501 295 | 521 296 | 537 297 | 550 298 | 562 299 | 569 300 | 574 301 | 574 302 | 572 303 | 567 304 | 559 305 | 548 306 | 537 307 | 523 308 | 510 309 | 496 310 | 485 311 | 474 312 | 463 313 | 452 314 | 445 315 | 438 316 | 433 317 | 431 318 | 428 319 | 429 320 | 428 321 | 430 322 | 433 323 | 436 324 | 440 325 | 445 326 | 450 327 | 455 328 | 462 329 | 468 330 | 474 331 | 481 332 | 486 333 | 490 334 | 496 335 | 500 336 | 504 337 | 506 338 | 508 339 | 508 340 | 508 341 | 508 342 | 509 343 | 507 344 | 505 345 | 503 346 | 499 347 | 498 348 | 497 349 | 501 350 | 508 351 | 520 352 | 542 353 | 570 354 | 605 355 | 642 356 | 679 357 | 714 358 | 745 359 | 768 360 | 785 361 | 794 362 | 793 363 | 783 364 | 766 365 | 744 366 | 715 367 | 683 368 | 647 369 | 613 370 | 578 371 | 544 372 | 513 373 | 485 374 | 460 375 | 441 376 | 425 377 | 413 378 | 403 379 | 399 380 | 396 381 | 395 382 | 397 383 | 402 384 | 409 385 | 419 386 | 428 387 | 441 388 | 455 389 | 470 390 | 484 391 | 500 392 | 513 393 | 527 394 | 536 395 | 544 396 | 549 397 | 550 398 | 551 399 | 548 400 | 543 401 | 537 402 | 529 403 | 521 404 | 511 405 | 501 406 | 489 407 | 481 408 | 471 409 | 463 410 | 455 411 | 448 412 | 444 413 | 439 414 | 437 415 | 435 416 | 432 417 | 433 418 | 433 419 | 436 420 | 439 421 | 443 422 | 448 423 | 452 424 | 457 425 | 462 426 | 467 427 | 473 428 | 477 429 | 482 430 | 487 431 | 491 432 | 494 433 | 497 434 | 498 435 | 499 436 | 500 437 | 500 438 | 499 439 | 500 440 | 498 441 | 498 442 | 497 443 | 495 444 | 494 445 | 492 446 | 491 447 | 490 448 | 489 449 | 492 450 | 500 451 | 515 452 | 538 453 | 569 454 | 604 455 | 643 456 | 681 457 | 716 458 | 747 459 | 770 460 | 785 461 | 790 462 | 788 463 | 777 464 | 758 465 | 734 466 | 704 467 | 671 468 | 636 469 | 601 470 | 567 471 | 533 472 | 503 473 | 476 474 | 453 475 | 435 476 | 419 477 | 409 478 | 401 479 | 397 480 | 395 481 | 395 482 | 397 483 | 402 484 | 409 485 | 418 486 | 429 487 | 443 488 | 458 489 | 473 490 | 489 491 | 504 492 | 519 493 | 532 494 | 543 495 | 549 496 | 554 497 | 556 498 | 553 499 | 550 500 | 544 501 | 537 502 | 527 503 | 517 504 | 506 505 | 496 506 | 485 507 | 475 508 | 467 509 | 458 510 | 451 511 | 445 512 | 440 513 | 437 514 | 436 515 | 435 516 | 436 517 | 437 518 | 438 519 | 441 520 | 444 521 | 447 522 | 452 523 | 457 524 | 462 525 | 467 526 | 471 527 | 477 528 | 482 529 | 485 530 | 489 531 | 493 532 | 497 533 | 500 534 | 501 535 | 502 536 | 503 537 | 502 538 | 502 539 | 501 540 | 500 541 | 499 542 | 498 543 | 497 544 | 496 545 | 494 546 | 493 547 | 491 548 | 488 549 | 487 550 | 484 551 | 483 552 | 482 553 | 481 554 | 486 555 | 493 556 | 509 557 | 532 558 | 565 559 | 602 560 | 641 561 | 682 562 | 720 563 | 753 564 | 777 565 | 794 566 | 802 567 | 800 568 | 789 569 | 770 570 | 745 571 | 714 572 | 680 573 | 643 574 | 607 575 | 572 576 | 537 577 | 505 578 | 477 579 | 454 580 | 433 581 | 417 582 | 407 583 | 399 584 | 393 585 | 392 586 | 391 587 | 394 588 | 398 589 | 405 590 | 416 591 | 428 592 | 442 593 | 458 594 | 475 595 | 492 596 | 507 597 | 521 598 | 536 599 | 545 600 | 552 601 | 557 602 | 561 603 | 560 604 | 556 605 | 549 606 | 541 607 | 532 608 | 520 609 | 510 610 | 498 611 | 487 612 | 477 613 | 467 614 | 458 615 | 450 616 | 443 617 | 438 618 | 435 619 | 433 620 | 432 621 | 431 622 | 432 623 | 433 624 | 437 625 | 441 626 | 446 627 | 450 628 | 455 629 | 462 630 | 467 631 | 472 632 | 479 633 | 483 634 | 488 635 | 492 636 | 496 637 | 498 638 | 499 639 | 500 640 | 500 641 | 500 642 | 500 643 | 498 644 | 498 645 | 497 646 | 496 647 | 495 648 | 493 649 | 494 650 | 493 651 | 491 652 | 492 653 | 491 654 | 490 655 | 490 656 | 488 657 | 486 658 | 482 659 | 480 660 | 479 661 | 477 662 | 479 663 | 482 664 | 491 665 | 506 666 | 532 667 | 565 668 | 605 669 | 645 670 | 687 671 | 726 672 | 759 673 | 786 674 | 803 675 | 810 676 | 809 677 | 797 678 | 777 679 | 751 680 | 720 681 | 682 682 | 645 683 | 605 684 | 567 685 | 531 686 | 498 687 | 471 688 | 446 689 | 426 690 | 411 691 | 400 692 | 390 693 | 386 694 | 383 695 | 385 696 | 387 697 | 393 698 | 401 699 | 413 700 | 426 701 | 440 702 | 457 703 | 475 704 | 491 705 | 509 706 | 523 707 | 539 708 | 550 709 | 557 710 | 562 711 | 563 712 | 560 713 | 556 714 | 550 715 | 542 716 | 531 717 | 519 718 | 507 719 | 496 720 | 484 721 | 473 722 | 464 723 | 456 724 | 448 725 | 441 726 | 436 727 | 433 728 | 430 729 | 429 730 | 429 731 | 429 732 | 431 733 | 434 734 | 439 735 | 444 736 | 449 737 | 455 738 | 461 739 | 467 740 | 473 741 | 479 742 | 486 743 | 490 744 | 492 745 | 496 746 | 498 747 | 499 748 | 501 749 | 502 750 | 502 751 | 501 752 | 501 753 | 500 754 | 499 755 | 497 756 | 495 757 | 494 758 | 491 759 | 491 760 | 489 761 | 492 762 | 498 763 | 509 764 | 529 765 | 555 766 | 589 767 | 627 768 | 664 769 | 701 770 | 733 771 | 759 772 | 778 773 | 787 774 | 788 775 | 781 776 | 764 777 | 743 778 | 713 779 | 679 780 | 644 781 | 607 782 | 570 783 | 535 784 | 502 785 | 473 786 | 446 787 | 425 788 | 407 789 | 394 790 | 384 791 | 377 792 | 374 793 | 373 794 | 376 795 | 381 796 | 387 797 | 397 798 | 410 799 | 426 800 | 444 801 | 461 802 | 480 803 | 499 804 | 517 805 | 532 806 | 544 807 | 555 808 | 562 809 | 565 810 | 566 811 | 565 812 | 562 813 | 554 814 | 544 815 | 533 816 | 521 817 | 507 818 | 493 819 | 482 820 | 470 821 | 459 822 | 449 823 | 443 824 | 435 825 | 429 826 | 424 827 | 423 828 | 423 829 | 421 830 | 425 831 | 427 832 | 431 833 | 435 834 | 441 835 | 447 836 | 452 837 | 460 838 | 467 839 | 472 840 | 479 841 | 484 842 | 487 843 | 491 844 | 495 845 | 498 846 | 500 847 | 503 848 | 503 849 | 505 850 | 506 851 | 507 852 | 509 853 | 515 854 | 526 855 | 543 856 | 568 857 | 597 858 | 631 859 | 665 860 | 698 861 | 727 862 | 750 863 | 767 864 | 773 865 | 773 866 | 764 867 | 747 868 | 724 869 | 695 870 | 662 871 | 627 872 | 591 873 | 556 874 | 523 875 | 491 876 | 463 877 | 439 878 | 418 879 | 401 880 | 389 881 | 379 882 | 372 883 | 366 884 | 366 885 | 370 886 | 375 887 | 384 888 | 396 889 | 411 890 | 429 891 | 449 892 | 469 893 | 492 894 | 513 895 | 533 896 | 550 897 | 564 898 | 574 899 | 581 900 | 584 901 | 583 902 | 579 903 | 572 904 | 561 905 | 548 906 | 535 907 | 519 908 | 505 909 | 491 910 | 476 911 | 464 912 | 454 913 | 446 914 | 436 915 | 430 916 | 426 917 | 421 918 | 419 919 | 420 920 | 421 921 | 424 922 | 427 923 | 432 924 | 440 925 | 445 926 | 451 927 | 458 928 | 465 929 | 471 930 | 477 931 | 483 932 | 488 933 | 492 934 | 496 935 | 499 936 | 500 937 | 502 938 | 503 939 | 506 940 | 507 941 | 508 942 | 516 943 | 529 944 | 551 945 | 578 946 | 612 947 | 650 948 | 688 949 | 725 950 | 760 951 | 789 952 | 809 953 | 819 954 | 822 955 | 815 956 | 799 957 | 776 958 | 748 959 | 714 960 | 677 961 | 637 962 | 599 963 | 562 964 | 524 965 | 493 966 | 464 967 | 442 968 | 422 969 | 408 970 | 399 971 | 391 972 | 387 973 | 385 974 | 386 975 | 390 976 | 396 977 | 404 978 | 415 979 | 430 980 | 444 981 | 460 982 | 478 983 | 495 984 | 510 985 | 524 986 | 537 987 | 547 988 | 552 989 | 558 990 | 559 991 | 557 992 | 553 993 | 547 994 | 539 995 | 530 996 | 518 997 | 505 998 | 495 999 | 483 1000 | 472 1001 | 462 1002 | 453 1003 | 444 1004 | 437 1005 | 431 1006 | 428 1007 | 424 1008 | 424 1009 | 425 1010 | 426 1011 | 429 1012 | 431 1013 | 436 1014 | 440 1015 | 444 1016 | 450 1017 | 457 1018 | 464 1019 | 471 1020 | 476 1021 | 484 1022 | 487 1023 | 491 1024 | 494 1025 | 497 1026 | 499 1027 | 500 1028 | 500 1029 | 500 1030 | 498 1031 | 497 1032 | 496 1033 | 496 1034 | 493 1035 | 494 1036 | 495 1037 | 499 1038 | 511 1039 | 532 1040 | 560 1041 | 597 1042 | 637 1043 | 679 1044 | 719 1045 | 756 1046 | 788 1047 | 811 1048 | 827 1049 | 831 1050 | 826 1051 | 810 1052 | 789 1053 | 759 1054 | 725 1055 | 687 1056 | 648 1057 | 607 1058 | 570 1059 | 535 1060 | 503 1061 | 476 1062 | 452 1063 | 434 1064 | 419 1065 | 410 1066 | 401 1067 | 397 1068 | 394 1069 | 393 1070 | 395 1071 | 400 1072 | 407 1073 | 416 1074 | 429 1075 | 442 1076 | 458 1077 | 475 1078 | 491 1079 | 507 1080 | 522 1081 | 533 1082 | 543 1083 | 549 1084 | 551 1085 | 550 1086 | 547 1087 | 542 1088 | 534 1089 | 523 1090 | 514 1091 | 502 1092 | 491 1093 | 481 1094 | 470 1095 | 461 1096 | 453 1097 | 445 1098 | 441 1099 | 437 1100 | 433 1101 | 430 1102 | 429 1103 | 429 1104 | 428 1105 | 428 1106 | 430 1107 | 432 1108 | 436 1109 | 440 1110 | 445 1111 | 450 1112 | 456 1113 | 462 1114 | 469 1115 | 473 1116 | 477 1117 | 481 1118 | 487 1119 | 489 1120 | 492 1121 | 494 1122 | 496 1123 | 496 1124 | 496 1125 | 496 1126 | 497 1127 | 494 1128 | 493 1129 | 492 1130 | 490 1131 | 488 1132 | 488 1133 | 486 1134 | 484 1135 | 483 1136 | 480 1137 | 478 1138 | 477 1139 | 476 1140 | 474 1141 | 473 1142 | 472 1143 | 471 1144 | 474 1145 | 479 1146 | 489 1147 | 507 1148 | 535 1149 | 570 1150 | 613 1151 | 656 1152 | 702 1153 | 745 1154 | 783 1155 | 813 1156 | 833 1157 | 843 1158 | 843 1159 | 831 1160 | 812 1161 | 784 1162 | 750 1163 | 711 1164 | 671 1165 | 629 1166 | 586 1167 | 549 1168 | 513 1169 | 482 1170 | 455 1171 | 433 1172 | 417 1173 | 404 1174 | 395 1175 | 389 1176 | 385 1177 | 385 1178 | 388 1179 | 394 1180 | 400 1181 | 412 1182 | 426 1183 | 441 1184 | 459 1185 | 476 1186 | 492 1187 | 510 1188 | 526 1189 | 538 1190 | 548 1191 | 556 1192 | 559 1193 | 560 1194 | 556 1195 | 552 1196 | 543 1197 | 535 1198 | 523 1199 | 512 1200 | 501 1201 | 488 1202 | 476 1203 | 466 1204 | 455 1205 | 447 1206 | 438 1207 | 433 1208 | 428 1209 | 424 1210 | 423 1211 | 422 1212 | 423 1213 | 425 1214 | 427 1215 | 430 1216 | 433 1217 | 437 1218 | 443 1219 | 449 1220 | 456 1221 | 460 1222 | 465 1223 | 471 1224 | 474 1225 | 478 1226 | 484 1227 | 488 1228 | 491 1229 | 493 1230 | 496 1231 | 496 1232 | 496 1233 | 497 1234 | 496 1235 | 496 1236 | 495 1237 | 493 1238 | 492 1239 | 490 1240 | 486 1241 | 485 1242 | 484 1243 | 482 1244 | 480 1245 | 478 1246 | 476 1247 | 475 1248 | 473 1249 | 473 1250 | 473 1251 | 471 1252 | 469 1253 | 470 1254 | 469 1255 | 468 1256 | 470 1257 | 471 1258 | 472 1259 | 474 1260 | 479 1261 | 486 1262 | 496 1263 | 514 1264 | 542 1265 | 577 1266 | 619 1267 | 664 1268 | 711 1269 | 754 1270 | 793 1271 | 822 1272 | 844 1273 | 854 1274 | 851 1275 | 839 1276 | 817 1277 | 789 1278 | 753 1279 | 712 1280 | 668 1281 | 626 1282 | 583 1283 | 541 1284 | 504 1285 | 471 1286 | 442 1287 | 419 1288 | 402 1289 | 388 1290 | 380 1291 | 372 1292 | 371 1293 | 371 1294 | 375 1295 | 382 1296 | 391 1297 | 404 1298 | 418 1299 | 435 1300 | 454 1301 | 473 1302 | 493 1303 | 512 1304 | 530 1305 | 545 1306 | 556 1307 | 565 1308 | 569 1309 | 571 1310 | 567 1311 | 562 1312 | 555 1313 | 546 1314 | 534 1315 | 522 1316 | 509 1317 | 496 1318 | 483 1319 | 471 1320 | 460 1321 | 450 1322 | 440 1323 | 433 1324 | 426 1325 | 421 1326 | 419 1327 | 418 1328 | 418 1329 | 419 1330 | 421 1331 | 426 1332 | 430 1333 | 434 1334 | 441 1335 | 446 1336 | 452 1337 | 457 1338 | 463 1339 | 468 1340 | 474 1341 | 478 1342 | 482 1343 | 487 1344 | 490 1345 | 494 1346 | 496 1347 | 499 1348 | 500 1349 | 500 1350 | 500 1351 | 500 1352 | 499 1353 | 496 1354 | 495 1355 | 493 1356 | 491 1357 | 488 1358 | 486 1359 | 485 1360 | 483 1361 | 481 1362 | 479 1363 | 478 1364 | 476 1365 | 475 1366 | 473 1367 | 473 1368 | 472 1369 | 470 1370 | 469 1371 | 469 1372 | 471 1373 | 472 1374 | 479 1375 | 491 1376 | 511 1377 | 538 1378 | 574 1379 | 614 1380 | 656 1381 | 697 1382 | 736 1383 | 768 1384 | 793 1385 | 806 1386 | 812 1387 | 807 1388 | 791 1389 | 770 1390 | 741 1391 | 707 1392 | 670 1393 | 631 1394 | 592 1395 | 553 1396 | 518 1397 | 485 1398 | 455 1399 | 429 1400 | 408 1401 | 392 1402 | 378 1403 | 368 1404 | 362 1405 | 359 1406 | 359 1407 | 362 1408 | 370 1409 | 382 1410 | 395 1411 | 413 1412 | 433 1413 | 455 1414 | 478 1415 | 500 1416 | 521 1417 | 541 1418 | 557 1419 | 569 1420 | 578 1421 | 583 1422 | 585 1423 | 582 1424 | 575 1425 | 566 1426 | 556 1427 | 542 1428 | 531 1429 | 517 1430 | 501 1431 | 487 1432 | 473 1433 | 460 1434 | 448 1435 | 440 1436 | 432 1437 | 426 1438 | 422 1439 | 418 1440 | 417 1441 | 417 1442 | 419 1443 | 422 1444 | 427 1445 | 430 1446 | 436 1447 | 442 1448 | 449 1449 | 455 1450 | 463 1451 | 470 1452 | 477 1453 | 482 1454 | 486 1455 | 490 1456 | 493 1457 | 496 1458 | 500 1459 | 502 1460 | 504 1461 | 505 1462 | 504 1463 | 505 1464 | 505 1465 | 503 1466 | 501 1467 | 499 1468 | 497 1469 | 494 1470 | 491 1471 | 488 1472 | 485 1473 | 482 1474 | 481 1475 | 480 1476 | 483 1477 | 490 1478 | 504 1479 | 527 1480 | 556 1481 | 590 1482 | 626 1483 | 666 1484 | 702 1485 | 734 1486 | 760 1487 | 777 1488 | 786 1489 | 786 1490 | 779 1491 | 763 1492 | 740 1493 | 711 1494 | 680 1495 | 644 1496 | 608 1497 | 572 1498 | 539 1499 | 507 1500 | 478 1501 | 453 1502 | 433 1503 | 418 1504 | 404 1505 | 395 1506 | 389 1507 | 383 1508 | 383 1509 | 384 1510 | 387 1511 | 394 1512 | 404 1513 | 417 1514 | 433 1515 | 449 1516 | 468 1517 | 487 1518 | 505 1519 | 522 1520 | 536 1521 | 547 1522 | 556 1523 | 561 1524 | 564 1525 | 562 1526 | 559 1527 | 552 1528 | 543 1529 | 534 1530 | 524 1531 | 512 1532 | 499 1533 | 488 1534 | 474 1535 | 463 1536 | 453 1537 | 445 1538 | 436 1539 | 431 1540 | 426 1541 | 426 1542 | 424 1543 | 424 1544 | 425 1545 | 427 1546 | 427 1547 | 431 1548 | 436 1549 | 442 1550 | 448 1551 | 453 1552 | 461 1553 | 467 1554 | 471 1555 | 476 1556 | 482 1557 | 486 1558 | 489 1559 | 492 1560 | 495 1561 | 498 1562 | 499 1563 | 500 1564 | 500 1565 | 500 1566 | 500 1567 | 498 1568 | 498 1569 | 497 1570 | 495 1571 | 494 1572 | 493 1573 | 490 1574 | 488 1575 | 487 1576 | 485 1577 | 483 1578 | 483 1579 | 483 1580 | 485 1581 | 493 1582 | 506 1583 | 527 1584 | 557 1585 | 591 1586 | 631 1587 | 671 1588 | 710 1589 | 746 1590 | 773 1591 | 793 1592 | 803 1593 | 804 1594 | 797 1595 | 783 1596 | 760 1597 | 731 1598 | 698 1599 | 663 1600 | 625 1601 | 590 1602 | 554 1603 | 522 1604 | 493 1605 | 468 1606 | 448 1607 | 432 1608 | 419 1609 | 408 1610 | 403 1611 | 399 1612 | 397 1613 | 398 1614 | 402 1615 | 407 1616 | 415 1617 | 425 1618 | 437 1619 | 451 1620 | 465 1621 | 481 1622 | 496 1623 | 510 1624 | 522 1625 | 533 1626 | 541 1627 | 547 1628 | 550 1629 | 551 1630 | 549 1631 | 544 1632 | 538 1633 | 531 1634 | 522 1635 | 511 1636 | 500 1637 | 491 1638 | 481 1639 | 470 1640 | 461 1641 | 453 1642 | 447 1643 | 441 1644 | 436 1645 | 433 1646 | 432 1647 | 433 1648 | 432 1649 | 435 1650 | 437 1651 | 438 1652 | 442 1653 | 445 1654 | 449 1655 | 454 1656 | 457 1657 | 463 1658 | 467 1659 | 471 1660 | 474 1661 | 479 1662 | 482 1663 | 486 1664 | 488 1665 | 490 1666 | 492 1667 | 493 1668 | 494 1669 | 495 1670 | 495 1671 | 495 1672 | 495 1673 | 496 1674 | 496 1675 | 494 1676 | 495 1677 | 495 1678 | 493 1679 | 492 1680 | 490 1681 | 489 1682 | 488 1683 | 486 1684 | 485 1685 | 484 1686 | 484 1687 | 488 1688 | 498 1689 | 516 1690 | 539 1691 | 572 1692 | 607 1693 | 647 1694 | 684 1695 | 719 1696 | 750 1697 | 773 1698 | 787 1699 | 792 1700 | 790 1701 | 777 1702 | 757 1703 | 732 1704 | 700 1705 | 666 1706 | 629 1707 | 594 1708 | 557 1709 | 525 1710 | 494 1711 | 467 1712 | 445 1713 | 427 1714 | 412 1715 | 402 1716 | 397 1717 | 394 1718 | 393 1719 | 395 1720 | 399 1721 | 404 1722 | 412 1723 | 422 1724 | 434 1725 | 448 1726 | 465 1727 | 483 1728 | 499 1729 | 515 1730 | 529 1731 | 540 1732 | 549 1733 | 557 1734 | 562 1735 | 563 1736 | 562 1737 | 558 1738 | 553 1739 | 544 1740 | 534 1741 | 523 1742 | 511 1743 | 500 1744 | 487 1745 | 476 1746 | 466 1747 | 457 1748 | 448 1749 | 440 1750 | 435 1751 | 431 1752 | 426 1753 | 425 1754 | 425 1755 | 427 1756 | 429 1757 | 432 1758 | 436 1759 | 441 1760 | 444 1761 | 451 1762 | 456 1763 | 462 1764 | 468 1765 | 474 1766 | 479 1767 | 482 1768 | 485 1769 | 489 1770 | 493 1771 | 494 1772 | 496 1773 | 497 1774 | 499 1775 | 500 1776 | 499 1777 | 499 1778 | 498 1779 | 497 1780 | 498 1781 | 496 1782 | 496 1783 | 494 1784 | 492 1785 | 489 1786 | 486 1787 | 484 1788 | 481 1789 | 479 1790 | 478 1791 | 478 1792 | 481 1793 | 489 1794 | 505 1795 | 528 1796 | 558 1797 | 594 1798 | 630 1799 | 668 1800 | 701 1801 | 730 1802 | 752 1803 | 767 1804 | 772 1805 | 768 1806 | 757 1807 | 736 1808 | 712 1809 | 683 1810 | 650 1811 | 615 1812 | 581 1813 | 546 1814 | 514 1815 | 484 1816 | 457 1817 | 434 1818 | 414 1819 | 398 1820 | 387 1821 | 379 1822 | 373 1823 | 371 1824 | 370 1825 | 372 1826 | 377 1827 | 386 1828 | 399 1829 | 415 1830 | 434 1831 | 454 1832 | 477 1833 | 498 1834 | 518 1835 | 536 1836 | 553 1837 | 565 1838 | 573 1839 | 579 1840 | 581 1841 | 578 1842 | 574 1843 | 566 1844 | 557 1845 | 544 1846 | 531 1847 | 518 1848 | 505 1849 | 490 1850 | 478 1851 | 467 1852 | 455 1853 | 447 1854 | 438 1855 | 434 1856 | 429 1857 | 426 1858 | 424 1859 | 425 1860 | 427 1861 | 429 1862 | 431 1863 | 434 1864 | 440 1865 | 443 1866 | 448 1867 | 455 1868 | 462 1869 | 468 1870 | 473 1871 | 479 1872 | 483 1873 | 486 1874 | 491 1875 | 494 1876 | 496 1877 | 500 1878 | 505 1879 | 507 1880 | 508 1881 | 508 1882 | 508 1883 | 506 1884 | 504 1885 | 504 1886 | 506 1887 | 512 1888 | 525 1889 | 545 1890 | 573 1891 | 604 1892 | 638 1893 | 675 1894 | 709 1895 | 739 1896 | 762 1897 | 779 1898 | 787 1899 | 785 1900 | 776 1901 | 759 1902 | 736 1903 | 707 1904 | 675 1905 | 641 1906 | 606 1907 | 572 1908 | 540 1909 | 510 1910 | 482 1911 | 457 1912 | 436 1913 | 420 1914 | 408 1915 | 398 1916 | 391 1917 | 387 1918 | 385 1919 | 386 1920 | 389 1921 | 396 1922 | 406 1923 | 419 1924 | 433 1925 | 451 1926 | 470 1927 | 487 1928 | 506 1929 | 525 1930 | 539 1931 | 552 1932 | 561 1933 | 565 1934 | 568 1935 | 567 1936 | 564 1937 | 559 1938 | 551 1939 | 541 1940 | 530 1941 | 518 1942 | 506 1943 | 493 1944 | 480 1945 | 468 1946 | 459 1947 | 450 1948 | 444 1949 | 438 1950 | 433 1951 | 428 1952 | 426 1953 | 424 1954 | 425 1955 | 427 1956 | 428 1957 | 432 1958 | 435 1959 | 441 1960 | 446 1961 | 452 1962 | 457 1963 | 463 1964 | 468 1965 | 474 1966 | 478 1967 | 484 1968 | 489 1969 | 493 1970 | 496 1971 | 500 1972 | 501 1973 | 502 1974 | 501 1975 | 501 1976 | 498 1977 | 496 1978 | 495 1979 | 494 1980 | 492 1981 | 491 1982 | 494 1983 | 501 1984 | 515 1985 | 538 1986 | 569 1987 | 606 1988 | 646 1989 | 686 1990 | 725 1991 | 761 1992 | 790 1993 | 810 1994 | 822 1995 | 823 1996 | 815 1997 | 798 1998 | 774 1999 | 743 2000 | 708 2001 | 671 2002 | 631 2003 | 595 2004 | 557 2005 | 523 2006 | 494 2007 | 467 2008 | 446 2009 | 429 2010 | 418 2011 | 410 2012 | 404 2013 | 402 2014 | 401 2015 | 402 2016 | 405 2017 | 411 2018 | 419 2019 | 429 2020 | 442 2021 | 455 2022 | 471 2023 | 485 2024 | 500 2025 | 514 2026 | 526 2027 | 537 2028 | 544 2029 | 548 2030 | 551 2031 | 551 2032 | 548 2033 | 544 2034 | 539 2035 | 530 2036 | 521 2037 | 512 2038 | 503 2039 | 494 2040 | 483 2041 | 474 2042 | 465 2043 | 456 2044 | 448 2045 | 442 2046 | 436 2047 | 433 2048 | 430 2049 | 428 2050 | 427 2051 | 427 2052 | 428 2053 | 430 2054 | 434 2055 | 438 2056 | 441 2057 | 446 2058 | 451 2059 | 456 2060 | 461 2061 | 467 2062 | 471 2063 | 475 2064 | 481 2065 | 485 2066 | 487 2067 | 489 2068 | 493 2069 | 493 2070 | 493 2071 | 494 2072 | 494 2073 | 493 2074 | 494 2075 | 494 2076 | 493 2077 | 493 2078 | 491 2079 | 489 2080 | 487 2081 | 485 2082 | 482 2083 | 480 2084 | 479 2085 | 480 2086 | 484 2087 | 497 2088 | 515 2089 | 542 2090 | 576 2091 | 616 2092 | 658 2093 | 699 2094 | 740 2095 | 774 2096 | 801 2097 | 820 2098 | 828 2099 | 825 2100 | 815 2101 | 794 2102 | 769 2103 | 737 2104 | 702 2105 | 664 2106 | 625 2107 | 586 2108 | 548 2109 | 513 2110 | 482 2111 | 458 2112 | 437 2113 | 421 2114 | 407 2115 | 399 2116 | 394 2117 | 389 2118 | 387 2119 | 389 2120 | 393 2121 | 398 2122 | 408 2123 | 421 2124 | 434 2125 | 450 2126 | 467 2127 | 485 2128 | 502 2129 | 517 2130 | 530 2131 | 540 2132 | 549 2133 | 553 2134 | 556 2135 | 555 2136 | 552 2137 | 546 2138 | 539 2139 | 530 2140 | 520 2141 | 509 2142 | 498 2143 | 486 2144 | 476 2145 | 464 2146 | 456 2147 | 447 2148 | 440 2149 | 434 2150 | 430 2151 | 426 2152 | 424 2153 | 423 2154 | 424 2155 | 425 2156 | 429 2157 | 432 2158 | 438 2159 | 443 2160 | 447 2161 | 451 2162 | 456 2163 | 461 2164 | 465 2165 | 469 2166 | 473 2167 | 477 2168 | 480 2169 | 483 2170 | 486 2171 | 489 2172 | 490 2173 | 492 2174 | 495 2175 | 495 2176 | 495 2177 | 494 2178 | 492 2179 | 491 2180 | 488 2181 | 489 2182 | 488 2183 | 487 2184 | 485 2185 | 484 2186 | 483 2187 | 481 2188 | 481 2189 | 480 2190 | 476 2191 | 475 2192 | 473 2193 | 474 2194 | 475 2195 | 478 2196 | 488 2197 | 503 2198 | 526 2199 | 558 2200 | 596 2201 | 637 2202 | 678 2203 | 716 2204 | 752 2205 | 779 2206 | 798 2207 | 807 2208 | 807 2209 | 798 2210 | 779 2211 | 755 2212 | 725 2213 | 689 2214 | 652 2215 | 614 2216 | 576 2217 | 538 2218 | 504 2219 | 473 2220 | 447 2221 | 425 2222 | 407 2223 | 393 2224 | 384 2225 | 377 2226 | 373 2227 | 372 2228 | 374 2229 | 379 2230 | 388 2231 | 400 2232 | 415 2233 | 433 2234 | 453 2235 | 474 2236 | 495 2237 | 515 2238 | 532 2239 | 548 2240 | 560 2241 | 569 2242 | 574 2243 | 575 2244 | 574 2245 | 569 2246 | 561 2247 | 553 2248 | 542 2249 | 530 2250 | 516 2251 | 503 2252 | 490 2253 | 478 2254 | 465 2255 | 455 2256 | 444 2257 | 436 2258 | 430 2259 | 426 2260 | 422 2261 | 420 2262 | 421 2263 | 423 2264 | 424 2265 | 428 2266 | 432 2267 | 436 2268 | 442 2269 | 448 2270 | 454 2271 | 460 2272 | 464 2273 | 471 2274 | 476 2275 | 482 2276 | 486 2277 | 492 2278 | 494 2279 | 498 2280 | 500 2281 | 501 2282 | 503 2283 | 503 2284 | 502 2285 | 502 2286 | 501 2287 | 499 2288 | 498 2289 | 496 2290 | 493 2291 | 491 2292 | 488 2293 | 488 2294 | 486 2295 | 486 2296 | 487 2297 | 495 2298 | 508 2299 | 528 2300 | 555 2301 | 586 2302 | 623 2303 | 659 2304 | 693 2305 | 726 2306 | 753 2307 | 772 2308 | 784 2309 | 786 2310 | 780 2311 | 765 2312 | 744 2313 | 719 2314 | 687 2315 | 654 2316 | 619 2317 | 584 2318 | 550 2319 | 516 2320 | 488 2321 | 463 2322 | 440 2323 | 421 2324 | 406 2325 | 397 2326 | 390 2327 | 386 2328 | 384 2329 | 385 2330 | 387 2331 | 393 2332 | 402 2333 | 415 2334 | 431 2335 | 448 2336 | 467 2337 | 488 2338 | 507 2339 | 524 2340 | 541 2341 | 553 2342 | 562 2343 | 568 2344 | 571 2345 | 570 2346 | 565 2347 | 560 2348 | 551 2349 | 542 2350 | 530 2351 | 516 2352 | 502 2353 | 488 2354 | 476 2355 | 465 2356 | 455 2357 | 447 2358 | 438 2359 | 431 2360 | 426 2361 | 423 2362 | 422 2363 | 421 2364 | 422 2365 | 423 2366 | 426 2367 | 431 2368 | 436 2369 | 441 2370 | 448 2371 | 453 2372 | 459 2373 | 465 2374 | 472 2375 | 479 2376 | 484 2377 | 488 2378 | 493 2379 | 498 2380 | 502 2381 | 503 2382 | 504 2383 | 506 2384 | 506 2385 | 505 2386 | 503 2387 | 501 2388 | 499 2389 | 498 2390 | 496 2391 | 494 2392 | 490 2393 | 490 2394 | 493 2395 | 500 2396 | 515 2397 | 536 2398 | 566 2399 | 603 2400 | 641 2401 | 681 2402 | 721 2403 | 758 2404 | 786 2405 | 809 2406 | 822 2407 | 824 2408 | 815 2409 | 799 2410 | 777 2411 | 748 2412 | 715 2413 | 679 2414 | 642 2415 | 603 2416 | 566 2417 | 532 2418 | 503 2419 | 477 2420 | 454 2421 | 437 2422 | 425 2423 | 415 2424 | 409 2425 | 404 2426 | 404 2427 | 402 2428 | 406 2429 | 410 2430 | 417 2431 | 426 2432 | 438 2433 | 452 2434 | 466 2435 | 479 2436 | 494 2437 | 508 2438 | 521 2439 | 529 2440 | 538 2441 | 544 2442 | 547 2443 | 548 2444 | 546 2445 | 543 2446 | 536 2447 | 529 2448 | 519 2449 | 509 2450 | 500 2451 | 490 2452 | 481 2453 | 472 2454 | 464 2455 | 456 2456 | 449 2457 | 443 2458 | 437 2459 | 433 2460 | 430 2461 | 428 2462 | 427 2463 | 425 2464 | 427 2465 | 429 2466 | 431 2467 | 435 2468 | 440 2469 | 444 2470 | 448 2471 | 452 2472 | 459 2473 | 464 2474 | 469 2475 | 474 2476 | 479 2477 | 482 2478 | 485 2479 | 489 2480 | 491 2481 | 492 2482 | 493 2483 | 494 2484 | -------------------------------------------------------------------------------- /heartpy/data/data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/heartpy/data/data.mat -------------------------------------------------------------------------------- /heartpy/data/data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/heartpy/data/data2.mat -------------------------------------------------------------------------------- /heartpy/data/data3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/heartpy/data/data3.mat -------------------------------------------------------------------------------- /heartpy/datautils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Functions for loading and slicing data 3 | ''' 4 | 5 | from datetime import datetime 6 | from pkg_resources import resource_filename 7 | 8 | import numpy as np 9 | from scipy.io import loadmat 10 | from scipy.ndimage.filters import uniform_filter1d 11 | 12 | __all__ = ['get_data', 13 | 'get_samplerate_mstimer', 14 | 'get_samplerate_datetime', 15 | 'rolling_mean', 16 | 'outliers_iqr_method', 17 | 'outliers_modified_z', 18 | 'MAD', 19 | 'load_exampledata'] 20 | 21 | 22 | def get_data(filename, delim=',', column_name='None', encoding=None, 23 | ignore_extension=False): 24 | '''load data from file 25 | 26 | Function to load data from a .CSV or .MAT file into numpy array. 27 | File can be accessed from local disk or url. 28 | 29 | Parameters 30 | ---------- 31 | filename : string 32 | absolute or relative path to the file object to read 33 | 34 | delim : string 35 | the delimiter used if CSV file passed 36 | default : ',' 37 | 38 | column_name : string 39 | for CSV files with header: specify column that contains the data 40 | for matlab files it specifies the table name that contains the data 41 | default : 'None' 42 | 43 | ignore_extension : bool 44 | if True, extension is not tested, use for example for files where 45 | the extention is not .csv or .txt but the data is formatted as if 46 | it is. 47 | default : False 48 | 49 | Returns 50 | ------- 51 | out : 1-d numpy array 52 | array containing the data from the requested column of the specified file 53 | 54 | Examples 55 | -------- 56 | As an example, let's load two example data files included in the package 57 | For this we use pkg_resources for automated testing purposes, you don't need 58 | this when using the function. 59 | 60 | >>> from pkg_resources import resource_filename 61 | >>> filepath = resource_filename(__name__, 'data/data.csv') 62 | 63 | So, assuming your file lives at 'filepath', you open it as such: 64 | 65 | >>> get_data(filepath) 66 | array([530., 518., 506., ..., 492., 493., 494.]) 67 | 68 | Files with multiple columns can be opened by specifying the 'column_name' where 69 | the data resides: 70 | 71 | >>> filepath = resource_filename(__name__, 'data/data2.csv') 72 | 73 | Again you don't need the above. It is there for automated testing. 74 | 75 | >>> get_data(filepath, column_name='timer') 76 | array([0.00000000e+00, 8.54790319e+00, 1.70958064e+01, ..., 77 | 1.28192904e+05, 1.28201452e+05, 1.28210000e+05]) 78 | 79 | You can open matlab files in much the same way by specifying the column 80 | where the data lives: 81 | 82 | >>> filepath = resource_filename(__name__, 'data/data2.mat') 83 | 84 | Again you don't need the above. It is there for automated testing. 85 | Open matlab file by specifying the column name as well: 86 | 87 | >>> get_data(filepath, column_name='hr') 88 | array([515., 514., 514., ..., 492., 494., 496.]) 89 | 90 | You can any csv formatted text file no matter the extension if you 91 | set ignore_extension to True: 92 | 93 | >>> filepath = resource_filename(__name__, 'data/data.log') 94 | >>> get_data(filepath, ignore_extension = True) 95 | array([530., 518., 506., ..., 492., 493., 494.]) 96 | 97 | You can specify column names in the same way when using ignore_extension 98 | 99 | >>> filepath = resource_filename(__name__, 'data/data2.log') 100 | >>> data = get_data(filepath, column_name = 'hr', ignore_extension = True) 101 | ''' 102 | file_ext = filename.split('.')[-1] 103 | if file_ext == 'csv' or file_ext == 'txt': 104 | if column_name != 'None': 105 | hrdata = np.genfromtxt(filename, delimiter=delim, names=True, dtype=None, encoding=None) 106 | try: 107 | hrdata = hrdata[column_name] 108 | except Exception as error: 109 | raise LookupError('\nError loading column "%s" from file "%s". \ 110 | Is column name specified correctly?\n The following error was provided: %s' 111 | %(column_name, filename, error)) 112 | elif column_name == 'None': 113 | hrdata = np.genfromtxt(filename, delimiter=delim, dtype=np.float64) 114 | else: # pragma: no cover 115 | raise LookupError('\nError: column name "%s" not found in header of "%s".\n' 116 | %(column_name, filename)) 117 | elif file_ext == 'mat': 118 | data = loadmat(filename) 119 | if column_name != "None": 120 | hrdata = np.array(data[column_name][:, 0], dtype=np.float64) 121 | else: # pragma: no cover 122 | raise LookupError('\nError: column name required for Matlab .mat files\n\n') 123 | else: 124 | if ignore_extension: 125 | if column_name != 'None': 126 | hrdata = np.genfromtxt(filename, delimiter=delim, names=True, dtype=None, encoding=None) 127 | try: 128 | hrdata = hrdata[column_name] 129 | except Exception as error: 130 | raise LookupError('\nError loading column "%s" from file "%s". \ 131 | Is column name specified correctly?\n' 132 | %(column_name, filename)) 133 | elif column_name == 'None': # pragma: no cover 134 | hrdata = np.genfromtxt(filename, delimiter=delim, dtype=np.float64) 135 | else: # pragma: no cover 136 | raise LookupError('\nError: column name "%s" not found in header of "%s".\n' 137 | %(column_name, filename)) 138 | else: 139 | raise IncorrectFileType('unknown file format') 140 | return None 141 | return hrdata 142 | 143 | 144 | def get_samplerate_mstimer(timerdata): 145 | '''detemine sample rate based on ms timer 146 | 147 | Function to determine sample rate of data from ms-based timer list or array. 148 | 149 | Parameters 150 | ---------- 151 | timerdata : 1d numpy array or list 152 | sequence containing values of a timer, in ms 153 | 154 | Returns 155 | ------- 156 | out : float 157 | the sample rate as determined from the timer sequence provided 158 | 159 | Examples 160 | -------- 161 | first we load a provided example dataset 162 | 163 | >>> data, timer = load_exampledata(example = 1) 164 | 165 | since it's a timer that counts miliseconds, we use this function. 166 | Let's also round to three decimals 167 | 168 | >>> round(get_samplerate_mstimer(timer), 3) 169 | 116.996 170 | 171 | of course if another time unit is used, converting it to ms-based 172 | should be trivial. 173 | ''' 174 | sample_rate = ((len(timerdata) / (timerdata[-1]-timerdata[0]))*1000) 175 | return sample_rate 176 | 177 | 178 | def get_samplerate_datetime(datetimedata, timeformat='%H:%M:%S.%f'): 179 | '''determine sample rate based on datetime 180 | 181 | Function to determine sample rate of data from datetime-based timer 182 | list or array. 183 | 184 | Parameters 185 | ---------- 186 | timerdata : 1-d numpy array or list 187 | sequence containing datetime strings 188 | 189 | timeformat : string 190 | the format of the datetime-strings in datetimedata 191 | default : '%H:%M:%S.f' (24-hour based time including ms: e.g. 21:43:12.569) 192 | 193 | Returns 194 | ------- 195 | out : float 196 | the sample rate as determined from the timer sequence provided 197 | 198 | Examples 199 | -------- 200 | We load the data like before 201 | 202 | >>> data, timer = load_exampledata(example = 2) 203 | >>> timer[0] 204 | '2016-11-24 13:58:58.081000' 205 | 206 | Note that we need to specify the timeformat used so that datetime understands 207 | what it's working with: 208 | 209 | >>> round(get_samplerate_datetime(timer, timeformat = '%Y-%m-%d %H:%M:%S.%f'), 3) 210 | 100.42 211 | ''' 212 | datetimedata = np.asarray(datetimedata, dtype='str') #cast as str in case of np.bytes type 213 | elapsed = ((datetime.strptime(datetimedata[-1], timeformat) - 214 | datetime.strptime(datetimedata[0], timeformat)).total_seconds()) 215 | sample_rate = (len(datetimedata) / elapsed) 216 | return sample_rate 217 | 218 | 219 | def rolling_mean(data, windowsize, sample_rate): 220 | '''calculates rolling mean 221 | 222 | Function to calculate the rolling mean (also: moving average) over the passed data. 223 | 224 | Parameters 225 | ---------- 226 | data : 1-dimensional numpy array or list 227 | sequence containing data over which rolling mean is to be computed 228 | 229 | windowsize : int or float 230 | the window size to use, in seconds 231 | calculated as windowsize * sample_rate 232 | 233 | sample_rate : int or float 234 | the sample rate of the data set 235 | 236 | Returns 237 | ------- 238 | out : 1-d numpy array 239 | sequence containing computed rolling mean 240 | 241 | Examples 242 | -------- 243 | >>> data, _ = load_exampledata(example = 1) 244 | >>> rmean = rolling_mean(data, windowsize=0.75, sample_rate=100) 245 | >>> rmean[100:110] 246 | array([514.49333333, 514.49333333, 514.49333333, 514.46666667, 247 | 514.45333333, 514.45333333, 514.45333333, 514.45333333, 248 | 514.48 , 514.52 ]) 249 | ''' 250 | 251 | rol_mean = uniform_filter1d(np.asarray(data, dtype='float'), size=int(windowsize*sample_rate)) 252 | return rol_mean 253 | 254 | 255 | def outliers_iqr_method(hrvalues): 256 | '''removes outliers 257 | 258 | Function that removes outliers based on the interquartile range method and 259 | substitutes them for the median 260 | see: https://en.wikipedia.org/wiki/Interquartile_range 261 | 262 | Parameters 263 | ---------- 264 | hrvalues : 1-d numpy array or list 265 | sequence of values, from which outliers need to be identified 266 | 267 | Returns 268 | ------- 269 | out : tuple 270 | [0] cleaned sequence with identified outliers substituted for the median 271 | [1] list of indices that have been replaced in the original array or list 272 | 273 | Examples 274 | -------- 275 | >>> x = [2, 4, 3, 4, 6, 7, 35, 2, 3, 4] 276 | >>> outliers_iqr_method(x) 277 | ([2, 4, 3, 4, 6, 7, 4.0, 2, 3, 4], [6]) 278 | ''' 279 | med = np.median(hrvalues) 280 | q1, q3 = np.percentile(hrvalues, [25, 75]) 281 | iqr = q3 - q1 282 | lower = q1 - (1.5 * iqr) 283 | upper = q3 + (1.5 * iqr) 284 | output = [] 285 | replaced_indices = [] 286 | for i in range(0,len(hrvalues)): 287 | if hrvalues[i] < lower or hrvalues[i] > upper: 288 | output.append(med) 289 | replaced_indices.append(i) 290 | else: 291 | output.append(hrvalues[i]) 292 | return output, replaced_indices 293 | 294 | 295 | def outliers_modified_z(hrvalues): 296 | '''removes outliers 297 | 298 | Function that removes outliers based on the modified Z-score metric and 299 | substitutes them for the median 300 | 301 | Parameters 302 | ---------- 303 | hrvalues : 1-d numpy array or list 304 | sequence of values, from which outliers need to be identified 305 | 306 | Returns 307 | ------- 308 | out : tuple 309 | [0] cleaned sequence with identified outliers substituted for the median 310 | [1] list of indices that have been replaced in the original array or list 311 | 312 | Examples 313 | -------- 314 | >>> x = [2, 4, 3, 4, 6, 7, 35, 2, 3, 4] 315 | >>> outliers_modified_z(x) 316 | ([2, 4, 3, 4, 6, 7, 4.0, 2, 3, 4], [6]) 317 | ''' 318 | hrvalues = np.array(hrvalues) 319 | threshold = 3.5 320 | med = np.median(hrvalues) 321 | mean_abs_dev = MAD(hrvalues) 322 | modified_z_result = 0.6745 * (hrvalues - med) / mean_abs_dev 323 | output = [] 324 | replaced_indices = [] 325 | for i in range(0, len(hrvalues)): 326 | if np.abs(modified_z_result[i]) <= threshold: 327 | output.append(hrvalues[i]) 328 | else: 329 | output.append(med) 330 | replaced_indices.append(i) 331 | return output, replaced_indices 332 | 333 | 334 | def MAD(data): 335 | '''computes median absolute deviation 336 | 337 | Function that compute median absolute deviation of data slice 338 | See: https://en.wikipedia.org/wiki/Median_absolute_deviation 339 | 340 | Parameters 341 | ---------- 342 | data : 1-dimensional numpy array or list 343 | sequence containing data over which to compute the MAD 344 | 345 | Returns 346 | ------- 347 | out : float 348 | the Median Absolute Deviation as computed 349 | 350 | Examples 351 | -------- 352 | >>> x = [2, 4, 3, 4, 6, 7, 35, 2, 3, 4] 353 | >>> MAD(x) 354 | 1.5 355 | ''' 356 | med = np.median(data) 357 | return np.median(np.abs(data - med)) 358 | 359 | 360 | def load_exampledata(example=0): 361 | '''loads example data 362 | 363 | Function to load one of the example datasets included in HeartPy 364 | and used in the documentation. 365 | 366 | Parameters 367 | ---------- 368 | example : int (0, 1, 2) 369 | selects example data used in docs of three datafiles. 370 | Available (see github repo for source of files): 371 | 0 : data.csv 372 | 1 : data2.csv 373 | 2 : data3.csv 374 | default : 0 375 | 376 | Returns 377 | ------- 378 | out : tuple of two arrays 379 | Contains the data and timer column. If no timer data is 380 | available, such as in example 0, an empty second 381 | array is returned. 382 | 383 | Examples 384 | -------- 385 | This function can load one of the three example data files provided 386 | with HeartPy. It returns both the data and a timer if that is present 387 | 388 | For example: 389 | 390 | >>> data, _ = load_exampledata(0) 391 | >>> data[0:5] 392 | array([530., 518., 506., 494., 483.]) 393 | 394 | And another example: 395 | 396 | >>> data, timer = load_exampledata(1) 397 | >>> [round(x, 2) for x in timer[0:5]] 398 | [0.0, 8.55, 17.1, 25.64, 34.19] 399 | ''' 400 | 401 | timer = [] 402 | 403 | if example == 0: 404 | path = path = 'data/data.csv' 405 | filepath = resource_filename(__name__, path) 406 | data = get_data(filepath) 407 | elif example == 1: 408 | path = path = 'data/data2.csv' 409 | filepath = resource_filename(__name__, path) 410 | data = get_data(filepath, column_name = 'hr') 411 | timer = get_data(filepath, column_name = 'timer') 412 | elif example == 2: 413 | path = path = 'data/data3.csv' 414 | filepath = resource_filename(__name__, path) 415 | data = get_data(filepath, column_name = 'hr') 416 | timer = get_data(filepath, column_name = 'datetime') 417 | else: 418 | raise ValueError('Incorrect data file specified.\ 419 | available datafiles are data.csv (0), data2.csv(1), data3.csv(2).') 420 | 421 | return data, timer 422 | -------------------------------------------------------------------------------- /heartpy/exceptions.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Custom exceptions and warnings for HeartPy 3 | ''' 4 | 5 | __all__ = ['BadSignalWarning'] 6 | 7 | class BadSignalWarning(UserWarning): 8 | ''' 9 | warning class to raise when no heart rate is detectable 10 | in supplied signal. 11 | 12 | This warning notifies the user that the supplied signal is 13 | of insufficient quality and/or does not contain enough information 14 | to properly process. 15 | ''' 16 | 17 | class IncorrectFileType(UserWarning): 18 | ''' 19 | warning class to raise when incorrect file type or incorrectly 20 | formatted file is provided to data loader 21 | ''' 22 | -------------------------------------------------------------------------------- /heartpy/filtering.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Functions for data filtering tasks. 3 | ''' 4 | 5 | from scipy.signal import butter, filtfilt, iirnotch, savgol_filter 6 | import numpy as np 7 | 8 | from .datautils import MAD 9 | 10 | __all__ = ['filter_signal', 11 | 'hampel_filter', 12 | 'hampel_correcter', 13 | 'smooth_signal'] 14 | 15 | def butter_lowpass(cutoff, sample_rate, order=2): 16 | '''standard lowpass filter. 17 | 18 | Function that defines standard Butterworth lowpass filter 19 | 20 | Parameters 21 | ---------- 22 | cutoff : int or float 23 | frequency in Hz that acts as cutoff for filter. 24 | All frequencies above cutoff are filtered out. 25 | 26 | sample_rate : int or float 27 | sample rate of the supplied signal 28 | 29 | order : int 30 | filter order, defines the strength of the roll-off 31 | around the cutoff frequency. Typically orders above 6 32 | are not used frequently. 33 | default: 2 34 | 35 | Returns 36 | ------- 37 | out : tuple 38 | numerator and denominator (b, a) polynomials 39 | of the defined Butterworth IIR filter. 40 | 41 | Examples 42 | -------- 43 | >>> b, a = butter_lowpass(cutoff = 2, sample_rate = 100, order = 2) 44 | >>> b, a = butter_lowpass(cutoff = 4.5, sample_rate = 12.5, order = 5) 45 | ''' 46 | nyq = 0.5 * sample_rate 47 | normal_cutoff = cutoff / nyq 48 | b, a = butter(order, normal_cutoff, btype='low', analog=False) 49 | return b, a 50 | 51 | 52 | def butter_highpass(cutoff, sample_rate, order=2): 53 | '''standard highpass filter. 54 | 55 | Function that defines standard Butterworth highpass filter 56 | 57 | Parameters 58 | ---------- 59 | cutoff : int or float 60 | frequency in Hz that acts as cutoff for filter. 61 | All frequencies below cutoff are filtered out. 62 | 63 | sample_rate : int or float 64 | sample rate of the supplied signal 65 | 66 | order : int 67 | filter order, defines the strength of the roll-off 68 | around the cutoff frequency. Typically orders above 6 69 | are not used frequently. 70 | default : 2 71 | 72 | Returns 73 | ------- 74 | out : tuple 75 | numerator and denominator (b, a) polynomials 76 | of the defined Butterworth IIR filter. 77 | 78 | Examples 79 | -------- 80 | we can specify the cutoff and sample_rate as ints or floats. 81 | 82 | >>> b, a = butter_highpass(cutoff = 2, sample_rate = 100, order = 2) 83 | >>> b, a = butter_highpass(cutoff = 4.5, sample_rate = 12.5, order = 5) 84 | ''' 85 | nyq = 0.5 * sample_rate 86 | normal_cutoff = cutoff / nyq 87 | b, a = butter(order, normal_cutoff, btype='high', analog=False) 88 | return b, a 89 | 90 | 91 | def butter_bandpass(lowcut, highcut, sample_rate, order=2): 92 | '''standard bandpass filter. 93 | Function that defines standard Butterworth bandpass filter. 94 | Filters out frequencies outside the frequency range 95 | defined by [lowcut, highcut]. 96 | 97 | Parameters 98 | ---------- 99 | lowcut : int or float 100 | Lower frequency bound of the filter in Hz 101 | 102 | highcut : int or float 103 | Upper frequency bound of the filter in Hz 104 | 105 | sample_rate : int or float 106 | sample rate of the supplied signal 107 | 108 | order : int 109 | filter order, defines the strength of the roll-off 110 | around the cutoff frequency. Typically orders above 6 111 | are not used frequently. 112 | default : 2 113 | 114 | Returns 115 | ------- 116 | out : tuple 117 | numerator and denominator (b, a) polynomials 118 | of the defined Butterworth IIR filter. 119 | 120 | Examples 121 | -------- 122 | we can specify lowcut, highcut and sample_rate as ints or floats. 123 | 124 | >>> b, a = butter_bandpass(lowcut = 1, highcut = 6, sample_rate = 100, order = 2) 125 | >>> b, a = butter_bandpass(lowcut = 0.4, highcut = 3.7, sample_rate = 72.6, order = 2) 126 | ''' 127 | nyq = 0.5 * sample_rate 128 | low = lowcut / nyq 129 | high = highcut / nyq 130 | b, a = butter(order, [low, high], btype='band') 131 | return b, a 132 | 133 | 134 | def filter_signal(data, cutoff, sample_rate, order=2, filtertype='lowpass', 135 | return_top = False): 136 | '''Apply the specified filter 137 | 138 | Function that applies the specified lowpass, highpass or bandpass filter to 139 | the provided dataset. 140 | 141 | Parameters 142 | ---------- 143 | data : 1-dimensional numpy array or list 144 | Sequence containing the to be filtered data 145 | 146 | cutoff : int, float or tuple 147 | the cutoff frequency of the filter. Expects float for low and high types 148 | and for bandpass filter expects list or array of format [lower_bound, higher_bound] 149 | 150 | sample_rate : int or float 151 | the sample rate with which the passed data sequence was sampled 152 | 153 | order : int 154 | the filter order 155 | default : 2 156 | 157 | filtertype : str 158 | The type of filter to use. Available: 159 | - lowpass : a lowpass butterworth filter 160 | - highpass : a highpass butterworth filter 161 | - bandpass : a bandpass butterworth filter 162 | - notch : a notch filter around specified frequency range 163 | both the highpass and notch filter are useful for removing baseline wander. The notch 164 | filter is especially useful for removing baseling wander in ECG signals. 165 | 166 | 167 | Returns 168 | ------- 169 | out : 1d array 170 | 1d array containing the filtered data 171 | 172 | Examples 173 | -------- 174 | >>> import numpy as np 175 | >>> import heartpy as hp 176 | 177 | Using standard data provided 178 | 179 | >>> data, _ = hp.load_exampledata(0) 180 | 181 | We can filter the signal, for example with a lowpass cutting out all frequencies 182 | of 5Hz and greater (with a sloping frequency cutoff) 183 | 184 | >>> filtered = filter_signal(data, cutoff = 5, sample_rate = 100.0, order = 3, filtertype='lowpass') 185 | >>> print(np.around(filtered[0:6], 3)) 186 | [530.175 517.893 505.768 494.002 482.789 472.315] 187 | 188 | Or we can cut out all frequencies below 0.75Hz with a highpass filter: 189 | 190 | >>> filtered = filter_signal(data, cutoff = 0.75, sample_rate = 100.0, order = 3, filtertype='highpass') 191 | >>> print(np.around(filtered[0:6], 3)) 192 | [-17.975 -28.271 -38.609 -48.992 -58.422 -67.902] 193 | 194 | Or specify a range (here: 0.75 - 3.5Hz), outside of which all frequencies 195 | are cut out. 196 | 197 | >>> filtered = filter_signal(data, cutoff = [0.75, 3.5], sample_rate = 100.0, 198 | ... order = 3, filtertype='bandpass') 199 | >>> print(np.around(filtered[0:6], 3)) 200 | [-12.012 -23.159 -34.261 -45.12 -55.541 -65.336] 201 | 202 | A 'Notch' filtertype is also available (see remove_baseline_wander). 203 | 204 | >>> filtered = filter_signal(data, cutoff = 0.05, sample_rate = 100.0, filtertype='notch') 205 | 206 | Finally we can use the return_top flag to only return the filter response that 207 | has amplitute above zero. We're only interested in the peaks, and sometimes 208 | this can improve peak prediction: 209 | 210 | >>> filtered = filter_signal(data, cutoff = [0.75, 3.5], sample_rate = 100.0, 211 | ... order = 3, filtertype='bandpass', return_top = True) 212 | >>> print(np.around(filtered[48:53], 3)) 213 | [ 0. 0. 0.409 17.088 35.673] 214 | ''' 215 | if filtertype.lower() == 'lowpass': 216 | b, a = butter_lowpass(cutoff, sample_rate, order=order) 217 | elif filtertype.lower() == 'highpass': 218 | b, a = butter_highpass(cutoff, sample_rate, order=order) 219 | elif filtertype.lower() == 'bandpass': 220 | assert type(cutoff) == tuple or list or np.array, 'if bandpass filter is specified, \ 221 | cutoff needs to be array or tuple specifying lower and upper bound: [lower, upper].' 222 | b, a = butter_bandpass(cutoff[0], cutoff[1], sample_rate, order=order) 223 | elif filtertype.lower() == 'notch': 224 | b, a = iirnotch(cutoff, Q = 0.005, fs = sample_rate) 225 | else: 226 | raise ValueError('filtertype: %s is unknown, available are: \ 227 | lowpass, highpass, bandpass, and notch' %filtertype) 228 | 229 | filtered_data = filtfilt(b, a, data) 230 | 231 | if return_top: 232 | return np.clip(filtered_data, a_min = 0, a_max = None) 233 | else: 234 | return filtered_data 235 | 236 | 237 | def remove_baseline_wander(data, sample_rate, cutoff=0.05): 238 | '''removes baseline wander 239 | 240 | Function that uses a Notch filter to remove baseline 241 | wander from (especially) ECG signals 242 | 243 | Parameters 244 | ---------- 245 | data : 1-dimensional numpy array or list 246 | Sequence containing the to be filtered data 247 | 248 | sample_rate : int or float 249 | the sample rate with which the passed data sequence was sampled 250 | 251 | cutoff : int, float 252 | the cutoff frequency of the Notch filter. We recommend 0.05Hz. 253 | default : 0.05 254 | 255 | Returns 256 | ------- 257 | out : 1d array 258 | 1d array containing the filtered data 259 | 260 | Examples 261 | -------- 262 | >>> import heartpy as hp 263 | >>> data, _ = hp.load_exampledata(0) 264 | 265 | baseline wander is removed by calling the function and specifying 266 | the data and sample rate. 267 | 268 | >>> filtered = remove_baseline_wander(data, 100.0) 269 | ''' 270 | 271 | return filter_signal(data = data, cutoff = cutoff, sample_rate = sample_rate, 272 | filtertype='notch') 273 | 274 | 275 | def hampel_filter(data, filtsize=6): 276 | '''Detect outliers based on hampel filter 277 | 278 | Funcion that detects outliers based on a hampel filter. 279 | The filter takes datapoint and six surrounding samples. 280 | Detect outliers based on being more than 3std from window mean. 281 | See: 282 | https://www.mathworks.com/help/signal/ref/hampel.html 283 | 284 | Parameters 285 | ---------- 286 | data : 1d list or array 287 | list or array containing the data to be filtered 288 | 289 | filtsize : int 290 | the filter size expressed the number of datapoints 291 | taken surrounding the analysed datapoint. a filtsize 292 | of 6 means three datapoints on each side are taken. 293 | total filtersize is thus filtsize + 1 (datapoint evaluated) 294 | 295 | Returns 296 | ------- 297 | out : array containing filtered data 298 | 299 | Examples 300 | -------- 301 | >>> from .datautils import get_data, load_exampledata 302 | >>> data, _ = load_exampledata(0) 303 | >>> filtered = hampel_filter(data, filtsize = 6) 304 | >>> print('%i, %i' %(data[1232], filtered[1232])) 305 | 497, 496 306 | ''' 307 | 308 | #generate second list to prevent overwriting first 309 | #cast as array to be sure, in case list is passed 310 | output = np.copy(np.asarray(data)) 311 | onesided_filt = filtsize // 2 312 | for i in range(onesided_filt, len(data) - onesided_filt - 1): 313 | dataslice = output[i - onesided_filt : i + onesided_filt] 314 | mad = MAD(dataslice) 315 | median = np.median(dataslice) 316 | if output[i] > median + (3 * mad): 317 | output[i] = median 318 | return output 319 | 320 | 321 | def hampel_correcter(data, sample_rate): 322 | '''apply altered version of hampel filter to suppress noise. 323 | 324 | Function that returns te difference between data and 1-second 325 | windowed hampel median filter. Results in strong noise suppression 326 | characteristics, but relatively expensive to compute. 327 | 328 | Result on output measures is present but generally not large. However, 329 | use sparingly, and only when other means have been exhausted. 330 | 331 | Parameters 332 | ---------- 333 | data : 1d numpy array 334 | array containing the data to be filtered 335 | 336 | sample_rate : int or float 337 | sample rate with which data was recorded 338 | 339 | Returns 340 | ------- 341 | out : 1d numpy array 342 | array containing filtered data 343 | 344 | Examples 345 | -------- 346 | >>> from .datautils import get_data, load_exampledata 347 | >>> data, _ = load_exampledata(1) 348 | >>> filtered = hampel_correcter(data, sample_rate = 116.995) 349 | 350 | ''' 351 | 352 | return data - hampel_filter(data, filtsize=int(sample_rate)) 353 | 354 | 355 | def quotient_filter(RR_list, RR_list_mask = [], iterations=2): 356 | '''applies a quotient filter 357 | 358 | Function that applies a quotient filter as described in 359 | "Piskorki, J., Guzik, P. (2005), Filtering Poincare plots" 360 | 361 | Parameters 362 | ---------- 363 | RR_list - 1d array or list 364 | array or list of peak-peak intervals to be filtered 365 | 366 | RR_list_mask - 1d array or list 367 | array or list containing the mask for which intervals are 368 | rejected. If not supplied, it will be generated. Mask is 369 | zero for accepted intervals, one for rejected intervals. 370 | 371 | iterations - int 372 | how many times to apply the quotient filter. Multipled 373 | iterations have a stronger filtering effect 374 | default : 2 375 | 376 | Returns 377 | ------- 378 | RR_list_mask : 1d array 379 | mask for RR_list, 1 where intervals are rejected, 0 where 380 | intervals are accepted. 381 | 382 | Examples 383 | -------- 384 | Given some example data let's generate an RR-list first 385 | >>> import heartpy as hp 386 | >>> data, timer = hp.load_exampledata(1) 387 | >>> sample_rate = hp.get_samplerate_mstimer(timer) 388 | >>> wd, m = hp.process(data, sample_rate) 389 | >>> rr = wd['RR_list'] 390 | >>> rr_mask = wd['RR_masklist'] 391 | 392 | Given this data we can use this function to further clean the data: 393 | >>> new_mask = quotient_filter(rr, rr_mask) 394 | 395 | Although specifying the mask is optional, as you may not always have a 396 | pre-computed mask available: 397 | >>> new_mask = quotient_filter(rr) 398 | 399 | ''' 400 | 401 | if len(RR_list_mask) == 0: 402 | RR_list_mask = np.zeros((len(RR_list))) 403 | else: 404 | assert len(RR_list) == len(RR_list_mask), \ 405 | 'error: RR_list and RR_list_mask should be same length if RR_list_mask is specified' 406 | 407 | for iteration in range(iterations): 408 | for i in range(len(RR_list) - 1): 409 | if RR_list_mask[i] + RR_list_mask[i + 1] != 0: 410 | pass #skip if one of both intervals is already rejected 411 | elif 0.8 <= RR_list[i] / RR_list[i + 1] <= 1.2: 412 | pass #if R-R pair seems ok, do noting 413 | else: #update mask 414 | RR_list_mask[i] = 1 415 | #RR_list_mask[i + 1] = 1 416 | 417 | return np.asarray(RR_list_mask) 418 | 419 | 420 | def smooth_signal(data, sample_rate, window_length=None, polyorder=3): 421 | '''smooths given signal using savitzky-golay filter 422 | 423 | Function that smooths data using savitzky-golay filter using default settings. 424 | 425 | Functionality requested by Eirik Svendsen. Added since 1.2.4 426 | 427 | Parameters 428 | ---------- 429 | data : 1d array or list 430 | array or list containing the data to be filtered 431 | 432 | sample_rate : int or float 433 | the sample rate with which data is sampled 434 | 435 | window_length : int or None 436 | window length parameter for savitzky-golay filter, see Scipy.signal.savgol_filter docs. 437 | Must be odd, if an even int is given, one will be added to make it uneven. 438 | default : 0.1 * sample_rate 439 | 440 | polyorder : int 441 | the order of the polynomial fitted to the signal. See scipy.signal.savgol_filter docs. 442 | default : 3 443 | 444 | Returns 445 | ------- 446 | smoothed : 1d array 447 | array containing the smoothed data 448 | 449 | Examples 450 | -------- 451 | Given a fictional signal, a smoothed signal can be obtained by smooth_signal(): 452 | 453 | >>> x = [1, 3, 4, 5, 6, 7, 5, 3, 1, 1] 454 | >>> smoothed = smooth_signal(x, sample_rate = 2, window_length=4, polyorder=2) 455 | >>> np.around(smoothed[0:4], 3) 456 | array([1.114, 2.743, 4.086, 5. ]) 457 | 458 | If you don't specify the window_length, it is computed to be 10% of the 459 | sample rate (+1 if needed to make odd) 460 | >>> import heartpy as hp 461 | >>> data, timer = hp.load_exampledata(0) 462 | >>> smoothed = smooth_signal(data, sample_rate = 100) 463 | 464 | ''' 465 | 466 | if window_length == None: 467 | window_length = sample_rate // 10 468 | 469 | if window_length % 2 == 0 or window_length == 0: window_length += 1 470 | 471 | smoothed = savgol_filter(data, window_length = window_length, 472 | polyorder = polyorder) 473 | 474 | return smoothed -------------------------------------------------------------------------------- /heartpy/peakdetection.py: -------------------------------------------------------------------------------- 1 | ''' 2 | functions for peak detection and related tasks 3 | ''' 4 | 5 | import numpy as np 6 | from scipy.signal import resample 7 | 8 | from .analysis import calc_rr, update_rr 9 | from .exceptions import BadSignalWarning 10 | 11 | 12 | __all__ = ['make_windows', 13 | 'append_dict', 14 | 'detect_peaks', 15 | 'fit_peaks', 16 | 'check_peaks', 17 | 'check_binary_quality', 18 | 'interpolate_peaks'] 19 | 20 | 21 | def make_windows(data, sample_rate, windowsize=120, overlap=0, min_size=20): 22 | '''slices data into windows 23 | 24 | Funcion that slices data into windows for concurrent analysis. 25 | Used by process_segmentwise wrapper function. 26 | 27 | Parameters 28 | ---------- 29 | data : 1-d array 30 | array containing heart rate sensor data 31 | 32 | sample_rate : int or float 33 | sample rate of the data stream in 'data' 34 | 35 | windowsize : int 36 | size of the window that is sliced in seconds 37 | 38 | overlap : float 39 | fraction of overlap between two adjacent windows: 0 <= float < 1.0 40 | 41 | min_size : int 42 | the minimum size for the last (partial) window to be included. Very short windows 43 | might not stable for peak fitting, especially when significant noise is present. 44 | Slightly longer windows are likely stable but don't make much sense from a 45 | signal analysis perspective. 46 | 47 | Returns 48 | ------- 49 | out : array 50 | tuples of window indices 51 | 52 | Examples 53 | -------- 54 | Assuming a given example data file: 55 | 56 | >>> import heartpy as hp 57 | >>> data, _ = hp.load_exampledata(1) 58 | 59 | We can split the data into windows: 60 | 61 | >>> indices = make_windows(data, 100.0, windowsize = 30, overlap = 0.5, min_size = 20) 62 | >>> indices.shape 63 | (9, 2) 64 | 65 | Specifying min_size = -1 will include the last window no matter what: 66 | 67 | >>> indices = make_windows(data, 100.0, windowsize = 30, overlap = 0.5, min_size = -1) 68 | ''' 69 | ln = len(data) 70 | window = windowsize * sample_rate 71 | stepsize = (1 - overlap) * window 72 | start = 0 73 | end = window 74 | 75 | slices = [] 76 | while end < len(data): 77 | slices.append((start, end)) 78 | start += stepsize 79 | end += stepsize 80 | 81 | if min_size == -1: 82 | slices[-1] = (slices[-1][0], len(data)) 83 | elif (ln - start) / sample_rate >= min_size: 84 | slices.append((start, ln)) 85 | 86 | return np.array(slices, dtype=np.int32) 87 | 88 | 89 | def append_dict(dict_obj, measure_key, measure_value): 90 | '''appends data to keyed dict. 91 | 92 | Function that appends key to continuous dict, creates if doesn't exist. 93 | 94 | Parameters 95 | ---------- 96 | dict_obj : dict 97 | dictionary object that contains continuous output measures 98 | 99 | measure_key : str 100 | key for the measure to be stored in continuous_dict 101 | 102 | measure_value : any data container 103 | value to be appended to dictionary 104 | 105 | Returns 106 | ------- 107 | dict_obj : dict 108 | dictionary object passed to function, with specified data container appended 109 | 110 | Examples 111 | -------- 112 | Given a dict object 'example' with some data in it: 113 | 114 | >>> example = {} 115 | >>> example['call'] = ['hello'] 116 | 117 | We can use the function to append it: 118 | 119 | >>> example = append_dict(example, 'call', 'world') 120 | >>> example['call'] 121 | ['hello', 'world'] 122 | 123 | A new key will be created if it doesn't exist: 124 | 125 | >>> example = append_dict(example, 'different_key', 'hello there!') 126 | >>> sorted(example.keys()) 127 | ['call', 'different_key'] 128 | ''' 129 | try: 130 | dict_obj[measure_key].append(measure_value) 131 | except KeyError: 132 | dict_obj[measure_key] = [measure_value] 133 | return dict_obj 134 | 135 | 136 | def detect_peaks(hrdata, rol_mean, ma_perc, sample_rate, update_dict=True, working_data={}): 137 | '''detect peaks in signal 138 | 139 | Function that detects heartrate peaks in the given dataset. 140 | 141 | Parameters 142 | ---------- 143 | hr data : 1-d numpy array or list 144 | array or list containing the heart rate data 145 | 146 | rol_mean : 1-d numpy array 147 | array containing the rolling mean of the heart rate signal 148 | 149 | ma_perc : int or float 150 | the percentage with which to raise the rolling mean, 151 | used for fitting detection solutions to data 152 | 153 | sample_rate : int or float 154 | the sample rate of the provided data set 155 | 156 | update_dict : bool 157 | whether to update the peak information in the module's data structure 158 | Settable to False to allow this function to be re-used for example by 159 | the breath analysis module. 160 | default : True 161 | 162 | Examples 163 | -------- 164 | Normally part of the peak detection pipeline. Given the first example data 165 | it would work like this: 166 | 167 | >>> import heartpy as hp 168 | >>> from heartpy.datautils import rolling_mean 169 | >>> data, _ = hp.load_exampledata(0) 170 | >>> rol_mean = rolling_mean(data, windowsize = 0.75, sample_rate = 100.0) 171 | >>> wd = detect_peaks(data, rol_mean, ma_perc = 20, sample_rate = 100.0) 172 | 173 | Now the peaklist has been appended to the working data dict. Let's look 174 | at the first five peak positions: 175 | 176 | >>> wd['peaklist'][0:5] 177 | array([ 63, 165, 264, 360, 460], dtype=int64) 178 | ''' 179 | rmean = np.array(rol_mean) 180 | 181 | #rol_mean = rmean + ((rmean / 100) * ma_perc) 182 | mn = np.mean(rmean / 100) * ma_perc 183 | rol_mean = rmean + mn 184 | 185 | peaksx = np.where((hrdata > rol_mean))[0] 186 | peaksy = hrdata[peaksx] 187 | peakedges = np.concatenate((np.array([0]), 188 | (np.where(np.diff(peaksx) > 1)[0]), 189 | np.array([len(peaksx)]))) 190 | peaklist = [] 191 | 192 | for i in range(0, len(peakedges)-1): 193 | try: 194 | y_values = peaksy[peakedges[i]:peakedges[i+1]].tolist() 195 | peaklist.append(peaksx[peakedges[i] + y_values.index(max(y_values))]) 196 | except: 197 | pass 198 | 199 | if update_dict: 200 | working_data['peaklist'] = peaklist 201 | working_data['ybeat'] = [hrdata[x] for x in peaklist] 202 | working_data['rolling_mean'] = rol_mean 203 | working_data = calc_rr(working_data['peaklist'], sample_rate, 204 | working_data=working_data) 205 | if len(working_data['RR_list']) > 0: 206 | working_data['rrsd'] = np.std(working_data['RR_list']) 207 | else: 208 | working_data['rrsd'] = np.inf 209 | return working_data 210 | else: 211 | return peaklist, working_data 212 | 213 | 214 | def fit_peaks(hrdata, rol_mean, sample_rate, bpmmin=40, bpmmax=180, working_data={}): 215 | '''optimize for best peak detection 216 | 217 | Function that runs fitting with varying peak detection thresholds given a 218 | heart rate signal. 219 | 220 | Parameters 221 | ---------- 222 | hrdata : 1d array or list 223 | array or list containing the heart rate data 224 | 225 | rol_mean : 1-d array 226 | array containing the rolling mean of the heart rate signal 227 | 228 | sample_rate : int or float 229 | the sample rate of the data set 230 | 231 | bpmmin : int 232 | minimum value of bpm to see as likely 233 | default : 40 234 | 235 | bpmmax : int 236 | maximum value of bpm to see as likely 237 | default : 180 238 | 239 | Returns 240 | ------- 241 | working_data : dict 242 | dictionary object that contains all heartpy's working data (temp) objects. 243 | will be created if not passed to function 244 | 245 | Examples 246 | -------- 247 | Part of peak detection pipeline. Uses moving average as a peak detection 248 | threshold and rises it stepwise. Determines best fit by minimising 249 | standard deviation of peak-peak distances as well as getting a bpm that 250 | lies within the expected range. 251 | 252 | Given included example data let's show how this works 253 | 254 | >>> import heartpy as hp 255 | >>> from heartpy.datautils import rolling_mean 256 | >>> data, _ = hp.load_exampledata(0) 257 | >>> rol_mean = rolling_mean(data, windowsize = 0.75, sample_rate = 100.0) 258 | 259 | We can then call this function and let the optimizer do its work: 260 | 261 | >>> wd = fit_peaks(data, rol_mean, sample_rate = 100.0) 262 | 263 | Now the wd dict contains the best fit paramater(s): 264 | 265 | >>> wd['best'] 266 | 20 267 | 268 | This indicates the best fit can be obtained by raising the moving average 269 | with 20%. 270 | 271 | The results of the peak detection using these parameters are included too. 272 | To illustrate, these are the first five detected peaks: 273 | 274 | >>> wd['peaklist'][0:5] 275 | array([ 63, 165, 264, 360, 460], dtype=int64) 276 | 277 | and the corresponding peak-peak intervals: 278 | 279 | >>> wd['RR_list'][0:4] 280 | array([1020., 990., 960., 1000.]) 281 | ''' 282 | 283 | # moving average values to test 284 | ma_perc_list = [5, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 150, 200, 300] 285 | 286 | rrsd = [] 287 | valid_ma = [] 288 | 289 | for ma_perc in ma_perc_list: 290 | working_data = detect_peaks(hrdata, rol_mean, ma_perc, sample_rate, 291 | update_dict=True, working_data=working_data) 292 | bpm = ((len(working_data['peaklist'])/(len(hrdata)/sample_rate))*60) 293 | rrsd.append([working_data['rrsd'], bpm, ma_perc]) 294 | 295 | for _rrsd, _bpm, _ma_perc in rrsd: 296 | if (_rrsd > 0.1) and ((bpmmin <= _bpm <= bpmmax)): 297 | valid_ma.append([_rrsd, _ma_perc]) 298 | 299 | if len(valid_ma) > 0: 300 | working_data['best'] = min(valid_ma, key=lambda t: t[0])[1] 301 | working_data = detect_peaks(hrdata, rol_mean, min(valid_ma, key=lambda t: t[0])[1], 302 | sample_rate, update_dict=True, working_data=working_data) 303 | return working_data 304 | else: 305 | raise BadSignalWarning('\n----------------\nCould not determine best fit for \ 306 | given signal. Please check the source signal.\n Probable causes:\n- detected heart rate falls \ 307 | outside of bpmmin<->bpmmax constraints\n- no detectable heart rate present in signal\n\ 308 | - very noisy signal (consider filtering and scaling)\nIf you\'re sure the signal contains heart\ 309 | rate data, consider filtering and/or scaling first.\n----------------\n') 310 | 311 | 312 | def check_peaks(rr_arr, peaklist, ybeat, reject_segmentwise=False, working_data={}): 313 | '''find anomalous peaks. 314 | 315 | Funcion that checks peaks for outliers based on anomalous peak-peak distances and corrects 316 | by excluding them from further analysis. 317 | 318 | Parameters 319 | ---------- 320 | rr_arr : 1d array or list 321 | list or array containing peak-peak intervals 322 | 323 | peaklist : 1d array or list 324 | list or array containing detected peak positions 325 | 326 | ybeat : 1d array or list 327 | list or array containing corresponding signal values at 328 | detected peak positions. Used for plotting functionality 329 | later on. 330 | 331 | reject_segmentwise : bool 332 | if set, checks segments per 10 detected peaks. Marks segment 333 | as rejected if 30% of peaks are rejected. 334 | default : False 335 | 336 | working_data : dict 337 | dictionary object that contains all heartpy's working data (temp) objects. 338 | will be created if not passed to function 339 | 340 | Returns 341 | ------- 342 | working_data : dict 343 | working_data dictionary object containing all of heartpy's temp objects 344 | 345 | Examples 346 | -------- 347 | Part of peak detection pipeline. No standalone examples exist. See docstring 348 | for hp.process() function for more info 349 | ''' 350 | 351 | rr_arr = np.array(rr_arr) 352 | peaklist = np.array(peaklist) 353 | ybeat = np.array(ybeat) 354 | 355 | # define RR range as mean +/- 30%, with a minimum of 300 356 | mean_rr = np.mean(rr_arr) 357 | thirty_perc = 0.3 * mean_rr 358 | if thirty_perc <= 300: 359 | upper_threshold = mean_rr + 300 360 | lower_threshold = mean_rr - 300 361 | else: 362 | upper_threshold = mean_rr + thirty_perc 363 | lower_threshold = mean_rr - thirty_perc 364 | 365 | # identify peaks to exclude based on RR interval 366 | rem_idx = np.where((rr_arr <= lower_threshold) | (rr_arr >= upper_threshold))[0] + 1 367 | 368 | working_data['removed_beats'] = peaklist[rem_idx] 369 | working_data['removed_beats_y'] = ybeat[rem_idx] 370 | working_data['binary_peaklist'] = np.asarray([0 if x in working_data['removed_beats'] 371 | else 1 for x in peaklist]) 372 | 373 | if reject_segmentwise: 374 | working_data = check_binary_quality(peaklist, working_data['binary_peaklist'], 375 | working_data=working_data) 376 | 377 | working_data = update_rr(working_data=working_data) 378 | 379 | return working_data 380 | 381 | 382 | def check_binary_quality(peaklist, binary_peaklist, maxrejects=3, working_data={}): 383 | '''checks signal in chunks of 10 beats. 384 | 385 | Function that checks signal in chunks of 10 beats. It zeros out chunk if 386 | number of rejected peaks > maxrejects. Also marks rejected segment coordinates 387 | in tuples (x[0], x[1] in working_data['rejected_segments'] 388 | 389 | Parameters 390 | ---------- 391 | peaklist : 1d array or list 392 | list or array containing detected peak positions 393 | 394 | binary_peaklist : 1d array or list 395 | list or array containing mask for peaklist, coding which peaks are rejected 396 | 397 | maxjerects : int 398 | maximum number of rejected peaks per 10-beat window 399 | default : 3 400 | 401 | working_data : dict 402 | dictionary object that contains all heartpy's working data (temp) objects. 403 | will be created if not passed to function 404 | 405 | Returns 406 | ------- 407 | working_data : dict 408 | working_data dictionary object containing all of heartpy's temp objects 409 | 410 | Examples 411 | -------- 412 | Part of peak detection pipeline. No standalone examples exist. See docstring 413 | for hp.process() function for more info 414 | 415 | Given some peaklist and binary mask: 416 | >>> peaklist = [30, 60, 90, 110, 130, 140, 160, 170, 200, 220] 417 | >>> binary_peaklist = [0, 1, 1, 0, 0, 1, 0, 1, 0, 0] 418 | >>> wd = check_binary_quality(peaklist, binary_peaklist) 419 | >>> wd['rejected_segments'] 420 | [(30, 220)] 421 | 422 | The whole segment is rejected as it contains more than the specified 3 rejections 423 | per 10 beats. 424 | ''' 425 | idx = 0 426 | working_data['rejected_segments'] = [] 427 | for i in range(int(len(binary_peaklist) / 10)): 428 | if np.bincount(binary_peaklist[idx:idx + 10])[0] > maxrejects: 429 | binary_peaklist[idx:idx + 10] = [0 for i in range(len(binary_peaklist[idx:idx+10]))] 430 | if idx + 10 < len(peaklist): 431 | working_data['rejected_segments'].append((peaklist[idx], peaklist[idx + 10])) 432 | else: 433 | working_data['rejected_segments'].append((peaklist[idx], peaklist[-1])) 434 | idx += 10 435 | return working_data 436 | 437 | 438 | def interpolate_peaks(data, peaks, sample_rate, desired_sample_rate=1000.0, working_data={}): 439 | '''interpolate detected peak positions and surrounding data points 440 | 441 | Function that enables high-precision mode by taking the estimated peak position, 442 | then upsampling the peak position +/- 100ms to the specified sampling rate, subsequently 443 | estimating the peak position with higher accuracy. 444 | 445 | Parameters 446 | ---------- 447 | data : 1d list or array 448 | list or array containing heart rate data 449 | 450 | peaks : 1d list or array 451 | list or array containing x-positions of peaks in signal 452 | 453 | sample_rate : int or float 454 | the sample rate of the signal (in Hz) 455 | 456 | desired_sampled-rate : int or float 457 | the sample rate to which to upsample. 458 | Must be sample_rate < desired_sample_rate 459 | 460 | Returns 461 | ------- 462 | working_data : dict 463 | working_data dictionary object containing all of heartpy's temp objects 464 | 465 | Examples 466 | -------- 467 | Given the output of a normal analysis and the first five peak-peak intervals: 468 | 469 | >>> import heartpy as hp 470 | >>> data, _ = hp.load_exampledata(0) 471 | >>> wd, m = hp.process(data, 100.0) 472 | >>> wd['peaklist'][0:5] 473 | array([ 63, 165, 264, 360, 460], dtype=int64) 474 | 475 | Now, the resolution is at max 10ms as that's the distance between data points. 476 | We can use the high precision mode for example to approximate a more precise 477 | position, for example if we had recorded at 1000Hz: 478 | 479 | >>> wd = interpolate_peaks(data = data, peaks = wd['peaklist'], 480 | ... sample_rate = 100.0, desired_sample_rate = 1000.0, working_data = wd) 481 | >>> wd['peaklist'][0:5] 482 | array([ 63.5, 165.4, 263.6, 360.4, 460.2]) 483 | 484 | As you can see the accuracy of peak positions has increased. 485 | Note that you cannot magically upsample nothing into something. Be reasonable. 486 | ''' 487 | assert desired_sample_rate > sample_rate, "desired sample rate is lower than actual sample rate \ 488 | this would result in downsampling which will hurt accuracy." 489 | 490 | num_samples = int(0.1 * sample_rate) 491 | ratio = sample_rate / desired_sample_rate 492 | interpolation_slices = [(x - num_samples, x + num_samples) for x in peaks] 493 | peaks = [] 494 | 495 | for i in interpolation_slices: 496 | slice = data[i[0]:i[1]] 497 | resampled = resample(slice, int(len(slice) * (desired_sample_rate / sample_rate))) 498 | peakpos = np.argmax(resampled) 499 | peaks.append((i[0] + (peakpos * ratio))) 500 | 501 | working_data['peaklist'] = np.asarray(peaks) 502 | 503 | return working_data 504 | -------------------------------------------------------------------------------- /heartpy/preprocessing.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import numpy as np 4 | from scipy.interpolate import UnivariateSpline, interp1d 5 | 6 | np.seterr(divide='ignore') #disable div by zero warnings 7 | np.seterr(invalid='ignore') 8 | 9 | from .filtering import filter_signal 10 | 11 | 12 | __all__ = ['scale_data', 13 | 'scale_sections', 14 | 'enhance_peaks', 15 | 'enhance_ecg_peaks', 16 | 'interpolate_clipping', 17 | 'flip_signal'] 18 | 19 | 20 | def scale_data(data, lower=0, upper=1024): 21 | '''scales passed sequence between thresholds 22 | 23 | Function that scales passed data so that it has specified lower 24 | and upper bounds. 25 | 26 | Parameters 27 | ---------- 28 | data : 1-d array or list 29 | Sequence to be scaled 30 | 31 | lower : int or float 32 | lower threshold for scaling 33 | default : 0 34 | 35 | upper : int or float 36 | upper threshold for scaling 37 | default : 1024 38 | 39 | Returns 40 | ------- 41 | out : 1-d array 42 | contains scaled data 43 | 44 | Examples 45 | -------- 46 | When passing data without further arguments to the function means it scales 0-1024 47 | 48 | >>> x = [2, 3, 4, 5] 49 | >>> scale_data(x) 50 | array([ 0. , 341.33333333, 682.66666667, 1024. ]) 51 | 52 | Or you can specify a range: 53 | 54 | >>> scale_data(x, lower = 50, upper = 124) 55 | array([ 50. , 74.66666667, 99.33333333, 124. ]) 56 | ''' 57 | 58 | rng = np.max(data) - np.min(data) 59 | minimum = np.min(data) 60 | data = (upper - lower) * ((data - minimum) / rng) + lower 61 | return data 62 | 63 | 64 | def scale_sections(data, sample_rate, windowsize=2.5, lower=0, upper=1024): 65 | '''scales data using sliding window approach 66 | 67 | Function that scales the data within the defined sliding window between 68 | the defined lower and upper bounds. 69 | 70 | Parameters 71 | ---------- 72 | data : 1-d array or list 73 | Sequence to be scaled 74 | 75 | sample_rate : int or float 76 | Sample rate of the passed signal 77 | 78 | windowsize : int or float 79 | size of the window within which signal is scaled, in seconds 80 | default : 2.5 81 | 82 | lower : int or float 83 | lower threshold for scaling. Passed to scale_data. 84 | default : 0 85 | 86 | upper : int or float 87 | upper threshold for scaling. Passed to scale_data. 88 | default : 1024 89 | 90 | Returns 91 | ------- 92 | out : 1-d array 93 | contains scaled data 94 | 95 | Examples 96 | -------- 97 | >>> x = [20, 30, 20, 30, 70, 80, 20, 30, 20, 30] 98 | >>> scale_sections(x, sample_rate=1, windowsize=2, lower=20, upper=30) 99 | array([20., 30., 20., 30., 20., 30., 20., 30., 20., 30.]) 100 | ''' 101 | 102 | #deprecation suppresses since 2.7, disable filter 103 | warnings.simplefilter('always', DeprecationWarning) 104 | warnings.warn('scale_sections function is deprecated and will be removed in a future release', 105 | category=DeprecationWarning, 106 | stacklevel=2) 107 | warnings.simplefilter('default', DeprecationWarning) #enable default filter 108 | 109 | total_length = len(data) / sample_rate 110 | window_dimension = int(windowsize * sample_rate) 111 | 112 | data_start = 0 113 | data_end = window_dimension 114 | 115 | output = np.empty(len(data)) 116 | 117 | while data_end <= len(data): 118 | sliced = data[data_start:data_end] 119 | sliced = np.power(sliced, 2) 120 | scaled = scale_data(sliced, lower, upper) 121 | 122 | output[data_start:data_end] = scaled 123 | data_start += window_dimension 124 | data_end += window_dimension 125 | 126 | return np.array(output[0:data_start]) 127 | 128 | 129 | def mark_clipping(data, threshold=1020): 130 | '''marks clipping sections 131 | 132 | Function that marks start and end of clipping part 133 | it detects the start and end of clipping segments and returns them 134 | 135 | Parameters 136 | ---------- 137 | data : 1-d numpy array 138 | Sequence to be scaled 139 | 140 | threshold: int or float 141 | the threshold for clipping, recommended to 142 | be a few data points below ADC or sensor max value, 143 | to compensate for signal noise 144 | default : 1020 145 | 146 | Returns 147 | ------- 148 | out : list of tuples 149 | the output is a list of tuples. Each tuple marks the start 150 | and endpoint of the detected clipping segment 151 | 152 | Examples 153 | -------- 154 | Import heartpy and load example data 155 | 156 | >>> import heartpy as hp 157 | >>> data, _ = hp.load_exampledata(example=2) 158 | 159 | Let's slice a part of the data that I know contains clipping 160 | 161 | >>> x = data[2000:3000] 162 | >>> mark_clipping(x, threshold=970) 163 | [(369, 375), (426, 437), (486, 493), (544, 552), (604, 610), (663, 665), \ 164 | (721, 722), (776, 781), (831, 836), (883, 891), (995, 999)] 165 | ''' 166 | 167 | clip_binary = np.where(data > threshold) 168 | clipping_edges = np.where(np.diff(clip_binary) > 1)[1] 169 | 170 | clipping_segments = [] 171 | 172 | for i in range(0, len(clipping_edges)): 173 | if i == 0: #if first clipping segment 174 | clipping_segments.append((clip_binary[0][0], 175 | clip_binary[0][clipping_edges[0]])) 176 | elif i == len(clipping_edges) - 1: 177 | #append last entry 178 | clipping_segments.append((clip_binary[0][clipping_edges[i]+1], 179 | clip_binary[0][-1])) 180 | else: 181 | clipping_segments.append((clip_binary[0][clipping_edges[i-1] + 1], 182 | clip_binary[0][clipping_edges[i]])) 183 | 184 | return clipping_segments 185 | 186 | 187 | def interpolate_clipping(data, sample_rate, threshold=1020): 188 | '''interpolate peak waveform 189 | 190 | Function that interpolates peaks between the clipping segments using 191 | cubic spline interpolation. It takes the clipping start +/- 100ms to 192 | calculate the spline. 193 | 194 | Parameters 195 | ---------- 196 | data : 1d list or numpy array 197 | data section to be evaluated 198 | 199 | sample_rate : int or float 200 | sample rate with which the data array is sampled 201 | 202 | threshold : int or float 203 | the threshold for clipping, recommended to 204 | be a few data points below ADC or sensor max value, 205 | to compensate for signal noise 206 | default : 1020 207 | 208 | Returns 209 | ------- 210 | out : array 211 | the output is an array with clipping segments replaced 212 | by interpolated segments 213 | 214 | Examples 215 | -------- 216 | First let's load some example data: 217 | 218 | >>> import heartpy as hp 219 | >>> data, _ = hp.load_exampledata(example=2) 220 | >>> x = data[2000:3000] 221 | >>> x[425:445] 222 | array([948, 977, 977, 977, 977, 978, 978, 977, 978, 977, 977, 977, 977, 223 | 914, 820, 722, 627, 536, 460, 394]) 224 | 225 | And interpolate any clipping segments as such: 226 | 227 | >>> intp = interpolate_clipping(x, sample_rate=117, threshold=970) 228 | >>> intp[425:445] 229 | array([ 972, 1043, 1098, 1138, 1163, 1174, 1173, 1159, 1134, 1098, 1053, 230 | 998, 934, 848, 747, 646, 552, 470, 402, 348]) 231 | ''' 232 | 233 | clipping_segments = mark_clipping(data, threshold) 234 | num_datapoints = int(0.1 * sample_rate) 235 | newx = [] 236 | newy = [] 237 | 238 | i = 0 239 | 240 | for segment in clipping_segments: 241 | if segment[0] < num_datapoints: 242 | #if clipping is present at start of signal, skip. 243 | #We cannot interpolate accurately when there is insufficient data prior to clipping segment. 244 | pass 245 | else: 246 | antecedent = data[segment[0] - num_datapoints : segment[0]] 247 | consequent = data[segment[1] : segment[1] + num_datapoints] 248 | segment_data = np.concatenate((antecedent, consequent)) 249 | 250 | interpdata_x = np.concatenate(([x for x in range(segment[0] - num_datapoints, segment[0])], 251 | [x for x in range(segment[1], segment[1] + num_datapoints)])) 252 | x_new = np.linspace(segment[0] - num_datapoints, 253 | segment[1] + num_datapoints, 254 | ((segment[1] - segment[0]) + (2 * num_datapoints))) 255 | 256 | try: 257 | interp_func = UnivariateSpline(interpdata_x, segment_data, k=3) 258 | interp_data = interp_func(x_new) 259 | 260 | data[segment[0] - num_datapoints : 261 | segment[1] + num_datapoints] = interp_data 262 | except: 263 | #pass over failed interpolation: leave original data alone 264 | pass 265 | 266 | return data 267 | 268 | 269 | def flip_signal(data, enhancepeaks=False, keep_range=True): 270 | '''invert signal waveforms. 271 | 272 | Function that flips raw signal with negative mV peaks to normal ECG. 273 | Required for proper peak finding in case peaks are expressed as 274 | negative dips. 275 | 276 | Parameters 277 | ---------- 278 | data : 1d list or numpy array 279 | data section to be evaluated 280 | 281 | enhance_peaks : bool 282 | whether to apply peak accentuation 283 | default : False 284 | 285 | keep_range : bool 286 | whether to scale the inverted data so that the original 287 | range is maintained 288 | 289 | Returns 290 | ------- 291 | out : 1d array 292 | 293 | Examples 294 | -------- 295 | Given an array of data 296 | 297 | >>> x = [200, 300, 500, 900, 500, 300, 200] 298 | 299 | We can call the function. If keep_range is False, the signal 300 | will be inverted relative to its mean. 301 | 302 | >>> flip_signal(x, keep_range=False) 303 | array([628.57142857, 528.57142857, 328.57142857, -71.42857143, 304 | 328.57142857, 528.57142857, 628.57142857]) 305 | 306 | However, by specifying keep_range, the inverted signal will be 307 | put 'back in place' in its original range. 308 | 309 | >>> flip_signal(x, keep_range=True) 310 | array([900., 800., 600., 200., 600., 800., 900.]) 311 | 312 | It's also possible to use the enhance_peaks function: 313 | 314 | >>> flip_signal(x, enhancepeaks=True) 315 | array([1024. , 621.75746332, 176.85545623, 0. , 316 | 176.85545623, 621.75746332, 1024. ]) 317 | ''' 318 | data_mean = np.mean(data) 319 | data_min = np.min(data) 320 | data_max = np.max(data) 321 | 322 | #invert signal 323 | data = (data_mean - data) + data_mean 324 | 325 | if keep_range: 326 | #scale data so original range is maintained 327 | data = scale_data(data, lower = data_min, upper = data_max) 328 | if enhancepeaks: 329 | data = enhance_peaks(data) 330 | return data 331 | 332 | 333 | def enhance_peaks(hrdata, iterations=2): 334 | '''enhances peak amplitude relative to rest of signal 335 | 336 | Function thta attempts to enhance the signal-noise ratio by accentuating 337 | the highest peaks. Note: denoise first 338 | 339 | Parameters 340 | ---------- 341 | hrdata : 1-d numpy array or list 342 | sequence containing heart rate data 343 | 344 | iterations : int 345 | the number of scaling steps to perform 346 | default : 2 347 | 348 | Returns 349 | ------- 350 | out : 1-d numpy array 351 | array containing enhanced peaks 352 | 353 | Examples 354 | -------- 355 | Given an array of data, the peaks can be enhanced using the function 356 | 357 | >>> x = [200, 300, 500, 900, 500, 300, 200] 358 | >>> enhance_peaks(x) 359 | array([ 0. , 4.31776016, 76.16528926, 1024. , 360 | 76.16528926, 4.31776016, 0. ]) 361 | ''' 362 | scale_data(hrdata) 363 | for i in range(iterations): 364 | hrdata = np.power(hrdata, 2) 365 | hrdata = scale_data(hrdata) 366 | return hrdata 367 | 368 | 369 | def enhance_ecg_peaks(hrdata, sample_rate, iterations=4, aggregation='mean', 370 | notch_filter=True): 371 | '''enhances ECG peaks 372 | 373 | Function that convolves synthetic QRS templates with the signal, leading 374 | to a strong increase signal-to-noise ratio. Function ends with an optional 375 | Notch filterstep (default : true) to reduce noise from the iterating 376 | convolution steps. 377 | 378 | Parameters 379 | ---------- 380 | hrdata : 1-d numpy array or list 381 | sequence containing heart rate data 382 | 383 | sample_rate : int or float 384 | sample rate with which the data is sampled 385 | 386 | iterations : int 387 | how many convolutional iterations should be run. More will result in 388 | stronger peak enhancement, but over a certain point (usually between 12-16) 389 | overtones start appearing in the signal. Only increase this if the peaks 390 | aren't amplified enough. 391 | default : 4 392 | 393 | aggregation : str 394 | how the data from the different convolutions should be aggregated. 395 | Can be either 'mean' or 'median'. 396 | default : mean 397 | 398 | notch_filter : bool 399 | whether to apply a notch filter after the last convolution to get rid of 400 | remaining low frequency noise. 401 | default : true 402 | 403 | Returns 404 | ------- 405 | output : 1d array 406 | The array containing the filtered data with enhanced peaks 407 | 408 | Examples 409 | -------- 410 | First let's import the module and load the data 411 | 412 | >>> import heartpy as hp 413 | >>> data, timer = hp.load_exampledata(1) 414 | >>> sample_rate = hp.get_samplerate_mstimer(timer) 415 | 416 | After loading the data we call the function like so: 417 | 418 | >>> filtered_data = enhance_ecg_peaks(data, sample_rate, iterations = 3) 419 | 420 | By default the module uses the mean to aggregate convolutional outputs. It 421 | is also possible to use the median. 422 | 423 | >>> filtered_data = enhance_ecg_peaks(data, sample_rate, iterations = 3, 424 | ... aggregation = 'median', notch_filter = False) 425 | 426 | In the last example we also disabled the notch filter. 427 | ''' 428 | 429 | #assign output 430 | output = np.copy(hrdata) 431 | 432 | #generate synthetic QRS complexes 433 | templates = generate_ecg_templates(sample_rate) 434 | 435 | for i in range(int(iterations)): 436 | convolved = denoise_convolutions(output, sample_rate, templates) 437 | if aggregation == 'mean': 438 | output = np.nanmean(convolved, axis=0) 439 | elif aggregation == 'median': 440 | output = np.nanmedian(convolved, axis=0) 441 | 442 | #offset signal shift (shifts 1 datapoint for every iteration after the first) 443 | output = output[int(iterations) - 1:-int(iterations)] 444 | 445 | if notch_filter: 446 | output = filter_signal(output, 0.05, sample_rate, filtertype='notch') 447 | 448 | return output 449 | 450 | 451 | def generate_ecg_templates(sample_rate, widths=[50, 60, 70, 80, 100], 452 | presets=[[0, 2, 2.5, 3, 3.5, 5], 453 | [0, 1, 1.5, 2, 2.5, 3], 454 | [0, 3, 3.5, 4, 4.5, 6]], 455 | amplitude = [0, -0.1, 1, -0.5, 0, 0]): 456 | '''helper function for enhance_ecg_peaks 457 | 458 | Helper function that generates synthetic QRS complexes of varying sizes 459 | to convolve with the signal. 460 | ''' 461 | 462 | templates = [] 463 | 464 | for i in presets: 465 | for j in widths: 466 | #duration < 120ms 467 | duration = (j / 1000) * sample_rate 468 | step = duration / len(i) 469 | new_t = [int(step * x) for x in i] 470 | new_x = np.linspace(new_t[0], new_t[-1], new_t[-1]) 471 | #interpolate peak to fit in correct sampling rate 472 | interp_func = interp1d(new_t, amplitude, kind='linear') 473 | templates.append(interp_func(new_x)) 474 | 475 | return templates 476 | 477 | 478 | def denoise_convolutions(data, sample_rate, templates): 479 | '''helper function for enhance_ecg_peaks 480 | 481 | Helper function that convolves the generated synthetic QRS templates 482 | with the provided signal. 483 | ''' 484 | 485 | convolutions = [] 486 | 487 | for i in range(len(templates)): 488 | convolved = np.convolve(data, templates[i], mode='same') 489 | convolutions.append(convolved) 490 | 491 | return np.asarray(convolutions) -------------------------------------------------------------------------------- /heartpy/visualizeutils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Functions that help visualize results 3 | ''' 4 | 5 | import os 6 | 7 | import matplotlib.pyplot as plt 8 | from matplotlib.patches import Ellipse 9 | import numpy as np 10 | 11 | from . import config 12 | 13 | __all__ = ['plotter', 14 | 'segment_plotter', 15 | 'plot_poincare', 16 | 'plot_breathing'] 17 | 18 | def plotter(working_data, measures, show=True, figsize=None, 19 | title='Heart Rate Signal Peak Detection', moving_average=False): # pragma: no cover 20 | '''plots the analysis results. 21 | 22 | Function that uses calculated measures and data stored in the working_data{} and measures{} 23 | dict objects to visualise the fitted peak detection solution. 24 | 25 | Parameters 26 | ---------- 27 | working_data : dict 28 | dictionary object that contains all heartpy's working data (temp) objects. 29 | will be created if not passed to function 30 | 31 | measures : dict 32 | dictionary object used by heartpy to store computed measures. Will be created 33 | if not passed to function 34 | 35 | show : bool 36 | when False, function will return a plot object rather than display the results. 37 | default : True 38 | 39 | figsize: tuple 40 | Set dimensions of image in inches like in matplotlib. figsize=(x, y) 41 | default: None => (6.4, 4.8) 42 | 43 | title : string 44 | title for the plot. 45 | default : "Heart Rate Signal Peak Detection" 46 | 47 | moving_average : bool 48 | whether to display the moving average on the plot. 49 | The moving average is used for peak fitting. 50 | default: False 51 | 52 | Returns 53 | ------- 54 | out : matplotlib plot object 55 | only returned if show == False. 56 | 57 | Examples 58 | -------- 59 | First let's load and analyse some data to visualise 60 | 61 | >>> import heartpy as hp 62 | >>> data, _ = hp.load_exampledata(0) 63 | >>> wd, m = hp.process(data, 100.0) 64 | 65 | Then we can visualise 66 | 67 | >>> plot_object = plotter(wd, m, show=False, title='some awesome title') 68 | 69 | This returns a plot object which can be visualized or saved or appended. 70 | See matplotlib API for more information on how to do this. 71 | 72 | A matplotlib plotting object is returned. This can be further processed and saved 73 | to a file. 74 | 75 | ''' 76 | #get color palette 77 | colorpalette = config.get_colorpalette_plotter() 78 | 79 | # create plot x-var 80 | fs = working_data['sample_rate'] 81 | plotx = np.arange(0, len(working_data['hr'])/fs, 1/fs) 82 | #check if there's a rounding error causing differing lengths of plotx and signal 83 | diff = len(plotx) - len(working_data['hr']) 84 | if diff < 0: 85 | #add to linspace 86 | plotx = np.append(plotx, plotx[-1] + (plotx[-2] - plotx[-1])) 87 | elif diff > 0: 88 | #trim linspace 89 | plotx = plotx[0:-diff] 90 | 91 | peaklist = working_data['peaklist'] 92 | ybeat = working_data['ybeat'] 93 | rejectedpeaks = working_data['removed_beats'] 94 | rejectedpeaks_y = working_data['removed_beats_y'] 95 | 96 | fig, ax = plt.subplots(figsize=figsize) 97 | 98 | ax.set_title(title) 99 | ax.plot(plotx, working_data['hr'], color=colorpalette[0], label='heart rate signal', zorder=-10) 100 | ax.set_xlabel('Time (s)') 101 | 102 | if moving_average: 103 | ax.plot(plotx, working_data['rolling_mean'], color='gray', alpha=0.5) 104 | 105 | ax.scatter(np.asarray(peaklist)/fs, ybeat, color=colorpalette[1], label='BPM:%.2f' %(measures['bpm'])) 106 | ax.scatter(rejectedpeaks/fs, rejectedpeaks_y, color=colorpalette[2], label='rejected peaks') 107 | 108 | #check if rejected segment detection is on and has rejected segments 109 | try: 110 | if len(working_data['rejected_segments']) >= 1: 111 | for segment in working_data['rejected_segments']: 112 | ax.axvspan(segment[0], segment[1], facecolor='red', alpha=0.5) 113 | except: 114 | pass 115 | 116 | ax.legend(loc=4, framealpha=0.6) 117 | 118 | if show: 119 | fig.show() 120 | else: 121 | return fig 122 | 123 | def segment_plotter(working_data, measures, title='Heart Rate Signal Peak Detection', 124 | figsize=(6, 6), path='', start=0, end=None, step=1): # pragma: no cover 125 | '''plots analysis results 126 | 127 | Function that plots the results of segmentwise processing of heart rate signal 128 | and writes all results to separate files at the path provided. 129 | 130 | Parameters 131 | ---------- 132 | working_data : dict 133 | dictionary object that contains all heartpy's working data (temp) objects. 134 | will be created if not passed to function 135 | 136 | measures : dict 137 | dictionary object used by heartpy to store computed measures. Will be created 138 | if not passed to function 139 | 140 | title : str 141 | the title used in the plot 142 | 143 | figsize : tuple 144 | figsize tuple to be passed to matplotlib 145 | 146 | path : str 147 | the path where the files will be stored, folder must exist. 148 | 149 | start : int 150 | what segment to start plotting with 151 | default : 0 152 | 153 | end : int 154 | last segment to plot. Must be smaller than total number of segments 155 | default : None, will plot until end 156 | 157 | step : int 158 | stepsize used when iterating over plots every step'th segment will be plotted 159 | default : 1 160 | 161 | Returns 162 | ------- 163 | None 164 | 165 | Examples 166 | -------- 167 | This function has no examples. See documentation of heartpy for more info. 168 | ''' 169 | #sanity check 170 | assert 0 < step < len(working_data['hr']), 'step must be larger than zero and smaller than total number of segments' 171 | 172 | #set endpoint if not explicitly defined 173 | if end == None: 174 | end = len(working_data['hr']) 175 | else: 176 | #make sure it is defined within boundary conditions 177 | assert end <= len(working_data['hr']), 'defined "end" endpoint is larger than number of segments' 178 | 179 | #add trailing path slash if user omitted it 180 | if not (path.endswith('/') or path.endswith('\\')) and len(path) > 0: 181 | path += '/' 182 | #create path if it doesn't exist 183 | if not os.path.isdir(path): 184 | os.makedirs(path) 185 | 186 | #make plots 187 | filenum = 0 188 | for i in range(start, end, step): 189 | wd_segment = {} 190 | m_segment = {} 191 | #assign values to sub-object for plotting purposes 192 | wd_segment['peaklist'] = working_data['peaklist'][i] 193 | wd_segment['ybeat'] = working_data['ybeat'][i] 194 | wd_segment['removed_beats'] = working_data['removed_beats'][i] 195 | wd_segment['removed_beats_y'] = working_data['removed_beats_y'][i] 196 | wd_segment['hr'] = working_data['hr'][i] 197 | wd_segment['rolling_mean'] = working_data['rolling_mean'][i] 198 | wd_segment['sample_rate'] = working_data['sample_rate'][i] 199 | m_segment['bpm'] = measures['bpm'][i] 200 | try: 201 | wd_segment['rejected_segments'] = working_data['rejected_segments'][i] 202 | except: 203 | pass 204 | 205 | #plot it using built-in plotter 206 | plt.figure(figsize = figsize) 207 | p = plotter(wd_segment, m_segment, show=False) 208 | p.savefig('%s%i.png' %(path, filenum)) 209 | plt.close('all') 210 | filenum += 1 211 | 212 | 213 | def plot_poincare(working_data, measures, show = True, figsize=None, 214 | title='Poincare plot'): # pragma: no cover 215 | '''visualize poincare plot 216 | 217 | function that visualises poincare plot. 218 | 219 | Parameters 220 | ---------- 221 | working_data : dict 222 | dictionary object that contains all heartpy's working data (temp) objects. 223 | will be created if not passed to function 224 | 225 | measures : dict 226 | dictionary object used by heartpy to store computed measures. Will be created 227 | if not passed to function 228 | 229 | show : bool 230 | whether to show the plot right away, or return a matplotlib object for 231 | further manipulation 232 | 233 | figsize: tuple 234 | Set dimensions of image in inches like in matplotlib. figsize=(x, y) 235 | default: None => (6.4, 4.8) 236 | 237 | title : str 238 | the title used in the plot 239 | 240 | Returns 241 | ------- 242 | out : matplotlib plot object 243 | only returned if show == False. 244 | 245 | Examples 246 | -------- 247 | This function has no examples. See documentation of heartpy for more info. 248 | ''' 249 | 250 | #get color palette 251 | colorpalette = config.get_colorpalette_poincare() 252 | 253 | #get values from dict 254 | x_plus = working_data['poincare']['x_plus'] 255 | x_minus = working_data['poincare']['x_minus'] 256 | sd1 = measures['sd1'] 257 | sd2 = measures['sd2'] 258 | 259 | #define figure 260 | fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'}, figsize=figsize) 261 | 262 | #plot scatter 263 | ax.scatter(x_plus, x_minus, color = colorpalette[0], 264 | alpha = 0.75, label = 'peak-peak intervals') 265 | 266 | #plot identity line 267 | mins = np.min([x_plus, x_minus]) 268 | maxs = np.max([x_plus, x_minus]) 269 | identity_line = np.linspace(np.min(mins), np.max(maxs)) 270 | ax.plot(identity_line, identity_line, color='black', alpha=0.5, 271 | label = 'identity line') 272 | 273 | #rotate SD1, SD2 vectors 45 degrees counterclockwise 274 | sd1_xrot, sd1_yrot = rotate_vec(0, sd1, 45) 275 | sd2_xrot, sd2_yrot = rotate_vec(0, sd2, 45) 276 | 277 | #plot rotated SD1, SD2 lines 278 | ax.plot([np.mean(x_plus), np.mean(x_plus) + sd1_xrot], 279 | [np.mean(x_minus), np.mean(x_minus) + sd1_yrot], 280 | color = colorpalette[1], label = 'SD1') 281 | ax.plot([np.mean(x_plus), np.mean(x_plus) - sd2_xrot], 282 | [np.mean(x_minus), np.mean(x_minus) + sd2_yrot], 283 | color = colorpalette[2], label = 'SD2') 284 | 285 | #plot ellipse 286 | xmn = np.mean(x_plus) 287 | ymn = np.mean(x_minus) 288 | el = Ellipse((xmn, ymn), width = sd2 * 2, height = sd1 * 2, angle = 45.0) 289 | ax.add_artist(el) 290 | el.set_edgecolor((0,0,0)) 291 | el.fill = False 292 | 293 | ax.set_xlabel(r'RRi$_n$ (ms)') 294 | ax.set_ylabel(r'RRi$_{n+1}$ (ms)') 295 | ax.legend(loc=4, framealpha=0.6) 296 | ax.set_title(title) 297 | 298 | if show: 299 | fig.show() 300 | else: 301 | return fig 302 | 303 | 304 | def rotate_vec(x, y, angle): 305 | '''rotates vector around origin point 306 | 307 | Function that takes vector and angle, and rotates around origin point 308 | with given amount of degrees. 309 | 310 | Helper function for poincare plotting 311 | 312 | Parameters 313 | ---------- 314 | x : int or float 315 | vector x coordinate 316 | 317 | y : int or float 318 | vector y coordinate 319 | 320 | angle: int or float 321 | the angle of rotation applied to the vecftor 322 | 323 | Returns 324 | ------- 325 | x_rot : float 326 | new x coordinate with rotation applied 327 | 328 | y_rot : float 329 | new x coordinate with rotation applied 330 | 331 | Examples 332 | -------- 333 | Given a vector (0,1), if we apply a rotation of 90 degrees clockwise 334 | we expect to get (1,0). Let's test 335 | 336 | >>> x_new, y_new = rotate_vec(0, 1, -90) 337 | >>> print('%.3f, %.3f' %(x_new, y_new)) 338 | 1.000, 0.000 339 | ''' 340 | theta = np.radians(angle) 341 | 342 | cs = np.cos(theta) 343 | sn = np.sin(theta) 344 | 345 | x_rot = (x * cs) - (y * sn) 346 | y_rot = (x * sn) + (y * cs) 347 | 348 | return x_rot, y_rot 349 | 350 | 351 | def plot_breathing(working_data, measures, show=True, figsize=None): # pragma: no cover 352 | '''plots extracted breathing signal and spectrogram 353 | 354 | Function that plots the breathing signal extracted from RR-intervals alongside 355 | its computed spectrogram representation. 356 | 357 | Parameters 358 | ---------- 359 | working_data : dict 360 | dictionary object that contains all heartpy's working data (temp) objects. 361 | will be created if not passed to function 362 | 363 | measures : dict 364 | dictionary object used by heartpy to store computed measures. Will be created 365 | if not passed to function 366 | 367 | show : bool 368 | whether to show the plot right away, or return a matplotlib object for 369 | further manipulation 370 | 371 | figsize: tuple 372 | Set dimensions of image in inches like in matplotlib. figsize=(x, y) 373 | default: None => (6.4, 4.8) 374 | 375 | Returns 376 | ------- 377 | out : matplotlib plot object 378 | only returned if show == False. 379 | 380 | Examples 381 | -------- 382 | This function has no examples. See documentation of heartpy for more info. 383 | ''' 384 | 385 | fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize) 386 | 387 | ax1.plot(working_data['breathing_signal'], label='breathing signal') 388 | ax1.set_xlabel('ms') 389 | ax1.set_title('breathing signal extracted from RR-intervals') 390 | 391 | ax2.plot(working_data['breathing_frq'], working_data['breathing_psd'], label='spectrogram') 392 | ax2.set_xlim(0, 1) 393 | ax2.set_xlabel('Hz') 394 | ax2.set_title('spectrogram extracted from breathing rate signal') 395 | 396 | ax2.legend() 397 | plt.tight_layout() 398 | 399 | if show: 400 | fig.show() 401 | else: 402 | return fig 403 | 404 | -------------------------------------------------------------------------------- /images/clipping_correct.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/images/clipping_correct.jpg -------------------------------------------------------------------------------- /images/output1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/images/output1.jpeg -------------------------------------------------------------------------------- /images/output2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/images/output2.jpeg -------------------------------------------------------------------------------- /run_tests.py: -------------------------------------------------------------------------------- 1 | import heartpy as hp 2 | 3 | if __name__ == '__main__': 4 | hp.run_tests(verbose=0) -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r", encoding = "utf-8") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="heartpy", 8 | version="1.2.7", 9 | author="Paul van Gent", 10 | author_email="P.vanGent@tudelft.nl", 11 | description="Heart Rate Analysis Toolkit", 12 | long_description=long_description, 13 | long_description_content_type="text/markdown", 14 | url="https://github.com/paulvangentcom/heartrate_analysis_python", 15 | packages=["heartpy"], 16 | install_requires=[ 17 | "cycler==0.10.0;python_version<='3.5'", 18 | "kiwisolver==1.1.0;python_version<='3.5'", 19 | "pyparsing==2.4.7;python_version=='2.7'", 20 | "numpy<=1.15;python_version=='2.7'", 21 | "numpy<=1.15;python_version=='3.4'", 22 | "numpy<=1.15;python_version=='3.5'", 23 | "numpy<=1.17;python_version=='3.6'", 24 | "numpy>=1.17;python_version>='3.7'", 25 | "scipy<=1.1.0;python_version=='2.7'", 26 | "scipy<=1.2.0;python_version=='3.4'", 27 | "scipy<=1.4.1;python_version=='3.5'", 28 | "scipy<=1.4.1;python_version=='3.6'", 29 | "scipy;python_version>='3.7'", 30 | "matplotlib==2.2.3;python_version=='2.7'", 31 | "matplotlib<=2.2.3;python_version=='3.4'", 32 | "matplotlib<=3.0.0;python_version=='3.5'", 33 | "matplotlib<=3.1.2;python_version=='3.6'", 34 | "matplotlib<=3.3.4;python_version=='3.7'", 35 | "matplotlib;python_version>='3.8'"], 36 | include_package_data=True, 37 | package_data={ 38 | '': ['data/*.csv', 'data/*.mat', 'data/*.log'] 39 | }, 40 | classifiers=[ 41 | "Programming Language :: Python :: 2.7", 42 | "Programming Language :: Python :: 3.4", 43 | "Programming Language :: Python :: 3.5", 44 | "Programming Language :: Python :: 3.6", 45 | "Programming Language :: Python :: 3.7", 46 | "Programming Language :: Python :: 3.8", 47 | "License :: OSI Approved :: MIT License", 48 | "Operating System :: OS Independent", 49 | ], 50 | ) 51 | -------------------------------------------------------------------------------- /validation/validation_rawdata.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paulvangentcom/heartrate_analysis_python/e3d80c2eff1967120cf5210b1f88cb23347246b0/validation/validation_rawdata.zip --------------------------------------------------------------------------------