├── pyvital
│   ├── __init__.py
│   ├── filters
│   │   ├── __init__.py
│   │   ├── UniMSNet.pth
│   │   ├── model_beat.h5
│   │   ├── model_rhythm.h5
│   │   ├── model_dlapco_v1.pth
│   │   ├── model_hpi_state_dict_v1.pth
│   │   ├── ecg_qrs_detector.py
│   │   ├── nirs_cox.py
│   │   ├── resp_compliance.py
│   │   ├── pleth_pvi.py
│   │   ├── pleth_ptt.py
│   │   ├── abp_hpi.py
│   │   ├── ecg_classifier.py
│   │   ├── eeg_fft.py
│   │   ├── pleth_dpop.py
│   │   ├── abp_ppv.py
│   │   ├── pkpd_3comp.py
│   │   ├── ecg_hrv.py
│   │   ├── pleth_spi.py
│   │   ├── ecg_mtwa.py
│   │   ├── ecg_annotator.py
│   │   ├── sv_dlapco.py
│   │   └── ecg_beat_noise_detector.py
│   ├── __main__.py
│   └── arr.py
├── .gitignore
├── README.md
├── pyproject.toml
└── LICENSE

/pyvital/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/pyvital/filters/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | dist
3 | *egg-info
4 | __pycache__/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pyvital
2 | Open source python implementation of medical algorithms
--------------------------------------------------------------------------------
/pyvital/filters/UniMSNet.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vitaldb/pyvital/HEAD/pyvital/filters/UniMSNet.pth
--------------------------------------------------------------------------------
/pyvital/filters/model_beat.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vitaldb/pyvital/HEAD/pyvital/filters/model_beat.h5
--------------------------------------------------------------------------------
/pyvital/filters/model_rhythm.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vitaldb/pyvital/HEAD/pyvital/filters/model_rhythm.h5
--------------------------------------------------------------------------------
/pyvital/filters/model_dlapco_v1.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vitaldb/pyvital/HEAD/pyvital/filters/model_dlapco_v1.pth
--------------------------------------------------------------------------------
/pyvital/filters/model_hpi_state_dict_v1.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vitaldb/pyvital/HEAD/pyvital/filters/model_hpi_state_dict_v1.pth
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools"]
3 | 
4 | [project]
5 | name = "pyvital"
6 | version = "0.4.0"
7 | description = "Python Library for Biosignal Analysis"
8 | readme = "README.md"
9 | requires-python = ">=3.8"
10 | license = "MIT"
11 | authors = [
12 |     {name = "VitalLab", email = "vital@snu.ac.kr"}
13 | ]
14 | dependencies = ['numpy','scipy','sanic','PyWavelets','tensorflow>=2.6','keras','torch']
15 | 
16 | [tool.setuptools]
17 | packages = ["pyvital", "pyvital.filters"]
18 | 
19 | [tool.setuptools.package-data]
20 | "pyvital.filters" = ["*.h5", "*.pth"]
"pyvital.filters" = ["*.h5", "*.pth"] 21 | -------------------------------------------------------------------------------- /pyvital/filters/ecg_qrs_detector.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | 3 | cfg = { 4 | 'name': 'ECG - QRS detector', 5 | 'group': 'Medical algorithms', 6 | 'desc': 'Simple QRS detector', 7 | 'reference': 'http://ocw.utm.my/file.php/38/SEB4223/07_ECG_Analysis_1_-_QRS_Detection.ppt%20%5BCompatibility%20Mode%5D.pdf', 8 | 'overlap': 3, # 3 sec overlap for HR=20 9 | 'interval': 40, 10 | 'inputs': [{"name": 'ECG', "type": 'wav'}], 11 | 'outputs': [{"name": 'RPEAK', "type": 'num', "min": 0, "max": 2}] 12 | } 13 | 14 | 15 | def run(inp, opt, cfg): 16 | trk_name = [k for k in inp][0] 17 | 18 | if 'srate' not in inp[trk_name]: 19 | return 20 | 21 | data = arr.interp_undefined(inp[trk_name]['vals']) 22 | srate = inp[trk_name]['srate'] 23 | 24 | r_list = arr.detect_qrs(data, srate) # detect r-peak 25 | ret_rpeak = [] 26 | for idx in r_list: 27 | dt = idx / srate 28 | ret_rpeak.append({'dt': dt, 'val': 1}) 29 | return [ret_rpeak] 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 VitalDB 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /pyvital/filters/nirs_cox.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | 4 | cfg = { 5 | 'name': 'NIRS - Cerebral Oximeter Index', 6 | 'group': 'Medical algorithms', 7 | 'desc': 'Calculate Pearson correlation coefficient between blood pressure and cerebral oxymetry', 8 | 'reference': 'Brady. et al. Continuous time-domain analysis of cerebrovascular autoregulation using near-infrared spectroscopy. Stroke. 
2007 October; 38(10):2818-2825.', 9 | 'interval': 300, 10 | 'overlap': 0, 11 | 'inputs': [ 12 | {'name': 'ART1_MBP', 'type': 'num'}, 13 | {'name': 'SCO2_R', 'type': 'num'} 14 | ], 15 | 'outputs': [ 16 | {'name': 'MBP', 'type':'num', 'unit':'mmHg'}, 17 | {'name': 'COX_SLOPE', 'type':'num', 'unit':'%/mmHg', 'min': -1, 'max': 1}, 18 | {'name': 'COX_PEARSON', 'type':'num', 'min':-1, 'max':1}, 19 | {'name': 'ART_10SEC', 'type':'num', 'unit':'mmHg', 'min': 0, 'max': 150}, 20 | {'name': 'SCO_10SEC', 'type':'num', 'unit':'%', 'min':40, 'max':100} 21 | ] 22 | } 23 | 24 | 25 | def run(inp, opt, cfg): 26 | dt_last = cfg['interval'] 27 | 28 | avg_interval = 10 29 | 30 | sco_vals_10avg = [] # 10 sec avg 31 | mbp_vals_10avg = [] 32 | mbp_10sec = [] # trk 33 | sco_10sec = [] # trk 34 | for dt_bin_from in range(0, dt_last, avg_interval): 35 | dt_bin_to = min(dt_bin_from + avg_interval, dt_last) 36 | scos = [] # collect for 10 sec 37 | mbps = [] 38 | for esco in inp['SCO2_R']: 39 | if 20 < esco['val']: 40 | if dt_bin_from < esco['dt'] < dt_bin_to: 41 | scos.append(esco['val']) 42 | for embp in inp['ART1_MBP']: 43 | if 20 < embp['val'] < 200: 44 | if dt_bin_from < embp['dt'] < dt_bin_to: 45 | mbps.append(embp['val']) 46 | if not mbps or not scos: 47 | continue 48 | sco_vals_10avg.append(np.mean(scos)) 49 | mbp_vals_10avg.append(np.mean(mbps)) 50 | mbp_10sec.append({'dt':dt_bin_to, 'val':np.mean(mbps)}) 51 | sco_10sec.append({'dt':dt_bin_to, 'val':np.mean(scos)}) 52 | 53 | if len(mbp_vals_10avg) == 0: 54 | return 55 | # 56 | # if np.max(mbp_vals_10avg) - np.min(mbp_vals_10avg) < 20: 57 | # return 58 | 59 | mbp = np.mean(mbp_vals_10avg) 60 | r = np.corrcoef(mbp_vals_10avg, sco_vals_10avg)[0, 1] 61 | cox, _ = np.linalg.lstsq(np.vstack([mbp_vals_10avg, np.ones(len(mbp_vals_10avg))]).T, sco_vals_10avg)[0] 62 | 63 | return [ 64 | [{'dt': dt_last, 'val': mbp}], 65 | [{'dt': dt_last, 'val': cox}], 66 | [{'dt': dt_last, 'val': r}], 67 | mbp_10sec, 68 | sco_10sec, 69 | ] 70 | -------------------------------------------------------------------------------- /pyvital/filters/resp_compliance.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | import math 4 | 5 | cfg = { 6 | 'name': 'RESP - Intratidal Compliance Profiles', 7 | 'group': 'Medical algorithms', 8 | 'desc': 'Calculate intratidal compliance using gliding-SLICE method', 9 | 'reference': 'Schumann et al, Estimating intratidal nonlinearity of respiratory system mechanics: a model study using the enhanced gliding-SLICE method. 
Physiological measurement, 30 (2009) 1341-56',
10 |     'overlap': 0,
11 |     'interval': 10,
12 |     'inputs': [
13 |         {'name': 'VOL', 'type': 'wav'},
14 |         {'name': 'AWF', 'type': 'wav'},
15 |         {'name': 'AWP', 'type': 'wav'}
16 |     ],
17 |     'outputs': [
18 |         {'name': 'V', 'type': 'num', 'min': 0, 'max': 600, 'unit': 'mL'},
19 |         {'name': 'C', 'type': 'num', 'min': 0, 'max': 100, 'unit': 'mL/cmH2O'},
20 |         {'name': 'R', 'type': 'num', 'min': 0, 'max': 20, 'unit': 'cmH2Osec/L'},
21 |         {'name': 'P0', 'type': 'num', 'min': 0, 'max': 30, 'unit': 'cmH2O'}
22 |     ]
23 | }
24 | 
25 | 
26 | def run(inp, opt, cfg):
27 |     """
28 |     calculate the intratidal compliance profile using the gliding-SLICE method
29 |     :param inp: volume (VOL), flow (AWF) and airway pressure (AWP) waveforms
30 |     :return: volume, compliance, resistance and offset pressure (P0) profiles
31 |     """
32 |     vsrate = inp['VOL']['srate']
33 |     psrate = inp['AWP']['srate']
34 |     fsrate = inp['AWF']['srate']
35 |     if vsrate != psrate or vsrate != fsrate:
36 |         print("sampling rates of volume, flow and awp are different")
37 |         return
38 |     srate = vsrate
39 | 
40 |     vdata = arr.interp_undefined(inp['VOL']['vals'])
41 |     fdata = arr.interp_undefined(inp['AWF']['vals'])
42 |     pdata = arr.interp_undefined(inp['AWP']['vals'])
43 | 
44 |     # if srate < 200:
45 |     #     vdata = arr.resample_hz(vdata, srate, 200)
46 |     #     fdata = arr.resample_hz(fdata, srate, 200)
47 |     #     pdata = arr.resample_hz(pdata, srate, 200)
48 |     #     srate = 200
49 | 
50 |     vdata = np.array(vdata)
51 |     fdata = np.array(fdata) / 60  # L/min -> L/sec
52 |     pdata = np.array(pdata)
53 | 
54 |     #fdata = np.diff(vdata) * srate / 1000  # make difference to rate
55 |     #vdata = vdata[:-1]  # remove the last sample
56 |     #pdata = pdata[:-1]  # remove the last sample
57 | 
58 |     vmax = max(vdata)
59 |     vmin = min(vdata)
60 |     v95 = vmax - (vmax - vmin) * 0.1
61 |     v5 = vmin + (vmax - vmin) * 0.1
62 |     vret = []
63 |     cret = []
64 |     rret = []
65 |     p0ret = []
66 | 
67 |     nstep = 31
68 |     vstep = (v95 - v5) / nstep
69 |     for i in range(nstep):
70 |         # collect data
71 |         vfrom = v5 + vstep * i
72 |         seg_idx = np.logical_and(vfrom < vdata, vdata <= vfrom + vstep)
73 | 
74 |         if sum(seg_idx) < 3:
75 |             print('number of samples in data seg < 3')
76 |             continue
77 | 
78 |         pseg = pdata[seg_idx]
79 |         vseg = vdata[seg_idx]
80 |         fseg = fdata[seg_idx]
81 | 
82 |         A = np.vstack([vseg, fseg, np.ones(len(vseg))]).T
83 |         cinv, r, p0 = np.linalg.lstsq(A, pseg)[0]  # fit P = V * (1/C) + F * R + P0
84 |         c = 1/cinv
85 | 
86 |         vret.append({'dt': i * 0.02, 'val': vfrom})
87 |         cret.append({'dt': i * 0.02, 'val': c})
88 |         rret.append({'dt': i * 0.02, 'val': r})
89 |         p0ret.append({'dt': i * 0.02, 'val': p0})
90 | 
91 |     return [
92 |         #{'dt':0, 'srate':srate, 'vals':list(fdata)},
93 |         vret,
94 |         cret,
95 |         rret,
96 |         p0ret
97 |     ]
98 | 
--------------------------------------------------------------------------------
/pyvital/filters/pleth_pvi.py:
--------------------------------------------------------------------------------
1 | import pyvital.arr as arr
2 | import numpy as np
3 | import math
4 | 
5 | cfg = {
6 |     'name': 'PVI - Plethysmographic Variability Index',
7 |     'group': 'Medical algorithms',
8 |     'desc': 'Calculate the plethysmographic variability index',
9 |     'reference': 'Aboy et al, An Enhanced Automatic Algorithm for Estimation of Respiratory Variations in Arterial Pulse Pressure During Regions of Abrupt Hemodynamic Changes. IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, VOL. 56, NO. 10, OCTOBER 2009',
10 |     'overlap': 3,
11 |     'interval': 40,
12 |     'inputs': [{'name': 'PLETH', 'type': 'wav'}],
13 |     'outputs': [{'name': 'RR', 'type': 'num', 'min': 0, 'max': 30, 'unit': '/min'}, {'name': 'PVI', 'type': 'num', 'min': 0, 'max': 30, 'unit': '%'}],
14 |     'pp': 0
15 | }
16 | 
17 | 
18 | def b(u):
19 |     if -5 <= u <= 5:
20 |         return math.exp(-u * u / 2)
21 |     else:
22 |         return 0
23 | 
24 | 
25 | def run(inp, opt, cfg):
26 |     """
27 |     calculate pvi from the pleth waveform
28 |     :param inp: pleth waveform
29 |     :return: respiratory rate and pvi
30 |     """
31 |     trk_name = [k for k in inp][0]
32 | 
33 |     if 'srate' not in inp[trk_name]:
34 |         return
35 | 
36 |     data = arr.interp_undefined(inp[trk_name]['vals'])
37 |     srate = inp[trk_name]['srate']
38 | 
39 |     data = arr.resample_hz(data, srate, 100)
40 |     srate = 100
41 | 
42 |     if len(data) < 30 * srate:
43 |         return
44 | 
45 |     minlist, maxlist = arr.detect_peaks(data, srate)
46 |     maxlist = maxlist[1:]
47 | 
48 |     # estimates the upper ue(n) and lower le(n) envelopes
49 |     xa = np.array([data[idx] for idx in minlist])
50 |     le = np.array([0.0] * len(data))  # float array, so envelope values are not truncated to int
51 |     for i in range(len(data)):
52 |         be = np.array([b((i - idx) / (0.2 * srate)) for idx in minlist])
53 |         s = sum(be)
54 |         if s != 0:
55 |             le[i] = np.dot(xa, be) / s
56 | 
57 |     xb = np.array([data[idx] for idx in maxlist])
58 |     ue = np.array([0.0] * len(data))
59 |     for i in range(len(data)):
60 |         be = np.array([b((i - idx) / (0.2 * srate)) for idx in maxlist])
61 |         s = sum(be)
62 |         if s != 0:
63 |             ue[i] = np.dot(xb, be) / s
64 | 
65 |     re = ue - le
66 |     re[re < 0] = 0
67 | 
68 |     # estimates resp rate
69 |     rr = arr.estimate_resp_rate(re, srate)
70 | 
71 |     # split by respiration
72 |     nsamp_in_breath = int(srate * 60 / rr)
73 |     m = int(len(data) / nsamp_in_breath)  # m segments exist
74 |     pps = []
75 |     for i in range(m - 1):
76 |         imax = arr.max_idx(re, i * nsamp_in_breath, (i+2) * nsamp_in_breath)  # 50% overlapping
77 |         imin = arr.min_idx(re, i * nsamp_in_breath, (i+2) * nsamp_in_breath)
78 |         ppmax = re[imax]
79 |         ppmin = re[imin]
80 |         ppe = 2 * (ppmax - ppmin) / (ppmax + ppmin) * 100  # estimate
81 |         if ppe > 50 or ppe < 0:
82 |             continue
83 | 
84 |         pp = cfg['pp']
85 |         if pp == 0:
86 |             pp = ppe
87 | 
88 |         err = abs(ppe - pp)
89 |         if err < 1:
90 |             pp = ppe
91 |         elif err < 25:
92 |             pp = (pp + ppe) / 2
93 |         else:
94 |             pass  # dont update
95 | 
96 |         cfg['pp'] = pp
97 | 
98 |         pps.append({'dt': (i * nsamp_in_breath) / srate, 'val': pp})
99 | 
100 |     return [
101 |         [{'dt': cfg['interval'], 'val': rr}],
102 |         pps
103 |     ]
104 | 
--------------------------------------------------------------------------------
/pyvital/filters/pleth_ptt.py:
--------------------------------------------------------------------------------
1 | import pyvital.arr as arr
2 | import numpy as np
3 | 
4 | cfg = {
5 |     'name': 'PLETH - Pulse Transit Time',
6 |     'group': 'Medical algorithms',
7 |     'desc': 'Calculate pulse transit time.',
8 |     'reference': '',
9 |     'overlap': 5,
10 |     'interval': 30,
11 |     'inputs': [{'name': 'ECG', 'type': 'wav'}, {'name': 'PLETH', 'type': 'wav'}],
12 |     'outputs': [
13 |         {'name': 'PTT_MIN', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},
14 |         {'name': 'PTT_DMAX', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},
15 |         {'name': 'PTT_MAX', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},
16 |         {'name': 'R_PEAK', 'type': 'num', 'min': 0, 'max': 2}
17 |     ]
18 | }
19 | 
20 | 
21 | def run(inp, opt, cfg):
22 |     trk_names = [k for k in inp]
23 | 
24 |     if 'srate' not in inp[trk_names[0]] or 'srate' not in inp[trk_names[1]]:
25 |         return
26 | 
27 |     ecg_data = arr.interp_undefined(inp[trk_names[0]]['vals'])
28 |     ecg_srate = inp[trk_names[0]]['srate']
29 | 
30 |     pleth_data = arr.interp_undefined(inp[trk_names[1]]['vals'])
31 |     pleth_srate = inp[trk_names[1]]['srate']
32 |     pleth_data = arr.band_pass(pleth_data, pleth_srate, 0.5, 15)
33 | 
34 |     ecg_rlist = arr.detect_qrs(ecg_data, ecg_srate)
35 |     pleth_minlist, pleth_maxlist = arr.detect_peaks(pleth_data, pleth_srate)
36 | 
37 |     dpleth = np.diff(pleth_data)
38 |     pleth_dmaxlist = []  # index of the maximum slope between nadir and peak in pleth
39 |     for i in range(min(len(pleth_minlist), len(pleth_maxlist) - 1)):  # guard: maxlist may be one shorter than minlist
40 |         dmax_idx = arr.max_idx(dpleth, pleth_minlist[i], pleth_maxlist[i+1])
41 |         pleth_dmaxlist.append(dmax_idx)
42 | 
43 |     pttmax_list = []
44 |     pttmin_list = []
45 |     pttdmax_list = []
46 |     for i in range(len(ecg_rlist) - 1):
47 |         if len(pleth_minlist) == 0:
48 |             continue
49 |         if len(pleth_maxlist) == 0:
50 |             continue
51 | 
52 |         rpeak_dt = ecg_rlist[i] / ecg_srate
53 |         rpeak_dt_next = ecg_rlist[i+1] / ecg_srate
54 |         if rpeak_dt < cfg['overlap']:
55 |             continue
56 | 
57 |         # find first min in pleth after rpeak_dt in ecg
58 |         found_minidx = 0
59 |         for minidx in pleth_minlist:
60 |             if minidx > rpeak_dt * pleth_srate:
61 |                 found_minidx = minidx
62 |                 break
63 |             elif minidx > rpeak_dt_next * pleth_srate:
64 |                 break
65 |         if found_minidx == 0:
66 |             continue
67 | 
68 |         # find first dmax in pleth after rpeak_dt in ecg
69 |         found_dmaxidx = 0
70 |         for dmaxidx in pleth_dmaxlist:
71 |             if dmaxidx > rpeak_dt * pleth_srate:
72 |                 found_dmaxidx = dmaxidx
73 |                 break
74 |             elif dmaxidx > rpeak_dt_next * pleth_srate:
75 |                 break
76 |         if found_dmaxidx == 0:
77 |             continue
78 | 
79 |         # find first max in pleth after rpeak_dt in ecg
80 |         found_maxidx = 0
81 |         for maxidx in pleth_maxlist:
82 |             if maxidx > rpeak_dt * pleth_srate:
83 |                 found_maxidx = maxidx
84 |                 break
85 |             elif maxidx > rpeak_dt_next * pleth_srate:
86 |                 break
87 |         if found_maxidx == 0:
88 |             continue
89 | 
90 |         max_dt = found_maxidx / pleth_srate
91 |         if max_dt > cfg['interval']:
92 |             continue
93 |         min_dt = found_minidx / pleth_srate
94 |         dmax_dt = found_dmaxidx / pleth_srate
95 | 
96 |         pttmax_list.append({'dt': max_dt, 'val': (max_dt - rpeak_dt) * 1000})
97 |         pttdmax_list.append({'dt': dmax_dt, 'val': (dmax_dt - rpeak_dt) * 1000})
98 |         pttmin_list.append({'dt': min_dt, 'val': (min_dt - rpeak_dt) * 1000})
99 | 
100 |     return [
101 |         pttmin_list,
102 |         pttdmax_list,
103 |         pttmax_list,
104 |         arr.get_samples(ecg_data, ecg_srate, ecg_rlist)]  # order matches the declared outputs
105 | 
--------------------------------------------------------------------------------
/pyvital/filters/abp_hpi.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import pyvital.arr as arr
4 | import torch
5 | import numpy as np
6 | import pandas as pd
7 | import pickle
8 | import torch.nn as nn
9 | 
10 | class Net(nn.Module):
11 |     def __init__(self):
12 |         super().__init__()
13 |         self.conv1 = nn.Conv1d(1, 64, 3, padding='valid')
14 |         self.conv2 = nn.Conv1d(64, 64, 3, padding='valid')
15 |         self.conv3 = nn.Conv1d(64, 64, 3, padding='valid')
16 |         self.conv4 = nn.Conv1d(64, 64, 3, padding='valid')
17 |         self.conv5 = nn.Conv1d(64, 64, 3, padding='valid')
18 |         self.batchnorm = nn.BatchNorm1d(64)
19 |         self.maxpool = nn.MaxPool1d(2)
20 |         self.gap = nn.AdaptiveAvgPool1d(1)
21 |         self.fc1 = nn.Linear(64, 16)
22 |         self.fc2 = nn.Linear(16, 1)
23 |         self.dropout = nn.Dropout(0.2)
24 | 
25 |     def
forward(self, x): 26 | x = self.maxpool(self.batchnorm(torch.relu(self.conv1(x)))) 27 | x = self.maxpool(self.batchnorm(torch.relu(self.conv2(x)))) 28 | x = self.maxpool(self.batchnorm(torch.relu(self.conv3(x)))) 29 | x = self.maxpool(self.batchnorm(torch.relu(self.conv4(x)))) 30 | x = self.maxpool(self.batchnorm(torch.relu(self.conv5(x)))) 31 | # print(x.shape) 32 | 33 | x = self.gap(x) 34 | x = torch.squeeze(x, 2) 35 | # print(x.shape) 36 | 37 | x = torch.relu(self.fc1(x)) 38 | x = self.dropout(x) 39 | x = torch.sigmoid(self.fc2(x)) 40 | 41 | return x 42 | 43 | model = None 44 | 45 | cfg = { 46 | 'name': 'ABP - Hypotension Prediction Index', 47 | 'group': 'Medical algorithms', 48 | 'desc': 'Predict hypotension 5 minutes before the event from arterial blood pressure using deep learning', 49 | 'reference': 'HPI_CNN', 50 | 'overlap': 10, 51 | 'interval': 20, 52 | 'inputs': [{'name': 'ART', 'type': 'wav'}], 53 | 'outputs': [{'name': 'HPI', 'type': 'num', 'min': 0, 'max': 100}] 54 | } 55 | 56 | def run(inp, opt, cfg): 57 | """ 58 | Predict hypotension 5 minute before the event from abp 59 | :param inp: arterial blood pressure (input wave) 60 | input wave must be 1-dimensional (#,) 61 | :param opt: 62 | :param cfg: 63 | :return: HPI index score 64 | """ 65 | global model 66 | 67 | trk_name = [k for k in inp][0] 68 | if 'srate' not in inp[trk_name]: 69 | return 70 | 71 | signal_data = np.array(inp[trk_name]['vals']) 72 | prop_nan = np.mean(np.isnan(signal_data)) 73 | if prop_nan > 0.1: 74 | return # raise ValueError(inp)#'nan {}'.format(prop_nan)) 75 | # print('abp_hpi: input is:', inp) 76 | # print('abp_hpi: input vals:', inp[trk_name]['vals']) 77 | 78 | signal_data = arr.interp_undefined(signal_data) 79 | 80 | srate = inp[trk_name]['srate'] 81 | signal_data = arr.resample_hz(signal_data, srate, 100) 82 | srate = 100 83 | 84 | if len(signal_data) < 2000 * 0.9: 85 | return # raise ValueError('len < 18 sec') 86 | 87 | if len(signal_data) != 2000: 88 | signal_data = signal_data[:2000] 89 | if len(signal_data) < 2000: 90 | signal_data = np.pad(signal_data, (0, 2000 - len(signal_data)), 'constant', constant_values=np.nan) 91 | 92 | signal_data = arr.interp_undefined(signal_data) 93 | 94 | if np.nanmax(signal_data) > 200: 95 | return 96 | if np.nanmin(signal_data) < 20: 97 | return 98 | if np.nanmax(signal_data) - np.nanmin(signal_data) < 30: 99 | return 100 | if any(np.abs(np.diff(signal_data[~np.isnan(signal_data)])) > 30): 101 | return 102 | 103 | signal_data = signal_data.reshape((-1, 1, 2000)) 104 | 105 | # print('abp_hpi: signal data process done, shape:', signal_data.shape) 106 | 107 | # normalize signal_data 108 | signal_data -= 65 109 | signal_data /= 65 110 | 111 | signal_data_torch = torch.from_numpy(signal_data) 112 | 113 | if model is None: 114 | model = Net() 115 | model.load_state_dict(torch.load(f'{os.path.dirname(__file__)}/model_hpi_state_dict_v1.pth')) 116 | 117 | # print('abp_hpi: model input is: ', signal_data_torch) 118 | prediction = model(signal_data_torch.float()) 119 | hpi = int(np.squeeze(prediction.detach().numpy()).tolist() * 100) 120 | # print('abp_hpi: model output is: ', hpi) 121 | 122 | return [ 123 | [{'dt': cfg['interval'], 'val': hpi}] 124 | ] 125 | 126 | if __name__ == '__main__': 127 | import vitaldb 128 | vf = vitaldb.VitalFile(1, 'ART') 129 | vf.run_filter(run, cfg) 130 | vf.to_vital(f'filtered.vital') 131 | -------------------------------------------------------------------------------- /pyvital/filters/ecg_classifier.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import pyvital.arr as arr 3 | import numpy as np 4 | import math 5 | import keras.models 6 | 7 | cfg = { 8 | 'name': 'ECG - AI classifier', 9 | 'group': 'Medical algorithms', 10 | 'desc': '', 11 | 'reference': '', 12 | 'overlap': 5, 13 | 'interval': 15, 14 | 'inputs': [{'name': 'ECG', 'type': 'wav'}], 15 | 'outputs': [ 16 | {'name': 'RHYTHM', 'type': 'str'}, 17 | {'name': 'BEAT', 'type': 'str'}, 18 | {'name': 'RTYPE', 'type': 'num', 'min':-1, 'max':4}, 19 | {'name': 'BTYPE', 'type': 'num', 'min':-1, 'max':3}, 20 | ] 21 | } 22 | 23 | model_beat = None 24 | model_rhythm = None 25 | def run(inp, opt, cfg): 26 | global model_beat, model_rhythm 27 | 28 | trk_name = [k for k in inp][0] 29 | 30 | if 'srate' not in inp[trk_name]: 31 | return 32 | 33 | data = arr.interp_undefined(inp[trk_name]['vals']) 34 | srate = inp[trk_name]['srate'] 35 | 36 | if model_beat is None: 37 | model_beat = keras.models.load_model(f'{os.path.dirname(__file__)}/model_beat.h5') 38 | 39 | if model_rhythm is None: 40 | model_rhythm = keras.models.load_model(f'{os.path.dirname(__file__)}/model_rhythm.h5') 41 | 42 | # resample 43 | data = arr.resample_hz(data, srate, 100) 44 | srate = 100 45 | 46 | # detect r-peaks 47 | if len(data) < 200: 48 | return 49 | peaks = np.array(arr.detect_qrs(data, srate), dtype=int) 50 | valid_mask = (srate <= peaks) & (peaks < len(data) - srate) 51 | peaks = peaks[valid_mask] # remove qrs before overlap 52 | if len(peaks) == 0: 53 | return 54 | 55 | # output tracks 56 | out_bstr = [] 57 | out_bnum = [] 58 | out_rstr = [] 59 | out_rnum = [] 60 | 61 | # collect beat samples 62 | x = [] 63 | for peak in peaks: 64 | seg = data[peak - srate:peak + srate] 65 | if max(seg) - min(seg) > 0: 66 | x.append(seg) 67 | 68 | if len(x) > 0: 69 | x = np.array(x, dtype=np.float32) 70 | 71 | # min-max normalization 72 | x -= x.min(axis=1)[...,None] 73 | x /= x.max(axis=1)[...,None] 74 | x = x[..., None] # add dimension for cnn 75 | 76 | # predict 77 | y = np.argmax(model_beat.predict(x, verbose=0), axis=1) 78 | 79 | # beat label 80 | for i in range(len(y)): 81 | if y[i] == 0: 82 | s = 'N' 83 | elif y[i] == 1: 84 | s = 'S' 85 | elif y[i] == 2: 86 | s = 'V' 87 | else: 88 | continue 89 | out_bstr.append({'dt': peaks[i] / srate, 'val': s}) 90 | out_bnum.append({'dt': peaks[i] / srate, 'val': y[i]}) 91 | 92 | # rhythm label 93 | if len(peaks) >= 3: 94 | x = [] 95 | seglen = 10 * srate 96 | if len(data) >= seglen: 97 | for i in range(0, len(data) - seglen, seglen): 98 | seg = data[i:i+seglen] 99 | if max(seg) - min(seg) > 0: 100 | x.append(seg) 101 | if len(x) > 0: 102 | x = np.array(x, dtype=np.float32) 103 | x = x[x.min(axis=1) < x.max(axis=1)] 104 | 105 | # min-max normalization 106 | x -= x.min(axis=1)[...,None] 107 | # x /= x.max(axis=1)[...,None] 108 | x = x[..., None] # add dimension for cnn 109 | 110 | # prediction 111 | y = np.argmax(model_rhythm.predict(x, verbose=0), axis=1) 112 | for i in range(len(y)): 113 | if y[i] == 0: 114 | s = 'SR' 115 | elif y[i] == 1: 116 | s = 'AF' 117 | elif y[i] == 2: 118 | s = 'Others' 119 | elif y[i] == 3: 120 | s = 'Noise' 121 | else: 122 | continue 123 | out_rstr.append({'dt': i * seglen / srate, 'val': s}) 124 | out_rnum.append({'dt': i * seglen / srate, 'val': y[i]}) 125 | 126 | return [out_rstr, out_bstr, out_rnum, out_bnum] 127 | 128 | 129 | if __name__ == '__main__': 130 | import vitaldb 131 | for caseid in (2432, ): #2693, 603, 3323, 4636, 1204, 1738, 1776, 1901, 1926): 
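        # Offline batch check: each case below is pulled from the open VitalDB
        # dataset and pushed through this filter. Note the input contract implied
        # by run() above: model_beat sees one 2 s window per R-peak (200 samples
        # at the resampled 100 Hz, min-max scaled, shaped (n, 200, 1)), while
        # model_rhythm sees consecutive non-overlapping 10 s segments (1000 samples each).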
132 | print(f'{caseid}', end='...', flush=True) 133 | vf = vitaldb.VitalFile(caseid, 'ECG_II') 134 | print(f'filtering', end='...', flush=True) 135 | vf.run_filter(run, cfg) 136 | print(f'saving', end='...', flush=True) 137 | vf.to_vital(f'filtered_{caseid}.vital') 138 | print(f'done') -------------------------------------------------------------------------------- /pyvital/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from sanic import Sanic 3 | from sanic import response 4 | import json 5 | import importlib 6 | import os 7 | import traceback 8 | import copy 9 | import gzip 10 | import time 11 | 12 | filter_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'filters') 13 | server_port = 3000 14 | 15 | for arg in sys.argv[1:]: 16 | if os.path.isdir(arg): 17 | filter_folder = arg 18 | elif arg.isdecimal(): 19 | if 0 < int(arg) < 65535: 20 | server_port = int(arg) 21 | 22 | print('filter folder : ' + str(filter_folder)) 23 | print('server port : ' + str(server_port)) 24 | 25 | sys.path.insert(0, filter_folder) 26 | 27 | cfgs = {} # Current settings and data for the module (the corresponding invokeid) 28 | default_cfgs = {} # Default settings and data for the module 29 | mods = {} # Loaded modules 30 | mod_cfgs = [] # load module cfgs 31 | 32 | # load filters 33 | for root, dirs, files in os.walk(filter_folder): 34 | for filename in files: 35 | #filepath = os.path.join(root, filename) 36 | if filename[-3:] != ".py": 37 | continue 38 | 39 | m_modname = filename[:-3] #filepath[:-3].replace(os.sep, ".") 40 | print('importing ' + m_modname) 41 | o = importlib.import_module(m_modname) 42 | mods[m_modname] = o # modules are saved for later reloading 43 | 44 | if not hasattr(o, 'cfg'): 45 | continue 46 | if not hasattr(o, 'run'): 47 | continue 48 | 49 | if m_modname not in default_cfgs: # if the module was first loaded or changed? 
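            # a pristine snapshot of each module's cfg is kept here. run() is
            # allowed to mutate its cfg to carry state between calls (e.g.
            # pleth_pvi stores its running estimate in cfg['pp']), so each new
            # invokeid later receives a fresh deep copy of this snapshot instead
            # of a shared, already-mutated dict.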
50 |             default_cfgs[m_modname] = copy.deepcopy(o.cfg)
51 |         cfg = copy.deepcopy(default_cfgs[m_modname])
52 | 
53 |         if 'name' in cfg:
54 |             name = cfg['name']
55 |         else:
56 |             name = m_modname
57 |         if 'group' in cfg:
58 |             group = cfg['group']
59 |         else:
60 |             group = ''
61 |         if 'desc' in cfg:
62 |             desc = cfg['desc']
63 |         else:
64 |             desc = ''
65 |         if 'interval' in cfg:
66 |             interval = cfg['interval']
67 |         else:
68 |             interval = 60
69 |         if 'overlap' in cfg:
70 |             overlap = cfg['overlap']
71 |         else:
72 |             overlap = 0
73 |         if 'inputs' in cfg:
74 |             inputs = cfg['inputs']
75 |         else:
76 |             inputs = []
77 |         if 'options' in cfg:
78 |             opt = cfg['options']
79 |         else:
80 |             opt = []
81 |         if 'license' in cfg:
82 |             licen = cfg['license']
83 |         else:
84 |             licen = ""
85 |         if 'reference' in cfg:
86 |             refer = cfg['reference']
87 |         else:
88 |             refer = ""
89 |         if 'outputs' in cfg:
90 |             outputs = cfg['outputs']
91 |         else:
92 |             outputs = []
93 | 
94 |         mod_cfgs.append({
95 |             "modname": m_modname,
96 |             "name": name,
97 |             "group": group,
98 |             "desc": desc,
99 |             "interval": interval,
100 |             "overlap": overlap,
101 |             "inputs": inputs,
102 |             "options": opt,
103 |             "outputs": outputs,
104 |             "license": licen,
105 |             "reference": refer
106 |         })
107 | 
108 | app = Sanic("filter_server")
109 | 
110 | @app.get("/")
111 | async def list_filter(request):
112 |     return response.json(mod_cfgs)
113 | 
114 | @app.post('/<modname>')  # the filter module name is part of the URL and is passed to the handler
115 | async def run_filter(request, modname):
116 |     posts = gzip.decompress(request.body)
117 |     posts = posts.decode('utf-8')
118 | 
119 |     #print('[' + posts + ']')
120 |     try:
121 |         posts = json.loads(posts)
122 |     except Exception as e:
123 |         print(e)
124 |         return response.raw('')
125 | 
126 |     invokeid = posts['invokeid']
127 |     inp = posts['inputs']
128 |     m_modname = os.path.basename(modname)  # module name
129 | 
130 |     o = mods[m_modname]
131 | 
132 |     if invokeid not in cfgs.keys():  # whether this invokeid is a new one?
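        # one cfg dict is cached per invokeid so that a stateful filter sees its
        # own cfg again on later calls of the same session. For reference, the
        # decompressed JSON body parsed above is expected to look like this
        # (illustrative values only):
        #   {"invokeid": "demo-1", "interval": 30, "overlap": 3,
        #    "options": {...},                                  # optional
        #    "inputs": {"PLETH": {"srate": 100, "vals": [...]}}}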
133 | if m_modname not in default_cfgs.keys(): # if the module is loaded at first or changed 134 | default_cfgs[m_modname] = copy.deepcopy(o.cfg) 135 | cfg = copy.deepcopy(default_cfgs[m_modname]) 136 | cfgs[invokeid] = cfg 137 | else: # reload the data of the invokeid 138 | cfg = cfgs[invokeid] 139 | 140 | cfg['interval'] = posts['interval'] # Interval, overlap must be user-specified values 141 | cfg['overlap'] = posts['overlap'] 142 | cfg['invokeid'] = invokeid 143 | 144 | opt = [] 145 | if 'options' in posts: 146 | opt = posts['options'] 147 | 148 | ret = o.run(inp, opt, cfg) # evoke run function 149 | ret = json.dumps(ret) # print the result 150 | ret = gzip.compress(ret.encode('utf-8')) 151 | 152 | return response.raw(ret) 153 | 154 | if __name__ == "__main__": 155 | app.run(host="0.0.0.0", port=server_port, access_log=False) 156 | -------------------------------------------------------------------------------- /pyvital/filters/eeg_fft.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | from math import factorial 4 | 5 | cfg = { 6 | 'name': 'EEG - Frequency Analysis', 7 | 'group': 'Medical algorithms', 8 | 'desc': 'Frequency Analysis of EEG.', 9 | 'reference': '', 10 | 'overlap': 58, 11 | 'interval': 60, 12 | 'inputs': [{'name': 'EEG', 'type': 'wav'}], 13 | 'outputs': [ 14 | {'name': 'TOTPOW', 'type': 'num', 'unit': 'dB', 'min': 0, 'max': 100}, 15 | {'name': 'SEF', 'type': 'num', 'unit': 'Hz', 'min': 0, 'max': 30}, 16 | {'name': 'MF', 'type': 'num', 'unit': 'Hz', 'min': 0, 'max': 30}, 17 | 18 | {'name': 'DELTA', 'type': 'num', 'unit': '%', 'min': 0, 'max': 100}, 19 | {'name': 'THETA', 'type': 'num', 'unit': '%', 'min': 0, 'max': 100}, 20 | {'name': 'ALPHA', 'type': 'num', 'unit': '%', 'min': 0, 'max': 100}, 21 | {'name': 'BETA', 'type': 'num', 'unit': '%', 'min': 0, 'max': 100}, 22 | {'name': 'GAMMA', 'type': 'num', 'unit': '%', 'min': 0, 'max': 100} 23 | ] 24 | } 25 | 26 | # http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay 27 | def savitzky_golay(y, window_size, order, deriv=0, rate=1): 28 | try: 29 | window_size = np.abs(int(window_size)) 30 | order = np.abs(int(order)) 31 | except ValueError: 32 | raise ValueError("window_size and order have to be of type int") 33 | 34 | if window_size % 2 != 1 or window_size < 1: 35 | raise TypeError("window_size size must be a positive odd number") 36 | 37 | if window_size < order + 2: 38 | raise TypeError("window_size is too small for the polynomials order") 39 | 40 | order_range = range(order+1) 41 | half_window = (window_size -1) // 2 42 | # precompute coefficients 43 | b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)]) 44 | m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv) 45 | # pad the signal at the extremes with values taken from the signal itself 46 | firstvals = y[0] - np.abs(y[1:half_window+1][::-1] - y[0]) 47 | lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1]) 48 | y = np.concatenate((firstvals, y, lastvals)) 49 | return np.convolve(m[::-1], y, mode='valid') 50 | 51 | def smooth(y): 52 | #return butter_bandpass(y, 0.5, 50, 128) 53 | #return lowess(y) 54 | return savitzky_golay(y, window_size=91, order=3) 55 | 56 | def fromhz(f, fres): 57 | # if type(f) is np.array: 58 | # return (f / fres).astype(int) 59 | return int(f / fres) 60 | 61 | def tohz(i, fres): 62 | return fres * i 63 | 64 | def repcols(v, nreps): 65 | return np.tile(v[:, None], nreps) 66 | 67 | def run(inp, opt, 
cfg):
68 |     trk_name = [k for k in inp][0]
69 | 
70 |     if 'srate' not in inp[trk_name]:
71 |         return
72 | 
73 |     data = arr.interp_undefined(inp[trk_name]['vals'])
74 |     data -= smooth(np.array(data))
75 |     srate = int(inp[trk_name]['srate'])
76 |     nfft = srate * 2  # srate * epoch size
77 |     fres = srate / nfft  # frequency resolution (hz)
78 | 
79 |     # frequency domain analysis
80 |     EPOCH_SIZE = int(srate * 2)
81 |     STRIDE_SIZE = int(srate * 0.5)
82 |     ps = []
83 |     for epoch_start in range(0, len(data) - EPOCH_SIZE + 1, STRIDE_SIZE):  # epochs overlap every 0.5 sec
84 |         epoch_w = data[epoch_start:epoch_start + EPOCH_SIZE]  # 2 sec epoch
85 |         epoch_w = (epoch_w - np.mean(epoch_w)) * np.blackman(EPOCH_SIZE)  # detrend and windowing
86 |         dft = np.fft.fft(epoch_w)[:srate]  # the input is real, so only half of the fft bins are needed
87 |         dft[0] = 0  # remove the dc component
88 |         ps.append(2 * np.abs(dft) ** 2)  # absolute power, doubled because half of the spectrum was dropped
89 |     ps = np.mean(np.array(ps), axis=0)
90 |     pssum = np.cumsum(ps)  # cumulative sum
91 |     pssum = pssum[1:]
92 |     totpow = pssum[fromhz(30, fres)]
93 |     sef = tohz(np.argmax(pssum > 0.95 * totpow), fres)
94 |     mf = tohz(np.argmax(pssum > 0.5 * totpow), fres)
95 | 
96 |     delta = pssum[fromhz(4, fres) - 1] / pssum[-1] * 100
97 |     theta = (pssum[fromhz(8, fres) - 1] - pssum[fromhz(4, fres)]) / pssum[-1] * 100
98 |     alpha = (pssum[fromhz(12, fres) - 1] - pssum[fromhz(8, fres)]) / pssum[-1] * 100
99 |     beta = (pssum[fromhz(30, fres) - 1] - pssum[fromhz(12, fres)]) / pssum[-1] * 100
100 |     gamma = (pssum[-1] - pssum[fromhz(30, fres)]) / pssum[-1] * 100
101 | 
102 |     # pttmax_list.append()
103 |     # pttdmax_list.append({'dt': dmax_dt, 'val': (dmax_dt - rpeak_dt) * 1000})
104 |     # pttmin_list.append({'dt': min_dt, 'val': (min_dt - rpeak_dt) * 1000})
105 |     #
106 |     return [
107 |         [{'dt': cfg['interval'], 'val': 10 * np.log10(totpow)}],
108 |         [{'dt': cfg['interval'], 'val': sef}],
109 |         [{'dt': cfg['interval'], 'val': mf}],
110 |         [{'dt': cfg['interval'], 'val': delta}],
111 |         [{'dt': cfg['interval'], 'val': theta}],
112 |         [{'dt': cfg['interval'], 'val': alpha}],
113 |         [{'dt': cfg['interval'], 'val': beta}],
114 |         [{'dt': cfg['interval'], 'val': gamma}]
115 |     ]
116 |     # pttmin_list,
117 |     # pttdmax_list,
118 |     # arr.get_samples(ecg_data, ecg_srate, ecg_rlist),
119 |     # pttmax_list]
120 | 
--------------------------------------------------------------------------------
/pyvital/filters/pleth_dpop.py:
--------------------------------------------------------------------------------
1 | import pyvital.arr as arr
2 | import numpy as np
3 | import math
4 | 
5 | last_ppv = 0
6 | 
7 | cfg = {
8 |     'name': 'PVI - Delta Plethysmographic Waveform Amplitude',
9 |     'group': 'Medical algorithms',
10 |     'desc': 'Calculate the variation of pulse oximetric plethysmographic (POP) waveform amplitude',
11 |     'reference': 'Aboy et al, An Enhanced Automatic Algorithm for Estimation of Respiratory Variations in Arterial Pulse Pressure During Regions of Abrupt Hemodynamic Changes. IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, VOL. 56, NO. 10, OCTOBER 2009',
12 |     'overlap': 3,
13 |     'interval': 30,
14 |     'inputs': [{'name': 'PLETH', 'type': 'wav'}],
15 |     'outputs': [
16 |         {'name': 'DELTA_POP', 'type': 'num', 'min': 0, 'max': 30, 'unit': '%'},
17 |         {'name': 'PULSE_VAL', 'type': 'num', 'min': 0, 'max': 100, 'unit': 'mmHg'},
18 |         {'name': 'RR', 'type': 'num', 'min': 0, 'max': 30, 'unit': '/min'}
19 |     ]
20 | }
21 | 
22 | 
23 | def b(u):
24 |     if -5 <= u <= 5:
25 |         return math.exp(-u * u / 2)
26 |     else:
27 |         return 0
28 | 
29 | 
30 | def run(inp, opt, cfg):
31 |     """
32 |     calculate delta-POP from the pleth waveform
33 |     :param inp: pleth waveform
34 |     :return: delta-POP, accepted pulse amplitudes, respiratory rate
35 |     """
36 |     global last_ppv
37 | 
38 |     trk_name = [k for k in inp][0]
39 | 
40 |     if 'srate' not in inp[trk_name]:
41 |         return
42 | 
43 |     data = arr.interp_undefined(inp[trk_name]['vals'])
44 |     srate = inp[trk_name]['srate']
45 | 
46 |     data = arr.resample_hz(data, srate, 100)
47 |     srate = 100
48 | 
49 |     if len(data) < 30 * srate:
50 |         print('signal is shorter than 30 sec')
51 |         return
52 | 
53 |     # beat detection
54 |     minlist, maxlist = arr.detect_peaks(data, srate)
55 |     maxlist = maxlist[1:]
56 | 
57 |     # beat lengths
58 |     beatlens = []
59 |     beats_128 = []
60 |     beats_128_valid = []
61 |     for i in range(0, len(minlist)-1):
62 |         beatlen = minlist[i+1] - minlist[i]  # in samps
63 |         if not 30 < beatlen < 300:
64 |             beats_128.append(None)
65 |             continue
66 | 
67 |         pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
68 |         if not 20 < pp < 100:
69 |             beats_128.append(None)
70 |             continue
71 | 
72 |         beatlens.append(beatlen)
73 |         beat = data[minlist[i]:minlist[i+1]]
74 |         resampled = arr.resample(beat, 128)
75 |         beats_128.append(resampled)
76 |         beats_128_valid.append(resampled)
77 | 
78 |     if not beats_128_valid:
79 |         return
80 | 
81 |     avgbeat = np.array(beats_128_valid).mean(axis=0)
82 | 
83 |     meanlen = np.mean(beatlens)
84 |     stdlen = np.std(beatlens)
85 |     if stdlen > meanlen * 0.2:  # irregular rhythm
86 |         return
87 | 
88 |     # remove beats with correlation < 0.9
89 |     pulse_vals = []
90 |     for i in range(0, len(minlist)-1):
91 |         if beats_128[i] is None:  # explicit None check; truth-testing an ndarray raises an error
92 |             continue
93 |         if np.corrcoef(avgbeat, beats_128[i])[0, 1] < 0.9:
94 |             continue
95 |         pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
96 |         pulse_vals.append({'dt': minlist[i] / srate, 'val': pp})
97 | 
98 |     # estimates the upper env(n) and lower env(n) envelopes
99 |     xa = np.array([data[idx] for idx in minlist])
100 |     lower_env = np.array([0.0] * len(data))
101 |     for i in range(len(data)):
102 |         be = np.array([b((i - idx) / (0.2 * srate)) for idx in minlist])
103 |         s = sum(be)
104 |         if s != 0:
105 |             lower_env[i] = np.dot(xa, be) / s
106 | 
107 |     xb = np.array([data[idx] for idx in maxlist])
108 |     upper_env = np.array([0.0] * len(data))
109 |     for i in range(len(data)):
110 |         be = np.array([b((i - idx) / (0.2 * srate)) for idx in maxlist])
111 |         s = sum(be)
112 |         if s != 0:
113 |             upper_env[i] = np.dot(xb, be) / s
114 | 
115 |     pulse_env = upper_env - lower_env
116 |     pulse_env[pulse_env < 0.0] = 0.0
117 | 
118 |     # estimates resp rate
119 |     rr = arr.estimate_resp_rate(pulse_env, srate)
120 | 
121 |     # split by respiration
122 |     nsamp_in_breath = int(srate * 60 / rr)
123 |     m = int(len(data) / nsamp_in_breath)  # m segments exist
124 |     raw_pps = []
125 |     pps = []
126 |     for ibreath in np.arange(0, m - 1, 0.5):
127 |         pps_breath = []
128 |         for ppe in pulse_vals:
129 |             if ibreath * nsamp_in_breath < ppe['dt'] * srate < (ibreath + 1) * nsamp_in_breath:
130 |                 pps_breath.append(ppe['val'])
131 |         if len(pps_breath) < 4:
132 |             continue
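        # delta-POP for this breath is the normalized swing of the beat-to-beat
        # pulse amplitudes collected above:
        #   dPOP = 2 * (POPmax - POPmin) / (POPmax + POPmin) * 100
        # e.g. with hypothetical amplitudes 1.2 and 0.9 within one breath,
        # dPOP = 2 * 0.3 / 2.1 * 100 = 28.6 %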
133 | 
134 |         pp_min = min(pps_breath)
135 |         pp_max = max(pps_breath)
136 | 
137 |         ppv = 2 * (pp_max - pp_min) / (pp_max + pp_min) * 100  # estimate
138 |         if not 0 < ppv < 50:
139 |             continue
140 | 
141 |         # raw_pps.append({'dt': (ibreath * nsamp_in_breath) / srate, 'val': pp})
142 |         #
143 |         # kalman filter
144 |         if last_ppv == 0:  # first time
145 |             last_ppv = ppv
146 |         elif abs(last_ppv - ppv) <= 1.0:
147 |             ppv = last_ppv
148 |         elif abs(last_ppv - ppv) <= 25.0:  # ppv cannot be changed abruptly
149 |             ppv = (ppv + last_ppv) * 0.5
150 |             last_ppv = ppv
151 |         else:
152 |             continue  # no update
153 | 
154 |         pps.append({'dt': ((ibreath + 1) * nsamp_in_breath) / srate, 'val': int(ppv)})
155 | 
156 |     return [
157 |         pps,
158 |         pulse_vals,
159 |         [{'dt': cfg['interval'], 'val': rr}]
160 |     ]
161 | 
--------------------------------------------------------------------------------
/pyvital/filters/abp_ppv.py:
--------------------------------------------------------------------------------
1 | import pyvital.arr as arr
2 | import numpy as np
3 | import time
4 | import scipy.interpolate
5 | import scipy.signal
6 | 
7 | last_ppv = 0
8 | last_spv = 0
9 | 
10 | cfg = {
11 |     'name': 'ART - Pulse Pressure Variation',
12 |     'group': 'ABP',
13 |     'desc': 'Calculate pulse pressure variation using modified version of the method in the reference',
14 |     'reference': 'Aboy et al, An Enhanced Automatic Algorithm for Estimation of Respiratory Variations in Arterial Pulse Pressure During Regions of Abrupt Hemodynamic Changes. IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING, VOL. 56, NO. 10, OCTOBER 2009',
15 |     'overlap': 20,
16 |     'interval': 30,  # at least 30 sec of data is needed to estimate the respiratory rate
17 |     'inputs': [{'name': 'ART', 'type': 'wav'}],
18 |     'outputs': [
19 |         {'name': 'PPV', 'type': 'num', 'min': 0, 'max': 30, 'unit': '%'},
20 |         {'name': 'SPV', 'type': 'num', 'min': 0, 'max': 30, 'unit': '%'},
21 |         {'name': 'ART_RR', 'type': 'num', 'min': 0, 'max': 30, 'unit': '/min'}
22 |     ]
23 | }
24 | 
25 | 
26 | def run(inp, opt, cfg):
27 |     """
28 |     calculate ppv from arterial waveform
29 |     :param art: arterial waveform
30 |     :return: ppv, spv, and respiratory rate
31 |     """
32 |     global last_ppv, last_spv
33 | 
34 |     trk_name = [k for k in inp][0]
35 | 
36 |     if 'srate' not in inp[trk_name]:
37 |         return
38 | 
39 |     data = arr.interp_undefined(inp[trk_name]['vals'])
40 |     srate = inp[trk_name]['srate']
41 | 
42 |     data = arr.resample_hz(data, srate, 100)
43 |     srate = 100
44 | 
45 |     if len(data) < 30 * srate:
46 |         print('signal is shorter than 30 sec')
47 |         return
48 | 
49 |     # beat detection
50 |     minlist, maxlist = arr.detect_peaks(data, srate)
51 |     maxlist = maxlist[1:]
52 | 
53 |     # beat lengths
54 |     beatlens = []
55 |     beats_128 = []
56 |     beats_128_valid = []
57 |     for i in range(0, len(minlist)-1):
58 |         beatlen = minlist[i+1] - minlist[i]  # in samps
59 |         if not 30 < beatlen < 300:
60 |             beats_128.append(None)
61 |             continue
62 | 
63 |         pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
64 |         if not 20 < pp < 100:
65 |             beats_128.append(None)
66 |             continue
67 | 
68 |         beatlens.append(beatlen)
69 |         beat = data[minlist[i]:minlist[i+1]]
70 |         resampled = arr.resample(beat, 128)
71 |         beats_128.append(resampled)
72 |         beats_128_valid.append(resampled)
73 | 
74 |     if not beats_128_valid:
75 |         return
76 | 
77 |     avgbeat = np.array(beats_128_valid).mean(axis=0)
78 | 
79 |     meanlen = np.mean(beatlens)
80 |     stdlen = np.std(beatlens)
81 |     if stdlen > meanlen * 0.2:  # irregular rhythm
82 |         return
83 | 
84 |     # remove beats with correlation < 0.9
85 |     pp_vals = []
86 |     sp_vals = []
87 |     for i in range(0,
len(minlist)-1): 88 | if beats_128[i] is None or not len(beats_128[i]): 89 | continue 90 | if np.corrcoef(avgbeat, beats_128[i])[0, 1] < 0.9: 91 | continue 92 | pp = data[maxlist[i]] - data[minlist[i]] # pulse pressure 93 | sp = data[maxlist[i]] 94 | pp_vals.append({'dt': minlist[i] / srate, 'val': pp}) 95 | sp_vals.append({'dt': minlist[i] / srate, 'val': sp}) 96 | 97 | dtstart = time.time() 98 | 99 | # estimates resp rate 100 | # upper env 101 | idx_start = max(min(minlist),min(maxlist)) 102 | idx_end = min(max(minlist),max(maxlist)) 103 | xa = scipy.interpolate.CubicSpline(maxlist, [data[idx] for idx in maxlist])(np.arange(idx_start, idx_end)) 104 | 105 | # lower env 106 | xb = scipy.interpolate.CubicSpline(minlist, [data[idx] for idx in minlist])(np.arange(idx_start, idx_end)) 107 | rr = arr.estimate_resp_rate(xa-xb, srate) 108 | 109 | dtend = time.time() 110 | #print('rr {}'.format(rr)) 111 | 112 | # split by respiration 113 | nsamp_in_breath = int(srate * 60 / rr) 114 | m = int(len(data) / nsamp_in_breath) # m segments exist 115 | 116 | raw_pps = [] 117 | raw_sps = [] 118 | ppvs = [] 119 | spvs = [] 120 | for ibreath in np.arange(0, m - 1, 0.5): 121 | pps_breath = [] 122 | sps_breath = [] 123 | 124 | for ppe in pp_vals: 125 | if ibreath * nsamp_in_breath < ppe['dt'] * srate < (ibreath + 1) * nsamp_in_breath: 126 | pps_breath.append(ppe['val']) 127 | 128 | for spe in sp_vals: 129 | if ibreath * nsamp_in_breath < spe['dt'] * srate < (ibreath + 1) * nsamp_in_breath: 130 | sps_breath.append(spe['val']) 131 | 132 | if len(pps_breath) < 4: 133 | continue 134 | 135 | if len(sps_breath) < 4: 136 | continue 137 | 138 | pp_min = min(pps_breath) 139 | pp_max = max(pps_breath) 140 | sp_min = min(sps_breath) 141 | sp_max = max(sps_breath) 142 | 143 | ppv = (pp_max - pp_min) / (pp_max + pp_min) * 200 144 | if not 0 < ppv < 50: 145 | continue 146 | 147 | spv = (sp_max - sp_min) / (sp_max + sp_min) * 200 148 | if not 0 < spv < 50: 149 | continue 150 | 151 | # kalman filter 152 | if last_ppv == 0: # first time 153 | last_ppv = ppv 154 | elif abs(last_ppv - ppv) <= 1.0: 155 | ppv = last_ppv 156 | elif abs(last_ppv - ppv) <= 25.0: # ppv cannot be changed abruptly 157 | ppv = (ppv + last_ppv) * 0.5 158 | last_ppv = ppv 159 | else: 160 | continue 161 | 162 | if last_spv == 0: # first time 163 | last_spv = spv 164 | elif abs(last_spv - spv) <= 1.0: 165 | spv = last_spv 166 | elif abs(last_spv - spv) <= 25.0: # ppv cannot be changed abruptly 167 | spv = (spv + last_spv) * 0.5 168 | last_spv = spv 169 | else: 170 | continue 171 | 172 | ppvs.append(ppv) 173 | spvs.append(spv) 174 | 175 | median_ppv = np.median(ppvs) 176 | median_spv = np.median(spvs) 177 | 178 | return [ 179 | [{'dt': cfg['interval'], 'val': median_ppv}], 180 | [{'dt': cfg['interval'], 'val': median_spv}], 181 | [{'dt': cfg['interval'], 'val': rr}] 182 | ] 183 | -------------------------------------------------------------------------------- /pyvital/filters/pkpd_3comp.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | 4 | last_a1 = 0 5 | last_a2 = 0 6 | last_a3 = 0 7 | last_a4 = 0 8 | last_vol = 0 9 | 10 | cfg = { 11 | 'name': 'PKPD - 3 Compartment Model', 12 | 'group': 'Medical algorithms', 13 | 'desc': '3 compartment PKPD model', 14 | 'reference': '', 15 | 'overlap': 0, 16 | 'interval': 10, 17 | 'inputs': [ 18 | {'name': 'PUMP1_VOL', 'type': 'num'}, 19 | {'name': 'PUMP1_CONC', 'type': 'num'} 20 | ],'options': [ 21 | {'name': 'model', 'sels': 
'Marsh/Modified Marsh/Schnider/Paedfusor/Kataria/Kim/Minto', 'init': 'Schnider'}, 22 | {'name': 'age', 'init': 50}, 23 | {'name': 'sex', 'sels': 'F/M'}, 24 | {'name': 'ht', 'init': 160}, 25 | {'name': 'wt', 'init': 63} 26 | ], 27 | 'outputs': [ 28 | {'name': 'CP', 'type': 'num', 'min': 0, 'max': 10}, 29 | {'name': 'CE', 'type': 'num', 'min': 0, 'max': 10} 30 | ] 31 | } 32 | 33 | 34 | def get_model(name, age, sex, wt, ht): 35 | v1 = 0 36 | k10 = 0 37 | k12 = 0 38 | k13 = 0 39 | k21 = 0 40 | k31 = 0 41 | ke0 = 0 42 | if name == 'Marsh': 43 | v1 = 0.228 * wt 44 | k10 = 0.119 45 | k12 = 0.114 46 | k13 = 0.0419 47 | k21 = 0.055 48 | k31 = 0.0033 49 | ke0 = 0.26 # diprifusor 50 | elif name == "Modified Marsh": 51 | v1 = 0.228 * wt 52 | k10 = 0.119 53 | k12 = 0.114 54 | k13 = 0.0419 55 | k21 = 0.055 56 | k31 = 0.0033 57 | ke0 = 1.2195 # stanpump, orchestra 58 | elif name == "Schnider": 59 | lbm = james(sex, wt, ht) 60 | v1 = 4.27 61 | v2 = 18.9 - 0.391 * (age - 53) 62 | v3 = 238 63 | cl1 = 1.89 + 0.0456 * (wt - 77) - 0.0681 * (lbm - 59) + 0.0264 * (ht - 177) 64 | cl2 = 1.29 - 0.024 * (age - 53) 65 | cl3 = 0.836 66 | k10 = cl1 / v1 67 | k12 = cl2 / v1 68 | k13 = cl3 / v1 69 | k21 = cl2 / v2 70 | k31 = cl3 / v3 71 | ke0 = 0.456 72 | elif name == "Paedfusor": 73 | if 1 <= age < 13: 74 | v1 = 0.4584 * wt 75 | k10 = 0.1527 * wt ** -0.3 76 | elif age <= 13: 77 | v1 = 0.4 * wt 78 | k10 = 0.0678 79 | elif age <= 14: 80 | v1 = 0.342 * wt 81 | k10 = 0.0792 82 | elif age <= 15: 83 | v1 = 0.284 * wt 84 | k10 = 0.0954 85 | elif age <= 16: 86 | v1 = 0.22857 * wt 87 | k10 = 0.119 88 | else: 89 | v1 = None 90 | k12 = 0.114 91 | k13 = 0.0419 92 | k21 = 0.055 93 | k31 = 0.0033 94 | ke0 = 0.26 # from diprifusor (for adults) 95 | ke0 = 0.91 # Munoz et al Anesthesiology 2004:101(6) 96 | elif name == "Kataria": # Kataria et al. 
Anesthesiology 1994;80:104 97 | v1 = 0.41 * wt 98 | v2 = 0.78 * wt + 3.1 * age - 15.5 99 | v3 = 6.9 * wt 100 | cl1 = 0.035 * wt 101 | cl2 = 0.077 * wt 102 | cl3 = 0.026 * wt 103 | k10 = cl1 / v1 104 | k12 = cl2 / v1 105 | k13 = cl3 / v1 106 | k21 = cl2 / v2 107 | k31 = cl3 / v3 108 | ke0 = 0.41 # Munoz et al Anesthesiology 2004:101(6) 109 | elif name == "Kim": 110 | v1 = 1.69 111 | v2 = 27.2 + 0.93 * (wt - 25) 112 | cl1 = 0.89 * (wt / 23.6) ** 0.97 113 | cl2 = 1.3 114 | k10 = cl1 / v1 115 | k12 = cl2 / v1 116 | k13 = 0 117 | k21 = cl2 / v2 118 | k31 = 0 119 | elif name == "Minto": 120 | lbm = james(sex, wt, ht) 121 | v1 = 5.1 - 0.0201 * (age - 40) + 0.072 * (lbm - 55) 122 | v2 = 9.82 - 0.0811 * (age - 40) + 0.108 * (lbm - 55) 123 | v3 = 5.42 124 | cl1 = 2.6 - 0.0162 * (age - 40) + 0.0191 * (lbm - 55) 125 | cl2 = 2.05 - 0.0301 * (age - 40) 126 | cl3 = 0.076 - 0.00113 * (age - 40) 127 | k10 = cl1 / v1 128 | k12 = cl2 / v1 129 | k13 = cl3 / v1 130 | k21 = cl2 / v2 131 | k31 = cl3 / v3 132 | ke0 = 0.595 - 0.007 * (age - 40) 133 | return v1, k10, k12, k13, k21, k31, ke0 134 | 135 | 136 | def james(sex, wt, ht): 137 | if sex == "M": 138 | return 1.1 * wt - 128 * (wt / ht) ** 2 139 | elif sex == "F": 140 | return 1.07 * wt - 148 * (wt / ht) ** 2 141 | return None 142 | 143 | 144 | def run(inp, opt, cfg): 145 | global last_vol, last_a1, last_a2, last_a3, last_a4 146 | 147 | interval = cfg['interval'] 148 | 149 | conc = 0 150 | for cone in inp['PUMP1_CONC']: 151 | if cone['val'] > 0: 152 | conc = cone['val'] 153 | break 154 | if conc == 0: 155 | print("conc = {}".format(conc)) 156 | return 157 | 158 | v1, k10, k12, k13, k21, k31, k41 = get_model(opt['model'], opt['age'], opt['sex'], opt['wt'], opt['ht']) 159 | k10 /= 60 160 | k12 /= 60 161 | k13 /= 60 162 | k21 /= 60 163 | k31 /= 60 164 | k41 /= 60 165 | v4 = v1 / 1000 166 | k14 = k41 * v4 / v1 167 | 168 | # collect volumes every 1 sec 169 | vols = [last_vol] + [0] * interval 170 | for t in range(interval): 171 | for vole in inp['PUMP1_VOL']: 172 | if t < vole['dt'] <= t+1: 173 | vols[t+1] = vole['val'] 174 | 175 | # fill blanks 176 | for t in range(1, interval+1): 177 | if vols[t] == 0: 178 | vols[t] = vols[t-1] 179 | 180 | # cache last vols for next call 181 | last_vol = vols[-1] 182 | 183 | # convert volumes to doses 184 | doses = np.diff(vols) * conc 185 | 186 | # update amount of drug in each compartment 187 | cp_list = [] 188 | ce_list = [] 189 | for t in range(interval): 190 | next_a1 = last_a1 - last_a1 * (k10 + k12 + k13) + last_a2 * k21 + last_a3 * k31 + doses[t] 191 | next_a2 = last_a2 + last_a1 * k12 - last_a2 * k21 192 | next_a3 = last_a3 + last_a1 * k13 - last_a3 * k31 193 | next_a4 = last_a4 + last_a1 * k14 - last_a4 * k41 194 | 195 | last_a1 = next_a1 196 | last_a2 = next_a2 197 | last_a3 = next_a3 198 | last_a4 = next_a4 199 | 200 | cp_list.append({"dt": t + 1, "val": last_a1 / v1}) 201 | ce_list.append({"dt": t + 1, "val": last_a4 / v4}) 202 | 203 | return [ 204 | cp_list, 205 | ce_list 206 | ] 207 | -------------------------------------------------------------------------------- /pyvital/filters/ecg_hrv.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | import math 4 | 5 | cfg = { 6 | 'name': 'ECG - Heart Rate Variability', 7 | 'group': 'Medical algorithms', 8 | 'desc': 'Calculate Heart Rate Variability. Approximately 60-second data is required for calculating HF component and 120-second for LF. 
To calculate VLF, a longer signal is needed.', 9 | 'reference': 'Heart rate variability. Standards of measurement, physiological interpretation, and clinical use. European Heart Journal (1996)17,354-381', 10 | 'overlap': 2, # 2 sec overlap for HR=30 11 | 'interval': 300, # 5 min 12 | 'inputs': [{'name': 'ECG', 'type': 'wav'}], 13 | 'outputs': [ 14 | {'name': 'SDNN', 'type': 'num', 'unit': 'ms', 'min': 0, 'max': 100}, 15 | {'name': 'RMSSD', 'type': 'num', 'unit': 'ms', 'min': 0, 'max': 10}, 16 | {'name': 'pNN50', 'type': 'num', 'unit': '%', 'min': 0, 'max': 5}, 17 | {'name': 'NNI', 'type': 'num', 'unit': 'ms', 'min': 500, 'max': 2500}, 18 | {'name': 'TP', 'type': 'num', 'unit': 'ms2', 'min': 0, 'max': 200000}, 19 | {'name': 'VLF', 'type': 'num', 'unit': 'ms2', 'min': 0, 'max': 200000}, 20 | {'name': 'LF', 'type': 'num', 'unit': 'ms2', 'min': 0, 'max': 10000}, 21 | {'name': 'HF', 'type': 'num', 'unit': 'ms2', 'min': 0, 'max': 10000}, 22 | {'name': 'LF_HF', 'type': 'num', 'unit': '', 'min': 0, 'max': 100} 23 | ] 24 | } 25 | 26 | 27 | def run(inp, opt, cfg): 28 | trk_name = [k for k in inp][0] 29 | 30 | if 'srate' not in inp[trk_name]: 31 | return 32 | 33 | data = arr.interp_undefined(inp[trk_name]['vals']) 34 | srate = inp[trk_name]['srate'] 35 | 36 | rlist = arr.detect_qrs(data, srate) # detect r-peaks 37 | 38 | # import matplotlib.pyplot as plt 39 | # plt.figure(figsize=(50,30)) 40 | # plt.plot(data, label='y') 41 | # for ann in rlist: 42 | # plt.plot(ann, data[ann], 'ro') 43 | # plt.show() 44 | # plt.legend() 45 | # plt.savefig(f'mit-bih_img/{fileid}.png', bbox_inches='tight') 46 | # plt.close() 47 | 48 | # remove qrs before and after overlap 49 | new_rlist = [] 50 | for ridx in rlist: 51 | if cfg['overlap'] <= ridx / srate: 52 | new_rlist.append(ridx) 53 | rlist = new_rlist 54 | 55 | ret_rpeak = [{'dt': ridx / srate, 'val': 1} for ridx in rlist] 56 | 57 | # average qrs 58 | qrs_width = int(0.1 * srate) 59 | qrslist = [] 60 | for ridx in rlist: 61 | qrslist.append(data[ridx - qrs_width: ridx + qrs_width]) 62 | avg_qrs = np.mean(np.array(qrslist), axis=0) 63 | 64 | # correlation coefficient 65 | celist = [] 66 | for qrs in qrslist: 67 | ce = arr.corr(qrs, avg_qrs) 68 | celist.append(ce) 69 | 70 | # rr interval (ms) 71 | rri_list = np.diff(rlist) / srate * 1000 72 | 73 | nni_list = [] # nn interval (ms) 74 | ret_nni = [] 75 | for i in range(len(rlist) - 1): 76 | if celist[i] < 0.9 or celist[i+1] < 0.9: 77 | continue 78 | 79 | # median RR interval nearest 10 beats 80 | med_rri = np.median(rri_list[max(0, i-5): min(len(rri_list), i+5)]) 81 | 82 | rri = rri_list[i] 83 | 84 | if med_rri * 0.5 <= rri <= med_rri * 1.5: 85 | nni_list.append(rri) 86 | ret_nni.append({'dt': rlist[i+1] / srate, 'val': rri}) 87 | 88 | # make time domain nni_data function by linear interpolation (200 hz) 89 | nni_srate = 200 90 | nni_data = [np.nan] * int(math.ceil(len(data) / srate * nni_srate)) 91 | for nni in ret_nni: 92 | nni_data[int(nni['dt'] * nni_srate)] = nni['val'] 93 | nni_data = arr.interp_undefined(nni_data) 94 | 95 | # hamming window 96 | nni_data *= np.hamming(len(nni_data)) 97 | 98 | vlf = 0 # <= 0.04 Hz 99 | lf = 0 # 0.04-0.15 Hz 100 | hf = 0 # 0.15-0.4 Hz 101 | 102 | # A power spectral density (PSD) takes the amplitude of the FFT, multiplies it by its complex conjugate and normalizes it to the frequency bin width. 103 | # This allows for accurate comparison of random vibration signals that have different signal lengths. 
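    # Concretely, for the N-sample window at nni_srate Hz below, each bin k gets
    #   psd[k] = |X[k]|**2 / (N * nni_srate)    (units: ms2/Hz)
    # and a band power in ms2 is the density summed over the band's bins times
    # the bin width nni_srate / N -- which is exactly what the vlf/lf/hf
    # accumulation and the final scaling below implement.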
104 |     psd = abs(np.fft.fft(nni_data)) ** 2 / (len(nni_data) * nni_srate)  # power density per bin (ms2/hz) from fft
105 |     psd *= 2  # In order to conserve the total power,
106 |     # multiply all frequencies that occur in both sets -- the positive and negative frequencies -- by a factor of 2.
107 |     # Zero frequency (DC) and the Nyquist frequency do not occur twice
108 |     for k in range(len(nni_data)):
109 |         f = k * nni_srate / len(nni_data)
110 |         if f < 0.0033:
111 |             pass
112 |         elif f < 0.04:
113 |             vlf += psd[k]
114 |         elif f < 0.15:
115 |             lf += psd[k]
116 |         elif f < 0.4:
117 |             hf += psd[k]
118 |         else:
119 |             break
120 | 
121 |     # multiply the width (hz) to get the area under curve
122 |     vlf *= nni_srate / len(nni_data)
123 |     lf *= nni_srate / len(nni_data)
124 |     hf *= nni_srate / len(nni_data)
125 |     tp = vlf + lf + hf
126 | 
127 |     # lf_arrnorm = 0
128 |     # hf_arrnorm = 0
129 |     # if tp - vlf:
130 |     #     lf_arrnorm = lf / (tp-vlf)
131 |     #     hf_arrnorm = hf / (tp-vlf)
132 | 
133 |     lf_hf = 0
134 |     if hf:
135 |         lf_hf = lf / hf
136 | 
137 |     sdnn = np.std(nni_list)
138 | 
139 |     dnni_list = abs(np.diff(nni_list))  # Difference between adjacent nn intervals
140 |     nn50 = 0
141 |     ret_dnni = []
142 |     for i in range(len(dnni_list)):
143 |         dnni = dnni_list[i]
144 |         if dnni > 50:
145 |             nn50 += 1
146 |         ret_dnni.append({'dt': ret_nni[i+1]['dt'], 'val': dnni})
147 | 
148 |     if len(dnni_list) > 0:
149 |         pnn50 = nn50 * 100 / len(dnni_list)
150 |     else:
151 |         pnn50 = 0
152 | 
153 |     rmssdnni = 0
154 |     if len(dnni_list) > 0:
155 |         for dnni in dnni_list:
156 |             rmssdnni += dnni * dnni
157 |         rmssdnni = (rmssdnni / len(dnni_list)) ** 0.5
158 | 
159 |     dt_last = cfg['interval']
160 |     return [
161 |         [{'dt': dt_last, 'val': sdnn}],
162 |         [{'dt': dt_last, 'val': rmssdnni}],
163 |         [{'dt': dt_last, 'val': pnn50}],
164 |         ret_nni,
165 |         [{'dt': dt_last, 'val': tp}],
166 |         [{'dt': dt_last, 'val': vlf}],
167 |         [{'dt': dt_last, 'val': lf}],
168 |         [{'dt': dt_last, 'val': hf}],
169 |         [{'dt': dt_last, 'val': lf_hf}]
170 |     ]
171 | 
--------------------------------------------------------------------------------
/pyvital/filters/pleth_spi.py:
--------------------------------------------------------------------------------
1 | import pyvital.arr as arr
2 | import numpy as np
3 | import scipy.stats as st
4 | 
5 | class Histogram:
6 |     def __init__(self, minval=0, maxval=100, resolution=1000):
7 |         self.minval = minval
8 |         self.maxval = maxval
9 |         self.bins = [0] * resolution
10 |         self.total = 0
11 | 
12 |     def getbin(self, v):
13 |         if v < self.minval:
14 |             return 0
15 |         if v > self.maxval:
16 |             return len(self.bins) - 1  # index of the last bin, not its count
17 |         bin = int((v-self.minval) / (self.maxval - self.minval) * len(self.bins))
18 |         if bin >= len(self.bins):
19 |             return len(self.bins) - 1
20 |         return bin
21 | 
22 |     def learn(self, v):
23 |         """
24 |         add new data
25 |         """
26 |         bin = self.getbin(v)
27 |         self.bins[bin] += 1
28 |         self.total += 1
29 | 
30 |     # minimum value -> 0, maximum value -> 100
31 |     def percentile(self, v):
32 |         if self.total == 0:
33 |             return 0
34 |         # number of values less than the value
35 |         cnt = 0
36 |         bin = self.getbin(v)
37 |         for i in range(bin):
38 |             cnt += self.bins[i]
39 |         return cnt / self.total * 100
40 | 
41 | 
42 | cfg = {
43 |     'name': 'PLETH - Surgical Pleth Index',
44 |     'group': 'Medical algorithms',
45 |     'reference': 'Br J Anaesth.
2007 Apr98(4):447-55', 46 | 'interval': 30, # for 4096 sample/call 47 | 'overlap': 3, # 2 sec overlap for HR=30 48 | 'inputs': [{'name': 'PLETH', 'type': 'wav'}], 49 | 'outputs': [ 50 | {'name': 'BEAT', 'type': 'num', 'max':2}, 51 | {'name': 'PPGA', 'type': 'num', 'min':0, 'max':100}, 52 | {'name': 'HBI', 'type': 'num', 'min':240, 'max':2000}, 53 | {'name': 'PPGA_PERC', 'type': 'num', 'min':0, 'max':100}, 54 | {'name': 'HBI_PERC', 'type': 'num', 'min':0, 'max':100}, 55 | {'name': 'SPI', 'type': 'num', 'max':100} 56 | ] 57 | } 58 | 59 | 60 | # filter should be called sequentially 61 | hist_ppga = Histogram(0, 100, 1000) 62 | hist_hbi = Histogram(240, 2000, 1000) # HR 30-250 --> HBI 240-2000 63 | 64 | # 100- spi = 0.7*ppga + 0.3*hbi 65 | # ppga = 0.7 * ppga_ind + 0.3 * ppga_grp 66 | # hbi = 0.7 * hbi_ind + 0.3 * hbi_grp 67 | # 100 - spi - 0.49 ppga_ind - 0.21 hbi_ind = 0.21 ppga_grp + 0.09 hbi_grp 68 | 69 | # hist_ppga_grp = Histogram(0, 15, 1000) 70 | # hist_ppga_grp.bins = [43,68,35,43,28,44,45,60,40,54,62,89,92,151,106,189,145,341,192,301,323,441,227,590,572,871,430,665,459,808,402,756,425,800,483,963,576,1214,638,879,1042,1481,825,1721,882,1679,825,1313,1079,804,1482,1362,721,1220,960,1339,898,880,1440,1723,815,1525,1031,1630,1092,1235,1519,1827,930,1937,959,1857,994,1394,1459,1913,956,1731,947,1310,1343,863,1655,1753,801,1728,834,1683,807,1466,940,1559,773,1521,812,1267,1020,986,1477,949,1530,1531,754,1435,795,1293,832,1044,927,1259,611,1068,708,828,915,612,996,1047,493,1012,483,953,541,888,636,973,508,917,644,779,684,595,768,822,410,827,435,858,426,798,394,631,544,748,412,419,685,514,615,390,777,738,396,791,400,684,398,792,393,624,676,854,515,415,822,590,688,828,424,810,438,748,404,663,456,696,354,475,555,591,419,353,671,637,363,684,309,618,314,637,340,508,496,616,344,356,674,605,409,335,713,651,340,674,335,657,360,679,366,491,458,605,345,336,560,444,387,570,239,535,286,567,287,478,309,488,241,344,442,469,281,342,512,513,266,559,266,495,319,586,285,477,385,564,280,404,440,527,336,298,535,582,296,534,273,486,329,543,274,425,373,488,246,299,426,404,342,539,224,332,349,435,225,377,264,423,205,229,458,434,226,279,364,342,294,427,234,373,238,393,190,214,383,436,184,268,351,369,252,197,391,441,195,404,175,265,331,406,181,293,281,305,193,183,293,342,181,332,157,214,227,277,133,256,168,285,138,132,265,286,159,187,201,211,181,278,118,258,126,250,127,154,224,254,115,162,207,190,152,117,233,231,112,240,107,139,162,206,104,177,118,204,112,110,213,204,102,190,94,160,144,196,110,203,93,205,95,109,180,162,108,133,135,135,126,149,94,166,89,148,82,102,140,210,94,163,122,146,103,87,159,155,83,150,73,107,116,158,70,150,95,114,91,99,150,147,87,144,70,100,111,163,72,130,87,134,76,86,96,130,62,101,80,109,83,137,64,113,71,118,87,59,85,126,74,84,67,104,85,53,103,120,47,102,69,82,111,127,60,114,59,84,62,59,102,96,54,115,48,66,83,114,52,105,80,133,75,67,106,111,68,103,94,85,72,118,60,138,68,134,63,78,118,121,55,116,87,113,78,102,131,137,67,104,56,94,90,122,60,102,53,87,82,85,111,131,51,105,62,78,96,119,57,100,61,80,66,73,88,82,54,73,61,58,84,96,47,76,42,63,64,72,77,87,42,75,59,82,51,46,89,81,41,75,38,39,69,77,31,65,40,44,77,67,52,71,29,41,58,41,45,62,31,76,31,62,37,23,62,50,20,54,21,32,51,45,24,69,21,34,43,39,34,35,21,33,21,23,31,17,29,42,24,28,20,15,35,39,24,42,18,22,31,18,22,36,15,30,24,34,38,46,18,41,12,37,23,8,33,34,17,32,15,17,25,26,22,27,11,18,22,23,26,32,13,32,15,24,29,15,45,41,19,33,16,19,27,34,18,38,14,30,29,16,30,32,14,36,16,29,22,39,7,27,12,27,14,12,23,32,17,29,15,10,33,18,16,34,12,21,20,21,19,32,15,2
0,24,27,15,20,25,23,8,37,18,17,28,21,9,29,12,12,24,17,15,22,10,18,17,26,8,19,7,18,11,15,12,9,17,21,6,15,5,6,10,13,8,9,6,12,8,5,6,18,7,8,9,4,5,4,7,5,1,6,5,5,7,9,1,8,3,3,6,2,6,6,3,3,2,6,1,3,1,3,4,1,2,1,4,3,1,3,4,2,5,3,0,3,5,2,2,1,4,1,0,0,2,4,1,3,5,1,5,2,2,6,2,3,2,3,0,2,0,2,2,3,1,2,1,1,0,1,2,0,3,2,0,1,1,2,0,2,0,0,0,1,1,1,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,2,0,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,1,0,1] 71 | # hist_hbi_grp = Histogram(240, 2000, 1000) 72 | # hist_hbi_grp.bins = [53,144,89,111,113,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,7,0,0,0,0,0,15,0,0,0,0,0,103,0,0,0,0,439,0,0,0,0,0,699,0,0,0,0,0,1081,0,0,0,0,1230,0,0,0,0,0,2369,0,0,0,0,0,1965,0,0,0,0,2074,0,0,0,0,0,2585,0,0,0,0,0,3365,0,0,0,0,3766,0,0,0,0,0,3885,0,0,0,0,0,3654,0,0,0,0,4443,0,0,0,0,0,5086,0,0,0,0,0,4381,0,0,0,0,3745,0,0,0,0,0,4066,0,0,0,0,0,4878,0,0,0,0,5697,0,0,0,0,0,6116,0,0,0,0,0,6970,0,0,0,0,0,7675,0,0,0,0,7525,0,0,0,0,0,8090,0,0,0,0,0,7772,0,0,0,0,8642,0,0,0,0,0,6911,0,0,0,0,0,5336,0,0,0,0,5214,0,0,0,0,0,6342,0,0,0,0,0,5083,0,0,0,0,4136,0,0,0,0,0,4002,0,0,0,0,0,3735,0,0,0,0,3430,0,0,0,0,0,3138,0,0,0,0,0,3089,0,0,0,0,3179,0,0,0,0,0,2903,0,0,0,0,0,3043,0,0,0,0,3107,0,0,0,0,0,3104,0,0,0,0,0,2871,0,0,0,0,0,2636,0,0,0,0,2367,0,0,0,0,0,2201,0,0,0,0,0,2074,0,0,0,0,1945,0,0,0,0,0,2043,0,0,0,0,0,1994,0,0,0,0,2040,0,0,0,0,0,2228,0,0,0,0,0,2641,0,0,0,0,3010,0,0,0,0,0,2916,0,0,0,0,0,2459,0,0,0,0,2151,0,0,0,0,0,1957,0,0,0,0,0,1787,0,0,0,0,1824,0,0,0,0,0,1589,0,0,0,0,0,1234,0,0,0,0,1022,0,0,0,0,0,997,0,0,0,0,0,1001,0,0,0,0,0,867,0,0,0,0,839,0,0,0,0,0,866,0,0,0,0,0,851,0,0,0,0,681,0,0,0,0,0,492,0,0,0,0,0,398,0,0,0,0,360,0,0,0,0,0,287,0,0,0,0,0,222,0,0,0,0,197,0,0,0,0,0,151,0,0,0,0,0,107,0,0,0,0,100,0,0,0,0,0,91,0,0,0,0,0,74,0,0,0,0,52,0,0,0,0,0,47,0,0,0,0,0,57,0,0,0,0,31,0,0,0,0,0,36,0,0,0,0,0,35,0,0,0,0,0,31,0,0,0,0,31,0,0,0,0,0,27,0,0,0,0,0,19,0,0,0,0,27,0,0,0,0,0,32,0,0,0,0,0,24,0,0,0,0,16,0,0,0,0,0,20,0,0,0,0,0,20,0,0,0,0,21,0,0,0,0,0,21,0,0,0,0,0,20,0,0,0,0,16,0,0,0,0,0,24,0,0,0,0,0,8,0,0,0,0,10,0,0,0,0,0,17,0,0,0,0,0,12,0,0,0,0,12,0,0,0,0,0,14,0,0,0,0,0,16,0,0,0,0,0,12,0,0,0,0,16,0,0,0,0,0,6,0,0,0,0,0,14,0,0,0,0,24,0,0,0,0,0,19,0,0,0,0,0,15,0,0,0,0,8,0,0,0,0,0,8,0,0,0,0,0,16,0,0,0,0,7,0,0,0,0,0,18,0,0,0,0,0,13,0,0,0,0,11,0,0,0,0,0,12,0,0,0,0,0,11,0,0,0,0,13,0,0,0,0,0,8,0,0,0,0,0,10,0,0,0,0,9,0,0,0,0,0,4,0,0,0,0,0,6,0,0,0,0,0,12,0,0,0,0,14,0,0,0,0,0,16,0,0,0,0,0,9,0,0,0,0,7,0,0,0,0,0,12,0,0,0,0,0,14,0,0,0,0,8,0,0,0,0,0,10,0,0,0,0,0,13,0,0,0,0,9,0,0,0,0,0,5,0,0,0,0,0,5,0,0,0,0,9,0,0,0,0,0,4,0,0,0,0,0,7,0,0,0,0,14,0,0,0,0,0,10,0,0,0,0,0,7,0,0,0,0,12,0,0,0,0,0,7,0,0,0,0,0,5,0,0,0,0,4] 73 | 74 | def run(inp, opt, cfg): 75 | """ 76 | http:#ocw.utm.my/file.php/38/SEB4223/07_ECG_Analysis_1_-_QRS_Detection.ppt%20%5BCompatibility%20Mode%5D.pdf 77 | """ 78 | global hist_ppga, hist_hbi 79 | trk_name = [k for k in inp][0] 80 | 81 | if 'srate' not in inp[trk_name]: 82 | return 83 | 84 | data = arr.interp_undefined(inp[trk_name]['vals']) 85 | srate = inp[trk_name]['srate'] 86 | 87 | minlist, maxlist = arr.detect_peaks(data, srate) # extract beats 88 | beat_res = [{'dt':idx / srate, 'val':1} for idx in maxlist] 89 | 90 | ppga_res = 
[] 91 | hbi_res = [] 92 | ppga_perc_res = [] 93 | hbi_perc_res = [] 94 | spi_res = [] 95 | for i in range(len(maxlist) - 1): 96 | dt = maxlist[i+1] / srate 97 | 98 | hbi = (maxlist[i+1] - maxlist[i]) / srate * 1000 99 | ppga = data[maxlist[i+1]] - data[minlist[i]] 100 | 101 | #hbi_perc = hist_hbi.percentile(hbi) * 0.7 + st.norm.cdf(hbi, 754.7, 210.8) * 30 102 | hbi_perc = hist_hbi.percentile(hbi) * 0.7 + st.norm.cdf(hbi, 700, 100) * 30 103 | #ppga_perc = hist_ppga.percentile(ppga) * 0.7 + st.norm.cdf(ppga, 2.428, 1.896) * 30 104 | ppga_perc = hist_ppga.percentile(ppga) * 0.7 + st.norm.cdf(ppga, 1, 0.2) * 30 105 | # hbi_perc = hist_hbi.percentile(hbi) * 0.7 + hist_hbi_grp.percentile(hbi) * 0.3 106 | # ppga_perc = hist_ppga.percentile(ppga) * 0.7 + hist_ppga_grp.percentile(ppga) * 0.3 107 | 108 | spi = 100 - (0.7 * ppga_perc + 0.3 * hbi_perc) 109 | 110 | ppga_res.append({'dt': dt, 'val': ppga}) 111 | hbi_res.append({'dt': dt, 'val': hbi}) 112 | ppga_perc_res.append({'dt': dt, 'val': ppga_perc}) 113 | hbi_perc_res.append({'dt': dt, 'val': hbi_perc}) 114 | spi_res.append({'dt':dt, 'val':spi}) 115 | 116 | hist_hbi.learn(hbi) 117 | hist_ppga.learn(ppga) 118 | 119 | return [beat_res, ppga_res, hbi_res, ppga_perc_res, hbi_perc_res, spi_res] 120 | -------------------------------------------------------------------------------- /pyvital/filters/ecg_mtwa.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | import math 4 | import copy 5 | # import matplotlib 6 | # matplotlib.use('agg') 7 | # import matplotlib.pyplot as plt 8 | 9 | cfg = { 10 | 'name': 'ECG - T-wave alternans', 11 | 'group': 'Medical algorithms', 12 | 'desc': 'Calculate microvolt T-wave alternans', 13 | 'reference': 'Narayan SM, Smith JM. Spectral analysis of periodic fluctuations in electrocardiographic repolarization. IEEE Trans Biomed Eng.
1999 Feb;46(2):203-12.', 14 | 'overlap': 1.5, # for HR=40 15 | 'interval': 60 * 5, # 5 min 16 | 'inputs': [{'name': 'ECG', 'type': 'wav'}], 17 | 'outputs': [ 18 | {'name': 'ECG_FILTD', 'type': 'wav'}, 19 | {'name': 'AVG_BEAT', 'type': 'wav'}, 20 | {'name': 'PEAKS', 'type': 'num'}, 21 | {'name': 'TWA_VOLT', 'type': 'num', 'unit': 'uv', 'min': 0, 'max': 100}, 22 | {'name': 'TWA_RATIO', 'type': 'num', 'unit': '', 'min': 0, 'max': 10} 23 | ] 24 | } 25 | 26 | 27 | def run(inp, opt, cfg): 28 | trk_name = [k for k in inp][0] 29 | 30 | if 'srate' not in inp[trk_name]: 31 | return 32 | 33 | data = arr.interp_undefined(inp[trk_name]['vals']) 34 | srate = inp[trk_name]['srate'] 35 | 36 | ecg_500 = data 37 | if srate != 500: 38 | ecg_500 = arr.resample(data, math.ceil(len(data) / srate * 500)) # resample to 500 Hz 39 | srate = 500 40 | ecg_filt = arr.band_pass(ecg_500, srate, 0.01, 100) # filtering 41 | ecg_filt = arr.remove_wander_spline(ecg_filt, srate) # remove baseline wander 42 | 43 | r_list = arr.detect_qrs(ecg_filt, srate) # detect r-peak 44 | new_r_list = [] 45 | for ridx in r_list: # remove qrs before and after overlap 46 | if cfg['overlap'] <= ridx / srate: 47 | new_r_list.append(ridx) 48 | r_list = new_r_list 49 | 50 | ret_rpeak = [] 51 | for ridx in r_list: 52 | ret_rpeak.append({'dt': ridx / srate}) 53 | 54 | segbeats = 128 55 | segsteps = 32 # int(segbeats/4) 56 | 57 | # for each segments 58 | twavs = [] 59 | twars = [] 60 | ret_twav = [] 61 | ret_twar = [] 62 | ret_avg_beat = {'srate': srate, 'vals': [0] * len(ecg_500)} 63 | 64 | iseg = 0 65 | for seg_start in range(0, len(r_list) - segbeats, 66 | segsteps): # Separates in 128-beat units regardless of input length 67 | iseg += 1 68 | 69 | hrs = [] # calculate hrs 70 | for i in range(segbeats - 1): 71 | hr = srate / (r_list[seg_start + i + 1] - r_list[seg_start + i]) 72 | hrs.append(hr) 73 | 74 | if max(hrs) - min(hrs) > 20: 75 | # print('seg ' + iseg + ' excluded HR diff > ' + diff_hr) 76 | continue 77 | 78 | # only -250 to 350 ms from R peak 79 | idx_r = int(0.25 * srate) # idx_r == 125 80 | beat_len = int(0.6 * srate) # beat_len == 300 81 | beats = [] 82 | for i in range(segbeats): 83 | ridx = r_list[seg_start + i] 84 | beat = ecg_filt[ridx - idx_r:ridx - idx_r + beat_len] 85 | beats.append(beat) 86 | beats = np.array(beats) 87 | 88 | # remove each beat's baseline voltage 89 | # no effect because of R peak leveling is below 90 | # Baseline correction included estimation of the baseline in the isoelectric PQ 91 | # segment by averaging 16 successive samples in this time window 92 | pq_width = int(0.008 * srate) 93 | # for i in range(segbeats): 94 | # idx_base = arr.min_idx(beats[i], idx_r - int(0.15 * srate), idx_r) 95 | # min_std = 999999 96 | # for j in range(idx_base - int(0.03 * srate), idx_base + int(0.03 * srate)): 97 | # # The baseline is the point at which the standard deviation of around 15ms is minimized. 
98 | # this_std = np.std(beats[i][j - pq_width:j + pq_width]) 99 | # if this_std < min_std: 100 | # idx_base = j 101 | # min_std = this_std 102 | # beats[i] -= np.mean(beats[i][idx_base - pq_width:idx_base + pq_width]) 103 | 104 | # calculate average beat 105 | avg_beat = np.mean(beats, axis=0) # average beat of the segbeats beats 106 | 107 | # find minimum values on both sides of avg_beat 108 | idx_start = idx_r - int(0.15 * srate) # idx_start == 50 109 | idx_end = idx_r + int(0.1 * srate) # idx_end == 175 110 | 111 | idx_base = arr.min_idx(avg_beat, idx_start, idx_r) # avg_beat's baseline 112 | min_std = 999999 # find minimum std value 113 | for j in range(idx_base - int(0.03 * srate), idx_base + int(0.03 * srate)): 114 | idx_from = max(0, j - pq_width) 115 | idx_to = min(len(avg_beat), j + pq_width) 116 | this_std = np.std(avg_beat[idx_from:idx_to]) 117 | # print("{} {}".format(j, this_std)) 118 | if this_std < min_std: 119 | idx_base = j 120 | min_std = this_std 121 | # print("idx_base={}", idx_base) 122 | min_left = np.mean(avg_beat[idx_base - pq_width:idx_base + pq_width]) 123 | min_right = np.min(avg_beat[idx_r:idx_end]) 124 | 125 | # threshold = 5% of max val 126 | th_left = min_left + 0.05 * (avg_beat[idx_r] - min_left) 127 | th_right = min_right + 0.05 * (avg_beat[idx_r] - min_right) 128 | idx_qrs_start = idx_r - int(0.05 * srate) 129 | idx_qrs_end = idx_r + int(0.05 * srate) 130 | for j in range(idx_r, idx_r - int(0.1 * srate), -1): # idx_r = 125 131 | if avg_beat[j] < th_left: 132 | idx_qrs_start = j 133 | break 134 | for j in range(idx_r, idx_r + int(0.1 * srate)): 135 | if avg_beat[j] < th_right: 136 | idx_qrs_end = j 137 | break 138 | 139 | # find offset with maximum correlation 140 | offsets = [] # for each beat, like [0, -1, 0, 0, 1, ...]
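# (added note) The loop below slides each beat within +/- offset_width samples and
# keeps the shift that maximizes the Pearson correlation of its QRS window with the
# average beat, correcting residual R-peak jitter before beats are compared
# sample-by-sample. A standalone sketch of the same idea (illustrative only):
#   import numpy as np
#   tmpl = np.sin(np.linspace(0, np.pi, 50))
#   beat = np.roll(tmpl, 2)                 # beat detected 2 samples late
#   best = max(range(-3, 4), key=lambda o: np.corrcoef(tmpl, np.roll(beat, -o))[0, 1])
#   # best == 2, the shift that re-aligns the beat with the template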
141 | qrs_coeffs = [] 142 | offset_width = int(0.01 * srate) # 3 = range for finding offset 143 | for i in range(segbeats): # for each beat 144 | maxoffset = -offset_width 145 | maxce = -999999 146 | for offset in range(-offset_width, offset_width + 1): 147 | ce = arr.corr(avg_beat[idx_qrs_start:idx_qrs_end], 148 | beats[i][offset + idx_qrs_start:offset + idx_qrs_end]) 149 | if maxce < ce: 150 | maxoffset = offset 151 | maxce = ce 152 | offsets.append(maxoffset) 153 | qrs_coeffs.append(maxce) 154 | 155 | # move beats by the offset 156 | new_beats = [] 157 | for i in range(segbeats): 158 | ost = offsets[i] 159 | beat = beats[i].tolist() 160 | if ost < 0: 161 | beat = [0] * -ost + beat[:ost] 162 | else: 163 | beat = beat[ost:] + [0] * ost 164 | new_beats.append(beat) 165 | beats = np.array(new_beats) # beats.shape == (segbeats,300) 166 | 167 | # calculate average beat 168 | avg_beat = np.mean(beats, axis=0) # average beat of the segbeats beats 169 | 170 | # replace vpc as template 171 | nreplaced = 0 172 | for i in range(segbeats): 173 | ce = arr.corr(avg_beat, beats[i]) 174 | if ce < 0.95: 175 | nreplaced += 1 176 | beats[i] = copy.deepcopy(avg_beat) 177 | offsets[i] = 0 178 | 179 | #print('{} beats are replaced'.format(nreplaced)) 180 | if nreplaced > 0.1 * segbeats: 181 | print('excluded VPC > {}'.format(nreplaced)) 182 | continue 183 | 184 | # qrs level alignment 185 | # idx_r == 125 186 | # len(avg_beat) == beat_len == 300 187 | for i in range(segbeats): 188 | beats[i] -= beats[i][idx_r] 189 | 190 | # plot for debugging 191 | # plt.plot() 192 | # for i in range(segbeats): 193 | # col = 'blue' 194 | # if i % 2: 195 | # col = 'red' 196 | # plt.plot(beats[i], c=col, ls='-') 197 | # plt.savefig('{:02d}_{}.png'.format(opt['ifile'], iseg)) 198 | # plt.close() 199 | 200 | # gather segbeats beats from idx_r(125) to beat_len(300) 201 | # power spectra of segbeats beats 202 | spect = [] 203 | for idx_from_r in range(beat_len - idx_r): 204 | timed_samples = beats[:, idx_r + idx_from_r] 205 | # timed_samples *= np.hamming(len(timed_samples)) 206 | segfft = np.abs(np.fft.fft(timed_samples)) ** 2 207 | spect.append(segfft) # each segbeats beat fft result 208 | spect = np.array(spect) # rows == idx_from_r, cols == frequency(0-segbeats) 209 | 210 | # power spectra are summed into a composite in which 211 | # the magnitude at 0.5 cycles/beat indicates raw alternans (in mv2) 212 | # cum_spect.shape == segbeats 213 | 214 | # cumulative spectrum of beats 215 | st_start = int(0.1 * srate) # idx_qrs_end - idx_r #int(0.1*srate) 216 | st_end = int(0.25 * srate) 217 | avg_spect = np.mean(spect[st_start:st_end, :], axis=0) # between 100 (50) and 250 ms (125) from rpeak 218 | avg_alt = avg_spect[int(0.5 * segbeats)] 219 | 220 | # cum_spect_noise = cum_spect[int(0.4*segbeats):int(0.46*segbeats)] # noise level: 0.44-0.49 cycles / beat 221 | avg_spect_noise = avg_spect[int(0.44 * segbeats):int(0.49 * segbeats)] # noise level: 0.44-0.49 cycles / beat 222 | # cum_spect_noise = cum_spect[int(0.33 * segbeats):int(0.48 * segbeats)] # noise level: 0.44-0.49 cycles / beat 223 | avg_noise_avg = np.mean(avg_spect_noise) 224 | avg_noise_std = np.std(avg_spect_noise) 225 | 226 | # return avg beat 227 | # avg_beat = np.mean(beats, axis=0) 228 | for j in range(len(avg_beat)): 229 | if len(ret_avg_beat['vals']) > r_list[seg_start + segbeats - 1] + j: 230 | ret_avg_beat['vals'][r_list[seg_start + segbeats - 1] + j] = avg_beat[j] 231 | 232 | # print('avg alt {}, noise {}'.format(cum_alt, cum_noise_avg)) 233 | twar = 0 234 | if
avg_alt > avg_noise_avg: 235 | twav = 1000 * (avg_alt - avg_noise_avg) ** 0.5 236 | twar = (avg_alt - avg_noise_avg) / avg_noise_std 237 | twavs.append(twav) 238 | ret_twav.append({'dt': r_list[seg_start + segbeats - 1] / srate, 'val': twav}) 239 | 240 | twars.append(twar) 241 | ret_twar.append({'dt': r_list[seg_start + segbeats - 1] / srate, 'val': twar}) 242 | 243 | # plt.figure(figsize=(30, 5)) 244 | # plt.plot(ecg_filt.tolist(), color='black', lw=1) 245 | # plt.savefig('e:/{}_raw.pdf'.format(twar), bbox_inches="tight", pad_inches=0.5) 246 | # plt.close() 247 | # 248 | # plt.figure(figsize=(10, 5)) 249 | # for i in range(len(beats)): 250 | # c = 'red' 251 | # if i % 2 == 0: 252 | # c = 'blue' 253 | # plt.plot(beats[i], color=c, lw=1) 254 | # plt.savefig('e:/{}_ecg.pdf'.format(twar), bbox_inches="tight", pad_inches=0.5) 255 | # plt.close() 256 | # 257 | # plt.figure(figsize=(10, 5)) 258 | # plt.plot(np.arange(1, 65) / 128, avg_spect[1:65], lw=1) 259 | # plt.savefig('e:/{}_spect.pdf'.format(twar), bbox_inches="tight", pad_inches=0.5) 260 | # plt.close() 261 | 262 | dt_last = r_list[-1] / srate - cfg['overlap'] 263 | 264 | return [ 265 | {'srate': srate, 'vals': ecg_filt.tolist()}, 266 | ret_avg_beat, 267 | ret_rpeak, 268 | ret_twav, 269 | ret_twar 270 | ] 271 | -------------------------------------------------------------------------------- /pyvital/filters/ecg_annotator.py: -------------------------------------------------------------------------------- 1 | import pyvital.arr as arr 2 | import numpy as np 3 | import math 4 | import copy 5 | import pywt 6 | 7 | 8 | cfg = { 9 | 'name': 'ECG - Annotator', 10 | 'group': 'Medical algorithms', 11 | 'desc': 'ECG annotator based on YC Chesnokov\'s implementation', 12 | 'reference': 'YC Chesnokov, D Nerukh, RC Glen, Individually Adaptable Automatic QT Detector', 13 | 'overlap': 3, # 2 sec overlap for HR=30 14 | 'interval': 30, 15 | 'inputs': [{'name': 'ECG', 'type': 'wav'}], 16 | 'outputs': [{'name': 'ANN', 'type': 'str', 'unit': ''}], 17 | 'license': 'GPL' 18 | } 19 | 20 | 21 | def minimax(data): 22 | return np.std(data) * (0.3936 + 0.1829 * math.log(len(data))) 23 | 24 | 25 | def denoise(data, wsize): 26 | # hard minmax denoise 27 | for i in range(0, len(data), wsize): 28 | iend = min(len(data), i + wsize) 29 | th = minimax(data[i: iend]) 30 | for j in range(i, iend): 31 | if abs(data[j]) <= th: 32 | data[j] = 0 33 | 34 | 35 | def cwt(data, srate, wname, freq): 36 | scale = 0.16 * srate / freq # for gaus1 37 | sig = pywt.cwt(data, [scale], wname)[0].flatten() 38 | return sig 39 | 40 | 41 | def qmf(w): 42 | ret = [] 43 | for i in range(len(w)): 44 | if i % 2 == 1: 45 | ret.append(-w[len(w)-1-i]) 46 | else: 47 | ret.append(w[len(w)-1-i]) 48 | return ret 49 | 50 | 51 | def orthfilt(w): 52 | lor = w / np.linalg.norm(w) 53 | lod = lor[::-1] 54 | hir = qmf(lor) 55 | hid = hir[::-1] 56 | return [lod, hid, lor, hir] 57 | 58 | 59 | def run(inp, opt, cfg): 60 | trk_name = [k for k in inp][0] 61 | 62 | if 'srate' not in inp[trk_name]: 63 | return 64 | 65 | data = arr.interp_undefined(inp[trk_name]['vals']) 66 | srate = inp[trk_name]['srate'] 67 | 68 | min_hr = 40 # min bpm 69 | max_hr = 200 # max bpm 70 | min_qrs = 0.04 # min qslist duration 71 | max_qrs = 0.2 # max qslist duration 72 | min_umv = 0.2 # min UmV of R,S peaks 73 | min_pq = 0.07 # min PQ duration 74 | max_pq = 0.20 # max PQ duration 75 | min_qt = 0.21 # min QT duration 76 | max_qt = 0.48 # max QT duration 77 | pfreq = 9.0 # cwt Hz for pidx wave 78 | tfreq = 2.5 # cwt Hz for tidx wave 79 | 
min_sq = (60.0 / max_hr) - max_qrs # from s to next q 80 | if min_sq * srate <= 0: 81 | min_sq = 0.1 82 | max_hr = int(60.0 / (max_qrs + min_sq)) 83 | 84 | # denoised ecg 85 | depth = int(math.ceil(np.log2(srate / 0.8))) - 1 86 | ad = pywt.wavedec(data, 'db2', level=depth) 87 | ad[0].fill(0) # low frequency approx -> 0 88 | ecg_denoised = pywt.waverec(ad, 'db2') 89 | 90 | # interpolation filter 91 | inter1 = pywt.Wavelet('inter1', filter_bank=orthfilt([0.25, 0.5, 0.25])) 92 | 93 | # qrs augmented ecg 94 | sig = cwt(data, srate, 'gaus1', 13) # 13 Hz gaus convolution 95 | depth = int(math.ceil(np.log2(srate / 23))) - 2 96 | ad = pywt.wavedec(sig, inter1, level=depth) 97 | for level in range(depth): # remove [0-30Hz] 98 | wsize = int(2 * srate / (2 ** (level+1))) # 2 sec window 99 | denoise(ad[depth-level], wsize) # Remove less than 30 hz from all detail 100 | ad[0].fill(0) # most lowest frequency approx -> 0 101 | ecg_qrs = pywt.waverec(ad, inter1) 102 | 103 | # start parsing 104 | qslist = [] # qrs list [startqrs, endqrs, startqrs, endqrs, ...] 105 | vpclist = [] # abnormal beat 106 | 107 | # save greater than 0 after min_sq 108 | prev_zero = 0 109 | ipos = 0 110 | while ipos < len(ecg_qrs) - int(max_qrs * srate): 111 | if ecg_qrs[ipos] == 0: 112 | prev_zero += 1 113 | else: 114 | if prev_zero > min_sq * srate: 115 | iend = ipos + int(max_qrs * srate) # find the position of the end of the current qrs 116 | while iend > ipos: 117 | if ecg_qrs[iend] != 0: 118 | break 119 | iend -= 1 120 | 121 | # Check if it is the minimum length or if there is a pause 122 | if ipos + min_qrs * srate > iend or np.any(ecg_qrs[iend + 1:iend + 1 + int(min_sq * srate)]): 123 | vpclist.append(ipos) # push vpc 124 | else: 125 | qslist.append(ipos) 126 | qslist.append(iend) 127 | 128 | ipos = iend 129 | prev_zero = 0 130 | ipos += 1 131 | 132 | # qlist = [qslist[i] for i in range(0, len(qslist), 2)] 133 | 134 | complist = [] 135 | for n in range(int(len(qslist) / 2)): 136 | start_qrs = qslist[n * 2] 137 | end_qrs = qslist[n * 2 + 1] 138 | 139 | qidx = -1 140 | ridx = arr.max_idx(ecg_denoised, start_qrs, end_qrs) 141 | if ecg_denoised[ridx] < min_umv: 142 | ridx = -1 143 | 144 | sidx = arr.min_idx(ecg_denoised, start_qrs, end_qrs) 145 | if -ecg_denoised[sidx] < min_umv: 146 | sidx = -1 147 | 148 | # ridxpeak > 0mV sidxpeak < 0mV 149 | if ridx != -1 and sidx != -1: 150 | if sidx < ridx: # check for sidx 151 | if ecg_denoised[ridx] > -ecg_denoised[sidx]: 152 | qidx = sidx 153 | sidx = arr.min_idx(ecg_denoised, ridx, end_qrs + 1) 154 | if sidx == ridx or sidx == end_qrs or abs(ecg_denoised[end_qrs] - ecg_denoised[sidx]) < 0.05: 155 | sidx = -1 156 | else: # check for qidx 157 | qidx = arr.min_idx(ecg_denoised, start_qrs, ridx + 1) 158 | if qidx == ridx or qidx == start_qrs or abs(ecg_denoised[start_qrs] - ecg_denoised[qidx]) < 0.05: 159 | qidx = -1 160 | elif sidx != -1: # only sidx --> Find small r if only sidx detected in rsidx large tidx lead 161 | ridx = arr.max_idx(ecg_denoised, start_qrs, sidx + 1) 162 | if ridx == sidx or ridx == start_qrs or abs(ecg_denoised[start_qrs] - ecg_denoised[ridx]) < 0.05: 163 | ridx = -1 164 | elif ridx != -1: # only ridx --> Find small q,s 165 | qidx = arr.min_idx(ecg_denoised, start_qrs, ridx + 1) 166 | if qidx == ridx or qidx == start_qrs or abs(ecg_denoised[start_qrs] - ecg_denoised[qidx]) < 0.05: 167 | qidx = -1 168 | sidx = arr.min_idx(ecg_denoised, ridx, end_qrs + 1) 169 | if sidx == ridx or sidx == end_qrs or abs(ecg_denoised[end_qrs] - ecg_denoised[sidx]) < 0.05: 170 | sidx = 
-1 171 | else: 172 | vpclist.append(start_qrs) 173 | continue 174 | 175 | o = {'q': qslist[n*2], 's': qslist[n*2+1]} # always exists 176 | 177 | if qidx != -1: 178 | o['q'] = qidx 179 | if ridx != -1: 180 | o['r'] = ridx 181 | if sidx != -1: 182 | o['s'] = sidx 183 | 184 | complist.append(o) 185 | 186 | # for each QRS --> find tidx and pidx wave 187 | for n in range(len(complist) - 1): 188 | pree = complist[n]['q'] 189 | nows = complist[n]['s'] 190 | nowe = complist[n+1]['q'] 191 | size = nowe - nows # s-q interval 192 | size = int(min(size, srate * max_qt - (nows - pree))) 193 | 194 | rr = (nowe - pree) / srate 195 | if (60.0 / rr < min_hr) or (60.0 / rr > max_hr - 20): 196 | continue 197 | 198 | # all are in this 199 | block = [data[nows + i] for i in range(size)] 200 | 201 | ecg_qrs = cwt(block, srate, 'gaus1', tfreq) 202 | tidx1 = arr.min_idx(ecg_qrs) + nows 203 | tidx2 = arr.max_idx(ecg_qrs) + nows 204 | if tidx1 > tidx2: 205 | tidx1, tidx2 = tidx2, tidx1 206 | 207 | # additional constraints on [tidx1 tidx tidx2] duration, symmetry, QT interval 208 | ist = False 209 | if ecg_qrs[tidx1-nows] < 0 < ecg_qrs[tidx2-nows]: 210 | ist = True 211 | elif ecg_qrs[tidx1-nows] > 0 > ecg_qrs[tidx2-nows]: 212 | ist = True 213 | 214 | if ist: 215 | if (tidx2 - tidx1) >= 0.09 * srate: # and (tidx2-tidx1)<=0.24 * srate) #check for tidx wave duration 216 | ist = True # QT interval = .4 * sqrt(RR) 217 | if min_qt * srate <= (tidx2 - pree) <= max_qt * srate: 218 | ist = True 219 | else: 220 | ist = False 221 | else: 222 | ist = False 223 | 224 | if ist: 225 | tidx = 0 # zero crossing 226 | sign = (ecg_qrs[tidx1-nows] >= 0) 227 | for i in range(tidx1 - nows, tidx2 - nows): 228 | if sign == (ecg_qrs[i] >= 0): 229 | continue 230 | tidx = i + nows 231 | break 232 | 233 | # check for tidx wave symetry 234 | if tidx2 - tidx < tidx - tidx1: 235 | ratio = (tidx2 - tidx) / (tidx - tidx1) 236 | else: 237 | ratio = (tidx - tidx1) / (tidx2 - tidx) 238 | if ratio < 0.4: 239 | ist = False 240 | 241 | if ist: 242 | tmin = arr.min_idx(data, tidx1, tidx2) 243 | tmax = arr.max_idx(data, tidx1, tidx2) 244 | # find the most nearest values from 0-cross, tmin, tmax 245 | tidx = arr.find_nearest((tidx, tmin, tmax), (tidx2 + tidx1) / 2) 246 | complist[n]['(t'] = tidx1 247 | complist[n]['t'] = tidx 248 | complist[n]['t)'] = tidx2 249 | 250 | # search for P-WAVE 251 | size = nowe - nows # s-q interval 252 | size = int(min(size, srate * max_pq)) 253 | 254 | if ist: 255 | if tidx2 > nowe - size - int(0.04 * srate): # isp wnd far from Twave at least on 0.04 sec 256 | size -= tidx2 - (nowe - size - int(0.04 * srate)) 257 | 258 | nskip = (nowe - nows) - size 259 | 260 | if size <= 0.03 * srate: 261 | continue # impresize QRS begin detection 262 | 263 | block = [data[nows + nskip + i] for i in range(size)] 264 | 265 | ecg_qrs = cwt(block, srate, 'gaus1', pfreq) 266 | p1 = arr.min_idx(ecg_qrs) + nows + nskip 267 | p2 = arr.max_idx(ecg_qrs) + nows + nskip 268 | if p1 > p2: 269 | p1, p2 = p2, p1 270 | 271 | # additional constraints on [p1 pidx p2] duration, symmetry, PQ interval 272 | isp = False 273 | if ecg_qrs[p1-nows-nskip] < 0 < ecg_qrs[p2-nows-nskip]: 274 | isp = True 275 | elif ecg_qrs[p1-nows-nskip] > 0 > ecg_qrs[p2-nows-nskip]: 276 | isp = True 277 | 278 | if isp: 279 | if 0.03 * srate <= (p2 - p1) <= 0.15 * srate: # check for pidx wave duration 9Hz0.03 5Hz0.05 280 | isp = (min_pq * srate <= (nowe - p1) <= max_pq * srate) # PQ interval = [0.07 - 0.12,0.20] 281 | else: 282 | isp = False 283 | 284 | if not isp: 285 | continue 286 | 
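# (added note) As with the T wave above, the P-wave apex is taken at the zero
# crossing of the gaus1 CWT between its two extrema p1 and p2: the gaus1 wavelet
# acts as a smoothed first-derivative operator, so its response changes sign at
# the apex of a roughly symmetric hump.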
287 | pidx = 0 # zero crossing 288 | sign = (ecg_qrs[p1-nows-nskip] >= 0) 289 | for i in range(p1 - nows - nskip, p2 - nows - nskip): 290 | if sign == (ecg_qrs[i] >= 0): 291 | continue 292 | pidx = i + nows + nskip 293 | break 294 | 295 | # check for pidx wave symetry 296 | if p2 - pidx < pidx - p1: 297 | ratio = (p2 - pidx) / (pidx - p1) 298 | else: 299 | ratio = (pidx - p1) / (p2 - pidx) 300 | 301 | if ratio < 0.4: 302 | isp = False # not a p wave 303 | if isp: 304 | complist[n]['(p'] = p1 305 | complist[n]['p'] = pidx 306 | complist[n]['p)'] = p2 307 | 308 | # add annotation 309 | ret_ann = [] 310 | for n in range(len(complist)): 311 | for k, v in complist[n].items(): 312 | if k == 'q' and abs(ecg_denoised[v]) > 0.5: 313 | k = 'Q' 314 | elif k == 'r' and abs(ecg_denoised[v]) > 0.5: 315 | k = 'R' 316 | elif k == 's' and abs(ecg_denoised[v]) > 0.5: 317 | k = 'S' 318 | elif k == '(t': 319 | k = '(T' 320 | elif k == 't': 321 | k = 'T' 322 | elif k == 't)': 323 | k = 'T)' 324 | elif k == '(p': 325 | k = '(P' 326 | elif k == 'p': 327 | k = 'P' 328 | elif k == 'p)': 329 | k = 'P)' 330 | 331 | ret_ann.append({"dt": v / srate, "val": k}) 332 | 333 | for n in range(len(vpclist)): 334 | ret_ann.append({"dt": vpclist[n] / srate, "val": 'A'}) 335 | 336 | return [ret_ann] 337 | -------------------------------------------------------------------------------- /pyvital/filters/sv_dlapco.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pyvital.arr as arr 4 | import numpy as np 5 | import torch 6 | import torch.nn as nn 7 | from collections import OrderedDict 8 | 9 | class InceptionModule(nn.Module): 10 | 11 | def __init__(self, in_channel, nfilter=32): 12 | super(InceptionModule, self).__init__() 13 | # implement same padding with (kernel_size//2) for pytorch 14 | # Ref: https://discuss.pytorch.org/t/convolution-1d-and-simple-function/11606 15 | 16 | # 1x1 conv path 17 | self.path0 = nn.Sequential( 18 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 19 | nn.BatchNorm1d(nfilter), 20 | nn.ReLU(True) 21 | ) 22 | 23 | # 1X1 conv -> 3x3 conv path 24 | self.path1 = nn.Sequential( 25 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 26 | nn.BatchNorm1d(nfilter), 27 | nn.ReLU(True), 28 | nn.Conv1d(nfilter, nfilter, kernel_size=3, padding=(3 // 2)), 29 | nn.BatchNorm1d(nfilter), 30 | nn.ReLU(True) 31 | ) 32 | 33 | # 1x1 conv -> 5x5 conv path 34 | self.path2 = nn.Sequential( 35 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 36 | nn.BatchNorm1d(nfilter), 37 | nn.ReLU(True), 38 | nn.Conv1d(nfilter, nfilter, kernel_size=5, padding=(5 // 2)), 39 | nn.BatchNorm1d(nfilter), 40 | nn.ReLU(True) 41 | ) 42 | 43 | # 3x3 pool -> 1x1 conv path 44 | self.path3 = nn.Sequential( 45 | nn.MaxPool1d(3, stride=1, padding=1), 46 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 47 | nn.BatchNorm1d(nfilter), 48 | nn.ReLU(True) 49 | ) 50 | 51 | 52 | def forward(self, x): 53 | print('x shape: {}'.format(x.shape)) 54 | y0 = self.path0(x) 55 | y1 = self.path1(x) 56 | y2 = self.path2(x) 57 | y3 = self.path3(x) 58 | print('y0 shape: {}'.format(y0.shape)) 59 | print('y1 shape: {}'.format(y1.shape)) 60 | print('y2 shape: {}'.format(y2.shape)) 61 | print('y3 shape: {}'.format(y3.shape)) 62 | print('cat shape: {}'.format(torch.cat([y0, y1, y2, y3], 1).shape)) 63 | 64 | return torch.cat([y0, y1, y2, y3], 1) 65 | 66 | 67 | class InceptionModule_dilated(nn.Module): 68 | 69 | def __init__(self, in_channel, nfilter=32): 70 | super(InceptionModule_dilated, self).__init__() 71 
| # implement same padding with (kernel_size//2) for pytorch 72 | # Ref: https://discuss.pytorch.org/t/convolution-1d-and-simple-function/11606 73 | 74 | # 1x1 conv path 75 | self.path0 = nn.Sequential( 76 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 77 | nn.BatchNorm1d(nfilter), 78 | nn.ReLU(True) 79 | ) 80 | 81 | # 1X1 conv -> 3x3 conv path 82 | self.path1 = nn.Sequential( 83 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 84 | nn.BatchNorm1d(nfilter), 85 | nn.ReLU(True), 86 | nn.Conv1d(nfilter, nfilter, kernel_size=3, padding=int(((3-1)*5)/2), 87 | dilation=5), 88 | nn.BatchNorm1d(nfilter), 89 | nn.ReLU(True) 90 | ) 91 | 92 | # 1x1 conv -> 5x5 conv path 93 | self.path2 = nn.Sequential( 94 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 95 | nn.BatchNorm1d(nfilter), 96 | nn.ReLU(True), 97 | nn.Conv1d(nfilter, nfilter, kernel_size=5, padding=int(((5-1)*7)/2), 98 | dilation=7), 99 | nn.BatchNorm1d(nfilter), 100 | nn.ReLU(True) 101 | ) 102 | 103 | # 3x3 pool -> 1x1 conv path 104 | self.path3 = nn.Sequential( 105 | nn.MaxPool1d(3, stride=1, padding=1), 106 | nn.Conv1d(in_channel, nfilter, kernel_size=1), 107 | nn.BatchNorm1d(nfilter), 108 | nn.ReLU(True) 109 | ) 110 | 111 | # Dilation output size calculation 112 | # o = output 113 | # p = padding 114 | # k = kernel_size 115 | # s = stride 116 | # d = dilation 117 | # o = [i + 2 * p - k - (k - 1) * (d - 1)] / s + 1 118 | 119 | # padding = ((s-1)*i + (k-1)*d)/2 120 | 121 | def forward(self, x): 122 | y0 = self.path0(x) 123 | y1 = self.path1(x) 124 | y2 = self.path2(x) 125 | y3 = self.path3(x) 126 | 127 | # print(y0.shape) 128 | # print(y1.shape) 129 | # print(y2.shape) 130 | # print(y3.shape) 131 | return torch.cat([y0, y1, y2, y3], 1) 132 | 133 | 134 | class SpatialNL(nn.Module): 135 | """Spatial NL block for image classification. 136 | [https://github.com/facebookresearch/video-nonlocal-net]. 
137 | revised to 1d 138 | """ 139 | def __init__(self, inplanes, planes, use_scale=False): 140 | self.use_scale = use_scale 141 | 142 | super(SpatialNL, self).__init__() 143 | self.t = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False) 144 | self.p = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False) 145 | self.g = nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False) 146 | self.softmax = nn.Softmax(dim=1) 147 | self.z = nn.Conv1d(planes, inplanes, kernel_size=1, stride=1, bias=False) 148 | self.bn = nn.BatchNorm1d(inplanes) 149 | 150 | def forward(self, x): 151 | residual = x 152 | 153 | t = self.t(x) 154 | p = self.p(x) 155 | g = self.g(x) 156 | 157 | b, c, d = t.size() 158 | 159 | t = t.view(b, c, -1).permute(0, 2, 1) 160 | p = p.view(b, c, -1) 161 | g = g.view(b, c, -1).permute(0, 2, 1) 162 | 163 | att = torch.bmm(t, p) 164 | 165 | if self.use_scale: 166 | att = att.div(c**0.5) 167 | 168 | att = self.softmax(att) 169 | x = torch.bmm(att, g) 170 | 171 | x = x.permute(0, 2, 1) 172 | x = x.contiguous() 173 | x = x.view(b, c, d) 174 | 175 | x = self.z(x) 176 | x = self.bn(x) + residual 177 | 178 | return x 179 | 180 | class Inception1DNet(nn.Module): 181 | 182 | def __init__(self, nlayer, nfilter=32): 183 | super(Inception1DNet, self).__init__() 184 | 185 | self.fnn = nn.Sequential( 186 | nn.Linear(4, 256), 187 | nn.ReLU(True) 188 | ) 189 | 190 | self.stem = nn.Sequential( 191 | nn.Conv1d(1, nfilter, kernel_size=3, padding=(3 // 2)), 192 | nn.BatchNorm1d(nfilter), 193 | nn.Conv1d(nfilter, nfilter * 4, kernel_size=3, padding=(3 // 2)), 194 | ) 195 | 196 | dynamicInception = OrderedDict() 197 | i = 0 198 | j = 0 199 | while (i < nlayer): 200 | dynamicInception[str(j)] = InceptionModule(nfilter * 4, nfilter) 201 | j += 1 202 | if i % 2 == 0: 203 | dynamicInception[str(j)] = nn.AvgPool1d(2) 204 | j += 1 205 | i += 1 206 | 207 | self.Inception = nn.Sequential(dynamicInception) 208 | 209 | self.GAP = nn.AdaptiveAvgPool1d(1) 210 | 211 | self.regressor = nn.Sequential( 212 | nn.Linear(512, 256), 213 | nn.ReLU(True), 214 | nn.Linear(256, 256), 215 | nn.ReLU(True), 216 | nn.Linear(256, 1) 217 | ) 218 | 219 | def forward(self, x): 220 | """ 221 | x must be shape of 2dim list, 222 | 1st is : aswh 223 | 2nd is : nsec signal 224 | """ 225 | out_aswh = self.fnn(x[0]) 226 | 227 | out_cnn = self.stem(x[1]) 228 | out_cnn = self.Inception(out_cnn) 229 | out_cnn = self.GAP(out_cnn) 230 | out_cnn = torch.squeeze(out_cnn, 2) 231 | concat = torch.cat([out_aswh, out_cnn], 1) 232 | 233 | out = self.regressor(concat) 234 | 235 | return out 236 | 237 | 238 | class Inception1DNet_NL_compact_dilated(nn.Module): 239 | 240 | def __init__(self, nlayer, nfilter=32): 241 | super(Inception1DNet_NL_compact_dilated, self).__init__() 242 | 243 | self.fnn = nn.Sequential( 244 | nn.Linear(4, 4), 245 | nn.ReLU(True) 246 | ) 247 | 248 | self.stem = nn.Sequential( 249 | nn.Conv1d(1, nfilter, kernel_size=3, padding=(3 // 2)), 250 | nn.BatchNorm1d(nfilter), 251 | nn.Conv1d(nfilter, nfilter * 4, kernel_size=3, padding=(3 // 2)), 252 | nn.BatchNorm1d(nfilter * 4), 253 | nn.Dropout(0.5) 254 | ) 255 | 256 | dynamicInception = OrderedDict() 257 | i = 0 258 | j = 0 259 | k = 0 260 | while (i < nlayer): 261 | if i > (nlayer - 3): 262 | dynamicInception[str(j)] = SpatialNL(nfilter * 4, nfilter * 4) 263 | j += 1 264 | dynamicInception[str(j)] = InceptionModule_dilated(nfilter * 4, nfilter) 265 | j += 1 266 | if i % 2 == 0: 267 | dynamicInception[str(j)] = nn.AvgPool1d(2) 268 | j += 1 269 | i += 1 270 | 
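# (added note) nn.Sequential preserves the insertion order of the OrderedDict built
# above: one InceptionModule_dilated per layer, an AvgPool1d(2) after every
# even-numbered layer, and a SpatialNL self-attention block immediately before each
# of the last two Inception layers (the i > nlayer - 3 branch).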
271 | self.Inception = nn.Sequential(dynamicInception) 272 | 273 | self.GAP = nn.AdaptiveAvgPool1d(1) 274 | 275 | self.regressor = nn.Sequential( 276 | nn.Linear((nfilter * 4) + 4, 64), 277 | nn.ReLU(True), 278 | nn.BatchNorm1d(64), 279 | nn.Dropout(0.5), 280 | nn.Linear(64, 1) 281 | ) 282 | 283 | def forward(self, x): 284 | """ 285 | x must be shape of 2dim list, 286 | 1st is : aswh 287 | 2nd is : nsec signal 288 | """ 289 | out_aswh = self.fnn(x[0]) 290 | 291 | out_cnn = self.stem(x[1]) 292 | out_cnn = self.Inception(out_cnn) 293 | out_cnn = self.GAP(out_cnn) 294 | out_cnn = torch.squeeze(out_cnn, 2) 295 | concat = torch.cat([out_aswh, out_cnn], 1) 296 | 297 | out = self.regressor(concat) 298 | 299 | return out 300 | 301 | 302 | class Inception1DNet_NL_compact_dilated_no_ashw(nn.Module): 303 | 304 | def __init__(self, nlayer, nfilter=32): 305 | super(Inception1DNet_NL_compact_dilated_no_ashw, self).__init__() 306 | 307 | # remove ashw 308 | # self.fnn = nn.Sequential( 309 | # nn.Linear(4, 4), 310 | # nn.ReLU(True) 311 | # ) 312 | 313 | self.stem = nn.Sequential( 314 | nn.Conv1d(1, nfilter, kernel_size=3, padding=(3 // 2)), 315 | nn.BatchNorm1d(nfilter), 316 | nn.Conv1d(nfilter, nfilter * 4, kernel_size=3, padding=(3 // 2)), 317 | nn.BatchNorm1d(nfilter * 4), 318 | nn.Dropout(0.5) 319 | ) 320 | 321 | dynamicInception = OrderedDict() 322 | i = 0 323 | j = 0 324 | k = 0 325 | while (i < nlayer): 326 | if i > (nlayer - 3): 327 | dynamicInception[str(j)] = SpatialNL(nfilter * 4, nfilter * 4) 328 | j += 1 329 | dynamicInception[str(j)] = InceptionModule_dilated(nfilter * 4, nfilter) 330 | j += 1 331 | if i % 2 == 0: 332 | dynamicInception[str(j)] = nn.AvgPool1d(2) 333 | j += 1 334 | i += 1 335 | 336 | self.Inception = nn.Sequential(dynamicInception) 337 | 338 | self.GAP = nn.AdaptiveAvgPool1d(1) 339 | 340 | self.regressor = nn.Sequential( 341 | # for noashw 342 | nn.Linear(nfilter*4, 64), 343 | nn.ReLU(True), 344 | nn.BatchNorm1d(64), 345 | nn.Dropout(0.5), 346 | nn.Linear(64, 1) 347 | ) 348 | 349 | def forward(self, x): 350 | """ 351 | x must be shape of 2dim list, 352 | 1st is : aswh 353 | 2nd is : nsec signal 354 | """ 355 | out_cnn = self.stem(x[1]) 356 | out_cnn = self.Inception(out_cnn) 357 | out_cnn = self.GAP(out_cnn) 358 | out_cnn = torch.squeeze(out_cnn, 2) 359 | 360 | out = self.regressor(out_cnn) 361 | 362 | return out 363 | 364 | model = None 365 | 366 | cfg = { 367 | 'name': 'Stroke Volume', 368 | 'group': 'Medical algorithms', 369 | 'desc': 'Calculate stroke volume from arterial blood pressure using deep learning', 370 | 'reference': 'DLAPCO', 371 | 'overlap': 10, 372 | 'interval': 20, 373 | 'inputs': [{'name': 'ART', 'type': 'wav'}], 374 | 'outputs': [ 375 | {'name': 'SV', 'type': 'num', 'min': 0, 'max': 200, 'unit': 'mL'}, 376 | {'name': 'CO', 'type': 'num', 'min': 0, 'max': 10, 'unit': 'L'}, 377 | ] 378 | } 379 | 380 | def run(inp, opt, cfg): 381 | """ 382 | calculate SV from DeepLearningAPCO 383 | :param inp: input waves 384 | inp['ART']['vals'] must be 1-dimensional, (#,) 385 | :param opt: demographic information 386 | :param cfg: 387 | :return: SV 388 | """ 389 | global model 390 | trk_name = [k for k in inp][0] 391 | 392 | if 'srate' not in inp[trk_name]: 393 | return 394 | 395 | signal_data = arr.interp_undefined(inp[trk_name]['vals']) 396 | srate = inp[trk_name]['srate'] 397 | 398 | signal_data = arr.resample_hz(signal_data, srate, 100) 399 | srate = 100 400 | 401 | # Whole heart freq estimation 402 | hr = arr.estimate_heart_freq(signal_data, srate) * 60 403 | 404 | 
signal_data = np.array(signal_data) / 100. 405 | 406 | if len(np.squeeze(signal_data)) < 20 * srate: 407 | return 408 | 409 | #age_data = opt['age'] 410 | #sex_data = opt['sex'] 411 | #wt_data = opt['weight'] 412 | #ht_data = opt['height'] 413 | age_data = 60 414 | sex_data = 1. 415 | wt_data = 65.8 416 | ht_data = 164.9 417 | 418 | if all (k in opt for k in ('age','sex','weight','height')) : 419 | age_data = int(opt['age']) 420 | sex_data = int(opt['sex']) 421 | wt_data = int(opt['weight']) 422 | ht_data = int(opt['height']) 423 | 424 | #print(age_data, sex_data, wt_data, ht_data) 425 | 426 | if isinstance(sex_data, str): 427 | if sex_data == 'M': 428 | sex_data = int(1) 429 | elif sex_data == 'F': 430 | sex_data = int(0) 431 | else: 432 | raise ValueError('opt_sex must be "M" or "F". current value: {}'.format(sex_data)) 433 | 434 | else: 435 | if not ((int(sex_data) == 1) or (int(sex_data) == 0)): 436 | raise ValueError('opt_sex must be 1 or 0 current value: {}'.format(str(sex_data))) 437 | 438 | ashw_data = np.array([age_data, sex_data, wt_data, ht_data]) 439 | 440 | x_input = [torch.Tensor(np.expand_dims(ashw_data, axis=0)), torch.Tensor(np.expand_dims(signal_data, axis=(0, 1)))] 441 | 442 | if model is None: 443 | model = Inception1DNet_NL_compact_dilated(nlayer=15, nfilter=32) 444 | model.load_state_dict(torch.load(f'{os.path.dirname(__file__)}/model_dlapco_v1.pth')) 445 | model = model.cpu() 446 | model.eval() 447 | 448 | output = model(x_input) 449 | sv = float(output.detach().numpy()) 450 | 451 | return [ 452 | [{'dt': cfg['interval'], 'val': sv}], 453 | [{'dt': cfg['interval'], 'val': sv * hr / 1000}], 454 | ] 455 | 456 | if __name__ == '__main__': 457 | import vitaldb 458 | vf = vitaldb.VitalFile(1, 'ART') 459 | vf.run_filter(run, cfg) 460 | vf.to_vital(f'filtered.vital') -------------------------------------------------------------------------------- /pyvital/filters/ecg_beat_noise_detector.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import vitaldb 3 | import os 4 | import torch 5 | import pyvital.arr 6 | import torch.nn.functional as F 7 | import torch.nn as nn 8 | from scipy.signal import butter, filtfilt, iirnotch, find_peaks 9 | import warnings 10 | warnings.filterwarnings('ignore') 11 | 12 | # ================================================================ 13 | # 1. 
Model and utility function definitions 14 | # ================================================================ 15 | 16 | class UniMSNet(nn.Module): 17 | def __init__(self, input_length=256, num_classes=5): 18 | super().__init__() 19 | self.small_conv = nn.Conv1d(1, 16, kernel_size=5, stride=1, padding=2) 20 | self.medium_conv = nn.Conv1d(1, 8, kernel_size=15, stride=1, padding=7) 21 | self.large_conv = nn.Conv1d(1, 8, kernel_size=31, stride=1, padding=15) 22 | combined_channels = 32 23 | self.conv11 = nn.Conv1d(combined_channels, combined_channels, kernel_size=5, stride=1, padding='same') 24 | self.conv12 = nn.Conv1d(combined_channels, combined_channels, kernel_size=5, stride=1, padding='same') 25 | self.pool1 = nn.MaxPool1d(kernel_size=5, stride=2) 26 | self.conv21 = nn.Conv1d(combined_channels, 48, kernel_size=5, stride=1, padding='same') 27 | self.conv22 = nn.Conv1d(48, 48, kernel_size=5, stride=1, padding='same') 28 | self.shortcut2 = nn.Conv1d(combined_channels, 48, kernel_size=1) 29 | self.pool2 = nn.MaxPool1d(kernel_size=5, stride=2) 30 | self.conv31 = nn.Conv1d(48, 64, kernel_size=5, stride=1, padding='same') 31 | self.conv32 = nn.Conv1d(64, 64, kernel_size=5, stride=1, padding='same') 32 | self.shortcut3 = nn.Conv1d(48, 64, kernel_size=1) 33 | self.pool3 = nn.MaxPool1d(kernel_size=5, stride=2) 34 | self.conv41 = nn.Conv1d(64, 96, kernel_size=3, stride=1, padding='same') 35 | self.conv42 = nn.Conv1d(96, 96, kernel_size=3, stride=1, padding='same') 36 | self.shortcut4 = nn.Conv1d(64, 96, kernel_size=1) 37 | self.pool4 = nn.MaxPool1d(kernel_size=5, stride=2) 38 | self.global_avg_pool = nn.AdaptiveAvgPool1d(1) 39 | self.fc1 = nn.Linear(96, 64) 40 | self.dropout = nn.Dropout(0.3) 41 | self.fc2 = nn.Linear(64, num_classes) 42 | 43 | def forward(self, x): 44 | if x.dim() == 2: 45 | x = x.unsqueeze(1) 46 | elif x.dim() == 3 and x.size(1) == 2: 47 | x = x[:, 0, :].unsqueeze(1) 48 | 49 | x_small = F.relu(self.small_conv(x)) 50 | x_medium = F.relu(self.medium_conv(x)) 51 | x_large = F.relu(self.large_conv(x)) 52 | 53 | x = torch.cat([x_small, x_medium, x_large], dim=1) 54 | 55 | identity = x 56 | x = F.relu(self.conv11(x)) 57 | x = self.conv12(x) 58 | x += identity 59 | x = F.relu(x) 60 | x = self.pool1(x) 61 | 62 | identity = self.shortcut2(x) 63 | x = F.relu(self.conv21(x)) 64 | x = self.conv22(x) 65 | x += identity 66 | x = F.relu(x) 67 | x = self.pool2(x) 68 | 69 | identity = self.shortcut3(x) 70 | x = F.relu(self.conv31(x)) 71 | x = self.conv32(x) 72 | x += identity 73 | x = F.relu(x) 74 | x = self.pool3(x) 75 | 76 | identity = self.shortcut4(x) 77 | x = F.relu(self.conv41(x)) 78 | x = self.conv42(x) 79 | x += identity 80 | x = F.relu(x) 81 | x = self.pool4(x) 82 | 83 | x = self.global_avg_pool(x) 84 | x = x.view(x.size(0), -1) 85 | 86 | x = F.relu(self.fc1(x)) 87 | x = self.dropout(x) 88 | x = self.fc2(x) 89 | return x 90 | 91 | def filter_signal(signal, fs=100, cutoff=30, notch_freq=50, q=30, order=4): 92 | """Signal filtering function (notch filter followed by a low-pass filter)""" 93 | nyq = 0.5 * fs 94 | if notch_freq > 0 and notch_freq < nyq: 95 | b_notch, a_notch = iirnotch(notch_freq, q, fs) 96 | signal = filtfilt(b_notch, a_notch, signal) 97 | 98 | normal_cutoff = cutoff / nyq 99 | b, a = butter(order, normal_cutoff, btype='low', analog=False) 100 | return filtfilt(b, a, signal) 101 | 102 | def process_peaks(segment_data, srate=100): 103 | """Detect R-peaks within a segment""" 104 | max_amp = np.max(np.abs(segment_data)) 105 | scipy_peaks, _ = find_peaks( 106 | segment_data, 107 | distance=int(0.15 * srate), 108 | ) 109 | return scipy_peaks 110 |
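# (added usage sketch, not in the original; shows how the two helpers above combine
# on a synthetic trace - the waveform and numbers are illustrative only)
#   fs = 100
#   beat = np.concatenate([np.zeros(80), [0.0, 1.0, 0.0], np.zeros(17)])
#   ecg = np.tile(beat, 10)                  # ten 1 Hz "beats" at 100 Hz
#   clean = filter_signal(ecg, fs=fs, cutoff=30, notch_freq=50)
#   peaks = process_peaks(clean, srate=fs)   # candidate R-peak indices near 81, 181, ...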
111 | # ================================================================ 112 | # 2. Noise-detection rule helper functions 113 | # (extracted from the ECGAnalyzer class and reworked as standalone functions) 114 | # ================================================================ 115 | 116 | def merge_intervals(intervals): 117 | if not intervals: return [] 118 | intervals.sort(key=lambda x: x[0]) 119 | merged = [intervals[0]] 120 | for current in intervals[1:]: 121 | last = merged[-1] 122 | if current[0] <= last[1]: 123 | last[1] = max(last[1], current[1]) 124 | else: 125 | merged.append(current) 126 | return merged 127 | 128 | def calculate_noise_score(segment_rr, freq_threshold=0.3): 129 | if len(segment_rr) < 2 or np.ptp(segment_rr) < 1e-8: return 0.0 130 | norm_seg = (segment_rr - np.min(segment_rr)) / np.ptp(segment_rr) 131 | grad = np.diff(norm_seg) 132 | if len(grad) == 0: return 0.0 133 | magnitude_score = np.max(np.abs(grad)) 134 | frequency_score = np.sum(np.abs(grad) > freq_threshold) 135 | return magnitude_score * (frequency_score + 1) 136 | 137 | def define_noise_from_u_beat(ecg_chunk, r_peaks, predictions, class_map, srate): 138 | rr_scores = [] 139 | for i in range(len(r_peaks) - 1): 140 | start, end = r_peaks[i], r_peaks[i+1] 141 | score = calculate_noise_score(ecg_chunk[start:end]) 142 | rr_scores.append({'start_peak': start, 'end_peak': end, 'score': score}) 143 | 144 | n_code, u_code = class_map['N'], class_map['U'] 145 | n_n_scores = [item['score'] for item in rr_scores if predictions.get(item['start_peak']) == n_code and predictions.get(item['end_peak']) == n_code] 146 | if not n_n_scores: return [] 147 | 148 | noise_threshold = np.mean(n_n_scores) * 1.5 149 | noisy_u_peaks = set() 150 | for item in rr_scores: 151 | start_pred, end_pred = predictions.get(item['start_peak']), predictions.get(item['end_peak']) 152 | if u_code in [start_pred, end_pred] and item['score'] > noise_threshold: 153 | if start_pred == u_code: noisy_u_peaks.add(item['start_peak']) 154 | if end_pred == u_code: noisy_u_peaks.add(item['end_peak']) 155 | 156 | return [[max(0, p - srate), min(len(ecg_chunk), p + srate)] for p in noisy_u_peaks] 157 | 158 | def detect_isoelectric(ecg_chunk, srate, threshold_mv=0.05, min_dur_sec=0.5): 159 | min_len = int(min_dur_sec * srate) 160 | padded_sections = [] 161 | is_flat = np.zeros(len(ecg_chunk), dtype=bool) 162 | for i in range(len(ecg_chunk) - min_len): 163 | window = ecg_chunk[i : i + min_len] 164 | if (np.max(window) - np.min(window)) < threshold_mv: 165 | is_flat[i:i+min_len] = True 166 | 167 | in_section, start_idx = False, 0 168 | for i, flat in enumerate(is_flat): 169 | if flat and not in_section: 170 | in_section, start_idx = True, i 171 | elif not flat and in_section: 172 | in_section = False 173 | if (i - start_idx) >= min_len: 174 | padded_sections.append([max(0, start_idx - srate), min(len(ecg_chunk), i + srate)]) 175 | if in_section and (len(ecg_chunk) - start_idx) >= min_len: 176 | padded_sections.append([max(0, start_idx - srate), len(ecg_chunk)]) 177 | return padded_sections 178 | 179 | def detect_irregular_u(ecg_chunk, predictions, class_map, srate, std_thresh=10, min_seq=2): 180 | u_code, p_code, s_code = class_map['U'], class_map['P'], class_map['S'] 181 | noise_sections = [] 182 | 183 | u_sequences, current_sequence = [], [] 184 | for peak in sorted(predictions.keys()): 185 | pred = predictions.get(peak) 186 | if pred == u_code: current_sequence.append(peak) 187 | elif pred != p_code: 188 | if len(current_sequence) >= min_seq: u_sequences.append(current_sequence) 189 | current_sequence = [] 190 | if
len(current_sequence) >= min_seq: u_sequences.append(current_sequence) 191 | 192 | for seq in u_sequences: 193 | if len(seq) > 1 and np.std(np.diff(seq)) > std_thresh: 194 | start_peak, end_peak = seq[0], seq[-1] 195 | padded_start = max(0, start_peak - 2 * srate) 196 | padded_end = min(len(ecg_chunk), end_peak + 2 * srate) 197 | 198 | s_beat_present = any(pred == s_code and padded_start <= p <= padded_end for p, pred in predictions.items()) 199 | if not s_beat_present: 200 | noise_sections.append([padded_start, padded_end]) 201 | return noise_sections 202 | 203 | def detect_steep_slope(ecg_chunk, predictions, srate, slope_thresh_mv=0.5, pad_sec=0.5, peak_exclude_sec=0.15): 204 | analysis_mask = np.ones(len(ecg_chunk), dtype=bool) 205 | exclude_samples = int(peak_exclude_sec * srate) 206 | for peak in predictions.keys(): 207 | start = max(0, peak - exclude_samples) 208 | end = min(len(ecg_chunk), peak + exclude_samples) 209 | analysis_mask[start:end] = False 210 | 211 | gradient = np.diff(ecg_chunk) 212 | steep_indices = np.where(np.abs(gradient) > slope_thresh_mv)[0] 213 | valid_steep_points = [i for i in steep_indices if analysis_mask[i]] 214 | if not valid_steep_points: return [] 215 | 216 | pad_samples = int(pad_sec * srate) 217 | intervals = [[max(0, p - pad_samples), min(len(ecg_chunk), p + pad_samples)] for p in set(valid_steep_points)] 218 | return intervals 219 | 220 | def detect_consecutive_p(ecg_chunk, predictions, class_map, srate, min_p_seq=4, pad_sec=1.0): 221 | n_count = list(predictions.values()).count(class_map['N']) 222 | s_count = list(predictions.values()).count(class_map['S']) 223 | if s_count > n_count: return [] 224 | 225 | noise_sections, current_p_seq = [], [] 226 | p_code = class_map['P'] 227 | for peak in sorted(predictions.keys()): 228 | if predictions.get(peak) == p_code: 229 | current_p_seq.append(peak) 230 | else: 231 | if len(current_p_seq) >= min_p_seq: 232 | pad_samples = int(pad_sec * srate) 233 | start = max(0, current_p_seq[0] - pad_samples) 234 | end = min(len(ecg_chunk), current_p_seq[-1] + pad_samples) 235 | noise_sections.append([start, end]) 236 | current_p_seq = [] 237 | if len(current_p_seq) >= min_p_seq: 238 | pad_samples = int(pad_sec * srate) 239 | start = max(0, current_p_seq[0] - pad_samples) 240 | end = min(len(ecg_chunk), current_p_seq[-1] + pad_samples) 241 | noise_sections.append([start, end]) 242 | return noise_sections 243 | 244 | def detect_amp_variation(ecg_chunk, predictions, class_map, srate, ratio=10.0, baseline_hz=0.5): 245 | nyq = 0.5 * srate 246 | b, a = butter(1, baseline_hz / nyq, btype='high', analog=False) 247 | corrected_ecg = filtfilt(b, a, ecg_chunk) 248 | 249 | r_codes = {class_map['N'], class_map['S'], class_map['V']} 250 | heights = [corrected_ecg[p] for p, pred in predictions.items() if pred in r_codes] 251 | if len(heights) < 2: return [] 252 | 253 | min_h, max_h = np.min(heights), np.max(heights) 254 | if min_h > 1e-6 and (max_h / min_h) > ratio: 255 | return [[0, len(ecg_chunk)]] 256 | return [] 257 | 258 | 259 | # ================================================================ 260 | # 3. 
Main execution function (run) 261 | # ================================================================ 262 | 263 | # Global variable so the model is loaded only once 264 | model_beat = None 265 | 266 | def run(inp, opt, cfg): 267 | global model_beat 268 | 269 | # --- Initial setup --- 270 | trk_name = list(inp.keys())[0] 271 | if 'srate' not in inp[trk_name]: 272 | return [[]] 273 | 274 | data = inp[trk_name]['vals'] 275 | srate_orig = inp[trk_name]['srate'] 276 | srate = 100 277 | if srate_orig != srate: 278 | data = pyvital.arr.resample_hz(data, srate_orig, srate) 279 | 280 | # --- Load the model --- 281 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 282 | if model_beat is None: 283 | print("Loading Beat Classification Model...") 284 | model_beat = UniMSNet(input_length=200, num_classes=5).to(device) 285 | modelname = os.path.join(os.path.dirname(__file__), 'UniMSNet.pth') # path to the model weight file bundled with the package 286 | if not os.path.exists(modelname): 287 | raise FileNotFoundError(f"Model weight file not found: {modelname}") 288 | model_beat.load_state_dict(torch.load(modelname, map_location=device)) 289 | model_beat.eval() 290 | 291 | # --- 1. Beat classification --- 292 | all_peaks = [] 293 | segment_len = int(10 * srate) 294 | step = int(9 * srate) # 1-second overlap 295 | for seg_start in range(0, len(data), step): 296 | seg_end = min(seg_start + segment_len, len(data)) 297 | data_segment = data[seg_start:seg_end] 298 | if len(data_segment) < srate * 0.5: continue 299 | 300 | segment_peaks = process_peaks(data_segment, srate) 301 | all_peaks.extend([p + seg_start for p in segment_peaks]) 302 | 303 | peaks = sorted(list(set(all_peaks))) 304 | 305 | beat_signals, valid_peaks = [], [] 306 | for peak in peaks: 307 | beat_start = max(0, peak - srate) 308 | beat_end = min(len(data), peak + srate) 309 | if beat_end - beat_start == srate * 2: 310 | beat_signal = data[beat_start:beat_end] 311 | filtered_beat = filter_signal(beat_signal, fs=srate, cutoff=20) 312 | if np.ptp(filtered_beat) > 1e-8: 313 | normalized_beat = (filtered_beat - np.min(filtered_beat)) / np.ptp(filtered_beat) 314 | beat_signals.append(normalized_beat) 315 | valid_peaks.append(peak) 316 | 317 | out_bstr = [] 318 | predictions_dict = {} 319 | class_names = {0: 'N', 1: 'S', 2: 'V', 3: 'U', 4: 'P'} 320 | 321 | if beat_signals: 322 | beat_tensors = torch.FloatTensor(np.array(beat_signals)).unsqueeze(1).to(device) 323 | with torch.no_grad(): 324 | outputs = model_beat(beat_tensors) 325 | predictions = torch.argmax(outputs, dim=1).cpu().numpy() 326 | 327 | for peak, pred_id in zip(valid_peaks, predictions): 328 | predictions_dict[peak] = pred_id 329 | label = class_names.get(pred_id) 330 | if label and label != 'P': # P-beats are not stored in the results 331 | out_bstr.append({'dt': peak / srate, 'val': label}) 332 |
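# (added note) At this point predictions_dict maps each valid R-peak sample index to
# a class id (0..4 = N/S/V/U/P per class_names), and out_bstr holds the non-P beat
# labels as {'dt', 'val'} records. The noise rules below reuse these per-beat
# predictions rather than re-running the model.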
333 | # --- 2. Noise interval analysis (Noise Detection) --- 334 | all_noise_intervals = [] 335 | class_map = {v: k for k, v in class_names.items()} 336 | 337 | # Define the final R-peaks (excluding P-beats and filtered U-beats) - used for the noise analysis 338 | # For simplicity, every non-P peak is treated as an R-peak here 339 | r_peaks = sorted([p for p, pred in predictions_dict.items() if class_names.get(pred) != 'P']) 340 | 341 | if r_peaks: 342 | # Apply each noise-detection rule 343 | all_noise_intervals.extend(define_noise_from_u_beat(data, r_peaks, predictions_dict, class_map, srate)) 344 | all_noise_intervals.extend(detect_isoelectric(data, srate, threshold_mv=0.05)) 345 | all_noise_intervals.extend(detect_irregular_u(data, predictions_dict, class_map, srate, std_thresh=10)) 346 | all_noise_intervals.extend(detect_steep_slope(data, predictions_dict, srate, slope_thresh_mv=0.9)) 347 | all_noise_intervals.extend(detect_consecutive_p(data, predictions_dict, class_map, srate, min_p_seq=4)) 348 | all_noise_intervals.extend(detect_amp_variation(data, predictions_dict, class_map, srate, ratio=10.0)) 349 | 350 | # Merge and format the noise intervals 351 | merged_noise = merge_intervals(all_noise_intervals) 352 | out_noise = [] 353 | for i, (start_sample, end_sample) in enumerate(merged_noise): 354 | out_noise.append({'dt': start_sample / srate, 'val': f'Noise_start{i+1}'}) 355 | out_noise.append({'dt': end_sample / srate, 'val': f'Noise_end{i+1}'}) 356 | 357 | # --- 3. Combine and return the results --- 358 | return [out_bstr + out_noise] 359 | 360 | 361 | # ================================================================ 362 | # 4. PyVital filter configuration 363 | # ================================================================ 364 | 365 | cfg = { 366 | 'name': 'ECG - AI Beat & Noise Detector', 367 | 'group': 'Medical algorithms', 368 | 'desc': 'Detects ECG beat type (N, S, V, U) and noise intervals.', 369 | 'reference': '', 370 | 'overlap': 5, 371 | 'interval': 15, 372 | 'inputs': [{'name': 'ECG', 'type': 'wav'}], 373 | 'outputs': [{'name': 'BEAT_NOISE', 'type': 'str'}] 374 | } 375 | -------------------------------------------------------------------------------- /pyvital/arr.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import numbers 4 | import scipy 5 | from scipy.signal import butter, filtfilt, argrelextrema, find_peaks 6 | 7 | def print_all(data): 8 | """ 9 | print full array 10 | """ 11 | print('[' + ', '.join([str(x) for x in data]) + ']') 12 | 13 | 14 | def corr(a, b): 15 | """ 16 | correlation coefficient 17 | """ 18 | return np.corrcoef(a, b)[0, 1] 19 | 20 | def max_idx(data, idxfrom = 0, idxto=None): 21 | idxfrom = max(0, idxfrom) 22 | if idxto is None: 23 | idxto = len(data) 24 | idxto = min(len(data), idxto) 25 | return idxfrom + np.argmax(data[idxfrom:idxto]) 26 | 27 | 28 | def min_idx(data, idxfrom = 0, idxto=None): 29 | idxfrom = max(0, idxfrom) 30 | if idxto is None: 31 | idxto = len(data) 32 | idxto = min(len(data), idxto) 33 | return idxfrom + np.argmin(data[idxfrom:idxto]) 34 | 35 | 36 | def get_samples(data, srate, idxes): 37 | """ 38 | Get samples of the wave at the indexes specified by idxes 39 | the return value has the form [{'dt':, 'val':}, ...]
/pyvital/arr.py:
--------------------------------------------------------------------------------
import math
import numpy as np
import numbers
import scipy
from scipy.signal import butter, filtfilt, argrelextrema, find_peaks

def print_all(data):
    """
    Print the full array without truncation.
    """
    print('[' + ', '.join([str(x) for x in data]) + ']')


def corr(a, b):
    """
    Pearson correlation coefficient.
    """
    return np.corrcoef(a, b)[0, 1]

def max_idx(data, idxfrom=0, idxto=None):
    idxfrom = max(0, idxfrom)
    if idxto is None:
        idxto = len(data)
    idxto = min(len(data), idxto)
    return idxfrom + np.argmax(data[idxfrom:idxto])


def min_idx(data, idxfrom=0, idxto=None):
    idxfrom = max(0, idxfrom)
    if idxto is None:
        idxto = len(data)
    idxto = min(len(data), idxto)
    return idxfrom + np.argmin(data[idxfrom:idxto])


def get_samples(data, srate, idxes):
    """
    Get samples of the wave at the indexes specified by idxes.
    The return value has the form [{'dt': ..., 'val': ...}, ...]
    """
    return [{"dt": idx / srate, "val": data[idx]} for idx in idxes]


def is_num(x):
    if not isinstance(x, numbers.Number):
        return False
    return math.isfinite(x)


def exclude_undefined(a):
    if not isinstance(a, np.ndarray):
        a = np.array(a)
    return a[~np.isnan(a)]

def extend_undefined(a):
    # [np.nan, 1, np.nan, 2, 3, np.nan] -> [ 1. 1. nan 2. 3. 3.]
    if not isinstance(a, np.ndarray):
        a = np.array(a)
    valid_mask = ~np.isnan(a)
    if not np.any(valid_mask):
        return a
    valid_index = np.where(valid_mask)[0]
    first_valid_index, last_valid_index = valid_index[0], valid_index[-1]
    a[:first_valid_index] = a[first_valid_index]
    a[last_valid_index + 1:] = a[last_valid_index]
    return a

def interp_undefined(a):
    if not isinstance(a, np.ndarray):
        # Convert None values to np.nan for consistent handling
        a = [x if x is not None else np.nan for x in a]
        a = np.array(a)
    valid_mask = ~np.isnan(a)
    if not np.any(valid_mask):
        return a
    return np.interp(np.arange(len(a)), np.arange(len(a))[valid_mask], a[valid_mask])

def ffill(a):
    valid_mask = ~np.isnan(a)
    if not np.any(valid_mask):
        return a
    idx = np.where(valid_mask, np.arange(len(a)), 0)
    np.maximum.accumulate(idx, out=idx)
    return a[idx]

def bfill(a):
    return ffill(a[::-1])[::-1]

# ffill nan
def replace_undefined(a):
    # [np.nan, 1, 2, 3, np.nan, np.nan, 4, 5, np.nan] -> [1,1,2,3,3,3,4,5,5]
    if not isinstance(a, np.ndarray):
        a = np.array(a)
    return ffill(extend_undefined(a))

def detect_window_maxima(a, wind=1):
    # [1, 2, 3, 4, 3, 3] -> [3] (wind=2)
    # [1, 2, 3, 4, 4, 4, 3, 3] -> [3] (wind=2)
    # [1, 2, 3, 4, 3] -> [] (wind=2)
    span = int(wind / 2)
    ret = np.array(argrelextrema(a, np.greater_equal, order=span)[0])
    # drop maxima within span of either end;
    # a maximum must at least be greater than the preceding sample
    valid_ret = []
    for i in ret:
        if i > span and i < len(a) - span:
            if a[i-1] < a[i]:
                valid_ret.append(i)
    return np.array(valid_ret)

def detect_maxima(data, tr=None):
    """
    Find indexes k such that data[k-1] <= data[k] >= data[k+1]
    data: arr
    tr: percentile threshold (0-100)
    return: sorted indexes of the peaks above tr
    """
    if tr is None:
        return np.array(argrelextrema(data, np.greater_equal)[0])
    else:
        tval = np.percentile(data, tr)
        ret = []
        for i in argrelextrema(data, np.greater_equal)[0]:
            if data[i] > tval:
                ret.append(i)
        return np.array(ret)
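# A quick illustration (hedged example, not part of the module) of the NaN
# helpers and peak detectors above, reusing the sample arrays from their
# doc comments:
def _demo_nan_helpers():
    a = np.array([np.nan, 1, np.nan, 2, 3, np.nan])
    print(interp_undefined(a))         # [1.  1.  1.5 2.  3.  3. ]
    print(extend_undefined(a.copy()))  # [ 1.  1. nan  2.  3.  3.]
    x = np.array([1, 2, 3, 4, 3, 3], dtype=float)
    print(detect_window_maxima(x, wind=2))  # [3]
    print(detect_maxima(x))                 # indexes of all local maxima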
    # legacy implementation kept for reference:
    # for i in range(1, len(data) - 1):
    #     if data[i-1] < data[i]:  # increased from the previous value
    #         if data[i] > tval:
    #             is_peak = False
    #             for j in range(i+1, len(data)):  # find the next increase or decrease
    #                 if data[i] == data[j]:
    #                     continue
    #                 if data[i] < data[j]:
    #                     break  # value increased -> not a peak
    #                 if data[i] > data[j]:
    #                     is_peak = True
    #                     break  # value decreased -> peak!
    #             if is_peak:
    #                 ret.append(i)
    # return ret


def detect_minima(data, tr=100):
    """
    Find indexes k such that data[k-1] >= data[k] <= data[k+1]
    data: arr
    tr: percentile threshold (0-100)
    return: indexes of the minima below tr
    """
    tval = np.percentile(data, tr)
    ret = []
    for i in range(1, len(data) - 1):
        if data[i-1] > data[i]:  # value decreased
            if data[i] < tval:
                is_nadir = False
                for j in range(i+1, len(data)):  # find the next increase or decrease
                    if data[i] == data[j]:
                        continue
                    if data[i] > data[j]:
                        break  # value decreased further -> not a nadir
                    if data[i] < data[j]:
                        is_nadir = True
                        break  # value increased -> nadir!
                if is_nadir:
                    ret.append(i)
    return ret


def next_power_of_2(x):
    """
    Find the smallest power of 2 greater than or equal to x.
    """
    return 2 ** math.ceil(math.log(x) / math.log(2))


def band_pass(data, srate, fl, fh, order=5):
    if fl > fh:
        return band_pass(data, srate, fh, fl, order)
    nyq = 0.5 * srate
    b, a = butter(order, [fl / nyq, fh / nyq], btype='band')
    return filtfilt(b, a, data)

# low pass filter
# about 20x faster than an fft-based implementation
def low_pass(data, srate, fl, order=5):
    """
    low pass filter
    """
    nyq = 0.5 * srate
    low = fl / nyq
    b, a = butter(order, low, btype='lowpass')
    return filtfilt(b, a, data)

def find_nearest(a, value):
    """
    Find the nearest value in a "sorted" np array
    :param a: sorted array
    :param value: value to find
    :return: nearest value

    find_nearest([10,20,30,40,50], 21) -> 20
    find_nearest([10,20,30,40,50], 27) -> 30
    """
    idx = np.searchsorted(a, value)
    if idx > 0 and (idx == len(a) or math.fabs(value - a[idx-1]) < math.fabs(value - a[idx])):
        return a[idx-1]
    else:
        return a[idx]

def moving_average3(x, N):
    x = np.pad(x, (N//2, N-1-N//2), mode='edge')
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / N

def moving_average2(x, N):  # slowest
    x = np.pad(x, (N//2, N-1-N//2), mode='edge')
    return np.convolve(x, np.ones(N) / float(N), 'valid')

# this is the fastest when scipy is available
def moving_average(x, N):
    x = np.pad(x, (N//2, N-1-N//2), mode='edge')
    return scipy.ndimage.uniform_filter1d(x, N, mode='constant', origin=-(N//2))[:-(N-1)]
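# A small worked check (hedged example, not part of the module) showing that
# the two pure-numpy moving-average variants agree on a centered window with
# edge padding:
def _demo_moving_average():
    x = np.array([1.0, 2.0, 6.0, 4.0, 5.0])
    print(moving_average3(x, 3))  # [1.33.. 3. 4. 5. 4.66..]
    print(moving_average2(x, 3))  # same result via np.convolve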
def detect_qrs_old(data, srate):
    """
    Find QRS complexes and return their indexes.
    Pan and Tompkins, A Real-Time QRS Detection Algorithm.
    IEEE Transactions on Biomedical Engineering BME-32.3 (1985)
    """
    # maximum ecg voltage 5mV
    y1 = band_pass(data, srate, 5, 15)  # the qrs energy is concentrated at 5-15 hz
    y2 = np.convolve(y1, [-2,-1,0,1,2], 'same')  # derivative
    y3 = np.square(y2)  # square
    y4 = moving_average(y3, int(srate * 0.15))  # moving average filter
    y4[y4 > 5] = 5

    p1 = detect_window_maxima(y4, 0.3 * srate)  # find peaks

    # threshold -> 0.1 times the median value of the peaks within 10 seconds before and after
    p2 = []
    for idx in p1:
        val = y4[idx]
        peak_vals = []
        for idx2 in p1:
            if abs(idx - idx2) < srate * 10:
                peak_vals.append(y4[idx2])
        th = np.median(peak_vals) * 0.1
        if val >= th:
            p2.append(idx)

    # find the closest peak within 80 ms
    p3 = []
    last = -1
    pcand = detect_window_maxima(data, 0.08 * srate)  # 80 ms local peaks
    for x in p2:
        idx_cand = find_nearest(pcand, x)
        if idx_cand != last:
            p3.append(idx_cand)
            last = idx_cand

    # remove false positives
    p4 = list(p3)
    i = 0
    while i < len(p4) - 1:
        idx1 = p4[i]
        idx2 = p4[i+1]
        if idx2 - idx1 < 0.2 * srate:  # physiological refractory period of about 200 ms
            if i == 0:
                dele = i
            elif i >= len(p4) - 2:
                dele = i + 1
            else:  # minimize heart rate variability
                idx_prev = p4[i-1]
                idx_next = p4[i+2]
                # compare the distances to the center of the neighboring peaks
                if abs(idx_next + idx_prev - 2 * idx1) > abs(idx_next + idx_prev - 2 * idx2):
                    dele = i
                else:
                    dele = i+1
            p4.pop(dele)
            if dele == i:
                i -= 1
        i += 1
    return p4

def detect_qrs(data, srate):
    """
    Find QRS complexes and return their indexes.
    Pan and Tompkins, A Real-Time QRS Detection Algorithm.
    IEEE Transactions on Biomedical Engineering BME-32.3 (1985)
    """
    if np.isnan(data).all():
        return []

    y0 = data
    y1 = band_pass(y0, srate, 5, 15)  # the qrs energy is concentrated at 5-15 hz
    y2 = np.convolve(y1, [-2,-1,0,1,2], 'same')  # derivative
    y3 = np.square(y2)  # square
    y4 = moving_average(y3, int(srate * 0.15))  # moving average filter

    # remove erratic noise from the signal
    noise_mask = (y4 > 3)  # it never goes above 3
    if noise_mask.any():
        noise_mask = np.convolve(noise_mask, np.ones(int(srate * 2)), 'same').astype(bool)  # expand the erratic mask
        y4[noise_mask] = 0

    p1 = detect_window_maxima(y4, 0.3 * srate)  # find peaks

    if len(p1):  # discard peaks that are too small on the integrated signal
        valid_mask = y4[p1] > 0.1
        p1 = p1[valid_mask]

    min_distance = int(0.25 * srate)
    p2 = []
    spki = 0
    npki = 0
    thval = 0
    for peak in p1:  # iterate over all peaks
        if (len(p2) == 0) or (y4[peak] > thval):
            p2.append(peak)
            spki = 0.125 * y4[peak] + 0.875 * spki  # update the running signal level
        elif (len(p2) > 8) and ((peak - p2[-1] > 5 * srate) or (peak - p2[-1] > 1.66 * int(np.mean(np.diff(p2[-9:]))))) and (y4[peak] > 0.5 * thval):
            # we missed some beats -> searchback
            p2.append(peak)
            spki = 0.25 * y4[peak] + 0.75 * spki
            npki = 0.01
        else:  # otherwise, it must be noise
            npki = 0.125 * y4[peak] + 0.875 * npki
        thval = npki + 0.25 * (spki - npki)  # recalculate the threshold
        #print(f'{t} @{peak}, spki={spki:.3f}, npki={npki:.3f}, y4[peak]={y4[peak]:.3f} -> thval={thval:.3f}')

    # find the nearest extreme within 150 ms
    # it should be based on the filtered signal because it is zero-centered
    p3 = []
    ya = np.abs(y0)
    for i in p2:
        cand1 = max_idx(ya, i - int(srate * 0.15), i + int(srate * 0.15))
        cand2 = max_idx(y0, i - int(srate * 0.15), i + int(srate * 0.15))
        if abs(i - cand1) < abs(i - cand2):
            p3.append(cand1)
        else:
            p3.append(cand2)

    return p3
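# A minimal usage sketch for detect_qrs (hedged example, not part of the
# module). It mirrors the scratch test in the __main__ block at the bottom of
# this file and assumes the optional vitaldb package is installed:
def _demo_detect_qrs():
    import vitaldb
    srate = 100
    vals = vitaldb.load_case(1, ['SNUADC/ECG_II'], 1 / srate)
    ecg = interp_undefined(vals[:6000, 0])
    r_idx = detect_qrs(ecg, srate)   # sample indexes of the r-peaks
    print(np.diff(r_idx) / srate)    # rr intervals in seconds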
def remove_wander_spline(data, srate):
    """
    Cubic-spline ECG baseline wander removal.
    http://jh786t2saqs.tistory.com/416
    http://blog.daum.net/jty71/10850833
    """
    # calculate the downslope
    # downslope = [0, 0, 0]
    # for i in range(3, len(data) - 3):
    #     downslope.append(data[i-3] + data[i-1] - data[i+1] - data[i+3])
    # downslope += [0, 0, 0]
    downslope = np.convolve(data, [-1,0,-1,0,1,0,1], 'same')  # calculate the downslope

    r_list = detect_qrs(data, srate)  # detect r-peaks

    rsize = int(0.060 * srate)  # knot search window from the r-peak
    jsize = int(0.066 * srate)
    knots = []  # indexes of the knots
    for ridx in r_list:
        th = 0.6 * max(downslope[ridx:ridx + rsize])
        for j in range(ridx, ridx + rsize):
            if downslope[j] >= th:  # R detected
                knots.append(j - jsize)
                break

    # cubic spline between every pair of knots
    baseline = [0] * len(data)
    for i in range(1, len(knots)-2):
        x1 = knots[i]
        x2 = knots[i+1]
        y1 = data[x1]
        y2 = data[x2]
        d1 = (data[x2] - data[knots[i-1]]) / (x2 - knots[i-1])
        d2 = (data[knots[i+2]] - data[x1]) / (knots[i+2] - x1)
        a = -2 * (y2-y1) / (x2-x1)**3 + (d2+d1) / (x2-x1)**2
        b = 3 * (y2-y1) / (x2-x1)**2 - (d2+2*d1) / (x2-x1)
        c = d1
        d = y1
        for x in range(x1, x2):
            x_a = (x-x1)  # x - a
            x_a2 = x_a * x_a
            x_a3 = x_a2 * x_a
            baseline[x] = a * x_a3 + b * x_a2 + c * x_a + d

    for i in range(len(data)):
        data[i] -= baseline[i]

    return data


def resample(data, dest_len, avg=False):
    """
    Resample data to dest_len samples.
    avg: if True, downsampling averages the source samples (slower);
         it has no effect for upsampling.
    """
    if dest_len == 0:
        return []

    src_len = len(data)
    if src_len == 0:
        return np.zeros(dest_len)

    if dest_len == 1:  # average
        if avg:
            return np.array([np.mean(data)])
        else:
            return np.array([data[0]])

    if src_len == 1:  # copy
        return np.full(dest_len, data[0])

    if not isinstance(data, np.ndarray):
        data = np.array(data)

    if src_len == dest_len:
        return np.copy(data)

    if src_len < dest_len:  # upsample -> linear interpolation
        ret = []
        for x in range(dest_len):
            srcx = x / (dest_len - 1) * (src_len - 1)  # current position on the source
            srcx1 = math.floor(srcx)  # index 1 on the source
            srcx2 = math.ceil(srcx)  # index 2 on the source
            factor = srcx - srcx1  # how close to index 2
            val1 = data[srcx1]
            val2 = data[srcx2]
            ret.append(val1 * (1 - factor) + val2 * factor)
        return np.array(ret)

    # src_len > dest_len: downsample -> nearest or average
    if avg:
        ret = []
        for x in range(dest_len):
            src_from = int(x * src_len / dest_len)
            src_to = int((x + 1) * src_len / dest_len)
            ret.append(np.mean(data[src_from:src_to]))
        return np.array(ret)

    ret = []
    for x in range(dest_len):
        srcx = int(x * src_len / dest_len)
        ret.append(data[srcx])
    return np.array(ret)


def resample_hz(data, srate_from, srate_to, avg=False):
    dest_len = int(math.ceil(len(data) / srate_from * srate_to))
    return resample(data, dest_len, avg)


def estimate_heart_freq(data, srate, fl=30/60, fh=200/60):
    """
    An automatic beat detection algorithm for pressure signals
    http://www.ncbi.nlm.nih.gov/pubmed/16235652
    data: input signal
    srate: sampling rate
    fl: lower bound of the heart frequency (Hz)
    fh: upper bound of the heart frequency (Hz)
    """
    # Fourier transform, squared to obtain a frequency-dependent power estimate (psd)
    p = np.abs(np.fft.fft(data)) ** 2
    maxf = 0
    maxval = 0
    for w in range(len(data)):
        f = w * srate / len(data)
        if fl <= f <= fh:
            # add the first 11 harmonics, each capped at double the fundamental power
            h = 0  # harmonic power
            for k in range(1, 12):
                h += min(2 * p[w], p[(k * w) % len(data)])
            if h > maxval:
                maxval = h
                maxf = f
    return maxf
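# Worked examples (hedged, not part of the module) of the resampling helpers:
def _demo_resample():
    x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    print(resample(x, 9))                     # linear upsample -> [0. 0.5 1. ... 4.]
    print(resample_hz(x, 100, 40))            # ceil(5/100*40)=2 samples -> [0. 2.]
    print(resample_hz(x, 100, 40, avg=True))  # averaged downsample -> [0.5 3.]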
def detect_peaks(data, srate):
    """
    Obtain the maxima (systolic peaks) and minima (diastolic troughs) of a
    blood pressure or pleth waveform.
    The minlist is always one element shorter than the maxlist.
    """
    ret = []

    if not isinstance(data, np.ndarray):
        data = np.array(data)

    raw_data = np.copy(data)
    raw_srate = srate

    # resample to 100 Hz
    data = resample_hz(data, srate, 100)
    srate = 100

    # upper and lower bounds of the heart rate (Hz = /sec)
    # heart rate = hf * 60;
    fh = 200 / 60  # 3.3
    fl = 30 / 60  # 0.5

    # estimate hr
    y1 = band_pass(data, srate, 0.5 * fl, 3 * fh)

    # A legacy JavaScript variant divided the signal into four regions and
    # used the median of the four heart-rate estimates:
    # hf = []
    # for(var i = 0; i < 4; i++) {
    #     var subw = new Wav(srate, y1.vals.copy(data.length / 4 * i, data.length / 4 * (i+1)));
    #     hf[i] = subw.estimate_heart_rate(fl, fh);
    #     if(hf[i] == 0) {
    #         console.log("HR estimation failed, assume 75");
    #         hf[i] = 75 / 60;
    #     }
    # }
    # hf = hf.median();

    # whole-signal heart frequency estimation
    hf = estimate_heart_freq(y1, srate)
    if hf == 0:
        print("HR estimation failed, assume 75")
        hf = 75 / 60

    # band pass filter again using the estimated heart frequency
    y2 = band_pass(data, srate, 0.5 * fl, 2.5 * hf)
    d2 = np.diff(y2)

    # detect peaks in the gradient
    p2 = detect_maxima(d2, 90)

    # detect real peaks
    y3 = band_pass(data, srate, 0.5 * fl, 10 * hf)
    p3 = detect_maxima(y3, 60)

    # find the closest p3 that follows each p2
    p4 = []
    last_p3 = 0
    for idx_p2 in p2:
        idx_p3 = 0
        for idx_p3 in p3:
            if idx_p3 > idx_p2:
                break
        if idx_p3 != 0:
            if last_p3 != idx_p3:
                p4.append(idx_p3)
                last_p3 = idx_p3

    # nearest neighbor and inter-beat interval correction
    # pc: corrected locations of the detected peaks
    pc = []

    # find all maxima before preprocessing
    m = detect_maxima(data, 0)
    m = np.array(m)

    # correct peak location errors introduced by the preprocessing
    last = -1
    for idx_p4 in p4:
        cand = find_nearest(m, idx_p4)
        if cand != last:
            pc.append(cand)
            last = cand

    ht = 1 / hf  # beat interval (sec)

    # correct false negatives (FN)
    # check whether there is an r-peak not yet included in pc
    i = -1
    while i < len(pc):
        if i < 0:
            idx_from = 0
        else:
            idx_from = pc[i]

        if i >= len(pc) - 1:
            idx_to = len(data)-1
        else:
            idx_to = pc[i+1]

        # if the gap is short enough, no beat is missing
        if idx_to - idx_from < 1.75 * ht * srate:
            i += 1
            continue

        # a missing beat cannot lie within 0.2 beat-interval of either side
        idx_from += 0.2 * ht * srate
        idx_to -= 0.2 * ht * srate

        # find the missing peak and add it:
        # the maximum value between idx_from and idx_to
        idx_max = -1
        val_max = 0

        for j in range(np.searchsorted(m, idx_from), len(m)):
            idx_cand = m[j]
            if idx_cand >= idx_to:
                break
            if idx_max == -1 or val_max < data[idx_cand]:
                val_max = data[idx_cand]
                idx_max = idx_cand

        # if there is no candidate for this FN, simply move on
        if idx_max != -1:  # add idx_max and restart from there
            pc.insert(i+1, idx_max)
            i -= 1
        i += 1

    # correct false positives (FP)
    i = 0
    while i < len(pc) - 1:
        idx1 = pc[i]
        idx2 = pc[i+1]
        if idx2 - idx1 < 0.75 * ht * srate:  # false positive
            idx_del = i + 1  # default: delete i+1
            if 1 < i < len(pc) - 2:
                # minimize heart rate variability
                idx_prev = pc[i-1]
                idx_next = pc[i+2]

                # compare the distances to the center of the neighboring peaks
                d1 = abs(idx_next + idx_prev - 2 * idx1)
                d2 = abs(idx_next + idx_prev - 2 * idx2)

                if d1 > d2:
                    idx_del = i
                else:
                    idx_del = i+1

            elif i == 0:
                idx_del = i
            elif i == len(pc) - 2:
                idx_del = i+1

            pc.pop(idx_del)
            i -= 1
        i += 1

    # remove duplicates
    i = 0
    while i < len(pc) - 1:
        if pc[i] == pc[i+1]:
            pc.pop(i)
        else:
            i += 1

    # find the nearest peaks in the raw data.
    # data was resampled (e.g. 1000 Hz -> 100 Hz) to get the max indexes, so a
    # position found on the resampled signal may be off by up to raw_srate / srate samples
    maxlist = []
    ratio = math.ceil(raw_srate / srate)
    for maxidx in pc:
        idx = int(maxidx * raw_srate / srate)  # estimated idx -> not precise
        maxlist.append(max_idx(raw_data, idx - ratio - 1, idx + ratio + 1))

    # get the minlist from the maxlist
    minlist = []
    for i in range(len(maxlist) - 1):
        minlist.append(min_idx(raw_data, maxlist[i], maxlist[i+1]))

    return [minlist, maxlist]
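# A usage sketch for detect_peaks on an arterial waveform (hedged example, not
# part of the module; mirrors the scratch test in __main__ and assumes the
# optional vitaldb package is installed):
def _demo_detect_peaks():
    import vitaldb
    srate = 100
    vals = vitaldb.load_case(1, ['SNUADC/ART'], 1 / srate)
    art = exclude_undefined(vals[300000:306000, 0])
    minlist, maxlist = detect_peaks(art, srate)
    print(len(maxlist), 'systolic peaks,', len(minlist), 'diastolic troughs')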
def estimate_resp_rate(data, srate):
    """
    count-adv algorithm
    doi: 10.1007/s10439-007-9428-1
    """
    filted = band_pass(data, srate, 0.1, 0.5)

    # find maxima
    maxlist = list(detect_maxima(filted))
    minlist = []  # find minima
    for i in range(len(maxlist) - 1):
        minlist.append(min_idx(data, maxlist[i] + 1, maxlist[i+1]))
    extrema = sorted(maxlist + minlist)

    # iteratively drop the smallest extrema pair until all remaining
    # peak-to-trough differences are large enough
    while len(extrema) >= 4:
        diffs = []  # absolute differences between adjacent extrema
        for i in range(len(extrema) - 1):
            diffs.append(abs(filted[extrema[i]] - filted[extrema[i + 1]]))
        th = 0.1 * np.percentile(diffs, 75)
        minidx = np.argmin(diffs)
        if diffs[minidx] >= th:
            break
        extrema.pop(minidx)
        extrema.pop(minidx)

    if len(extrema) < 3:
        print("warning: rr estimation failed, 13 used")
        return 13

    # the mean distance between adjacent extrema is half a breath
    resp_len = np.mean(np.diff(extrema)) * 2
    rr = 60 * srate / resp_len

    return rr
    # # count-orig algorithm
    # tval = 0.2 * np.percentile(max_vals, 75)  # find the 75th percentile value
    #
    # # check whether each maximum exceeds the 75th percentile
    # max_over = [(maxval > tval) for maxval in max_vals]
    # resp_lens = []
    # for i in range(len(maxlist) - 1):
    #     if max_over[i] and max_over[i+1]:
    #         cnt = 0
    #         minval = 0
    #         for minidx in min_idxs:
    #             if minidx > maxlist[i+1]:
    #                 break
    #             if minidx < maxlist[i]:
    #                 continue
    #             cnt += 1
    #             if cnt > 1:
    #                 break
    #             minval = filted[minidx]
    #
    #         if cnt == 1 and minval < 0:
    #             resp_len = maxlist[i+1] - maxlist[i]
    #             if resp_len > 0:
    #                 resp_lens.append(resp_len)
    # if len(resp_lens) == 0:
    #     print("warning: rr estimation failed, 13 used")
    #     return 13
    #
    # rr = 60 * srate / np.mean(resp_lens)
    #
    # return rr
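# An illustrative call (hedged example, not part of the module): a 0.25 Hz
# sinusoid stands in for a respiration waveform, so the expected rate is about
# 15 breaths/min:
def _demo_estimate_resp_rate():
    srate = 10
    t = np.arange(0, 60, 1 / srate)
    resp = np.sin(2 * np.pi * 0.25 * t)
    print(estimate_resp_rate(resp, srate))  # ~15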
if __name__ == '__main__':
    # scratch tests: only the first block runs because of the quit() calls,
    # and the later blocks need additional setup (e.g. INTERVAL, df_trks)
    import vitaldb
    srate = 100
    vals = vitaldb.load_case(1, ['SNUADC/ECG_II','SNUADC/ART'], 1 / srate)
    art = vals[300000:306000, 1]

    import filters.abp_ppv as f
    res = f.run({'ART':{'srate':100, 'vals':art}}, {}, f.cfg)
    print(res[0])
    quit()

    ecg = vals[110000:111000, 0]
    ecg = interp_undefined(ecg)
    peaks = detect_qrs(ecg, srate)  # find qrs and return the indexes

    import matplotlib.pyplot as plt
    plt.figure(figsize=(15,5))
    plt.plot(ecg, color='g')
    plt.plot(peaks, [ecg[i] for i in peaks], 'ro')
    plt.show()
    quit()

    import filters.ecg_hrv as f
    for i in range(0, len(ecg), INTERVAL):
        print(f.run({f.cfg['inputs'][0]['name']: {'srate':500.0, 'vals': ecg[i:i+INTERVAL,1]}}, {}, f.cfg))
    quit()

    import matplotlib.pyplot as plt
    plt.figure(figsize=(15,5))
    plt.plot(sig, label='ecg')
    plt.plot(p2, sig[p2], 'rx')
    plt.legend()
    plt.show()

    quit()

    tid = df_trks[(df_trks['caseid'] == caseid) & (df_trks['tname'] == 'SNUADC/ART')]['tid'].values[0]
    art = pd.read_csv('https://api.vitaldb.net/' + tid).values[:,1]

    srate = 500

    art = exclude_undefined(art)
    # peaks = detect_peaks(art, srate)
    # print(peaks)
    #import cProfile
    #cProfile.run('detect_peaks(art, srate)')
    #quit()

    idx_start = 3600 * srate
    art = vals[idx_start:idx_start + 8 * srate, 1]
    art = exclude_undefined(art)
    peaks = detect_peaks(art, srate)

    print(peaks)
--------------------------------------------------------------------------------