├── .gitignore ├── .travis.yml ├── README.md ├── README.rst ├── peakdetect ├── __init__.py └── peakdetect.py ├── requirements.txt ├── setup.py ├── stale.yml ├── test.py └── waveform.py /.gitignore: -------------------------------------------------------------------------------- 1 | # emacs 2 | \#*\# 3 | 4 | # vim 5 | *.swp 6 | *~ 7 | 8 | # virtual envs 9 | *venv 10 | 11 | # builds 12 | build 13 | dist 14 | *.egg-info 15 | 16 | # bytecode 17 | __pycache__ 18 | *.pyc 19 | *.pyo 20 | 21 | # macOS 22 | .DS_Store 23 | 24 | # jetbrains 25 | .idea 26 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 2.7 4 | - 3.5 5 | - 3.6 6 | 7 | # For Python 3.7 8 | matrix: 9 | include: 10 | - python: 3.7 11 | dist: xenial 12 | sudo: true 13 | 14 | allow_failures: 15 | - python: 2.7 16 | 17 | script: 18 | - python setup.py install 19 | - python test.py 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Peakdetect 2 | [![PyPI](https://badge.fury.io/py/peakdetect.svg)](https://pypi.org/project/peakdetect) 3 | [![Build](https://travis-ci.org/Anaxilaus/peakdetect.svg?branch=master)](https://travis-ci.org/Anaxilaus/peakdetect) 4 | [![Python Version](https://img.shields.io/badge/python-2%20and%203-blue.svg)](./.travis.yml) 5 | 6 | Simple peak detection library for Python based on [Billauer's work](http://billauer.co.il/peakdet.html) and [this gist](https://gist.github.com/sixtenbe/1178136). If you can improve this project, feel free to contribute. 7 | 8 | 9 | ## Installation 10 | **Pip:** 11 | ``` 12 | $ pip install peakdetect 13 | ``` 14 | 15 | **Clone repository:** 16 | ``` 17 | $ git clone https://github.com/avhn/peakdetect 18 | $ python peakdetect/setup.py install 19 | ``` 20 | *Requirements:* numpy and scipy. Setup installs requirements itself. 21 | 22 | 23 | ## Usage 24 | **Example usage:** 25 | ```python 26 | >>> import peakdetect 27 | >>> peaks = peakdetect.peakdetect(y_axis, x_axis, lookahead, delta) 28 | ``` 29 | 30 | **Documentation on peakdetect function, keyword arguments:** 31 | ``` 32 | y_axis -- A list containing the signal over which to find peaks 33 | 34 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 35 | in the return to specify the position of the peaks. If omitted an 36 | index of the y_axis is used. 37 | (default: None) 38 | 39 | lookahead -- distance to look ahead from a peak candidate to determine if 40 | it is the actual peak 41 | (default: 200) 42 | '(samples / period) / f' where '4 >= f >= 1.25' might be a good value 43 | 44 | delta -- this specifies a minimum difference between a peak and 45 | the following points, before a peak may be considered a peak. Useful 46 | to hinder the function from picking up false peaks towards to end of 47 | the signal. To work well delta should be set to delta >= RMSnoise * 5. 48 | (default: 0) 49 | When omitted delta function causes a 20% decrease in speed. 50 | When used Correctly it can double the speed of the function 51 | ``` 52 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Peakdetect 2 | ========== 3 | |PyPI| 4 | |Build| 5 | |Python Versions| 6 | 7 | Read, `Markdown Documentation`_. 
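A short usage sketch in the same spirit as the Markdown documentation (the signal and the lookahead/delta values below are illustrative only, not recommendations)::

    import numpy as np
    import peakdetect

    # a noisy 50 Hz test signal: 5 cycles, 10000 samples
    t = np.linspace(0, 0.10, 10000)
    y = np.sin(2 * np.pi * 50 * t) + 0.05 * np.random.randn(len(t))

    # lookahead ~ (samples per period) / 4, delta ~ 5 * RMS noise
    max_peaks, min_peaks = peakdetect.peakdetect(y, t, lookahead=500, delta=0.3)
    x_max, y_max = zip(*max_peaks)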
8 | 9 | 10 | .. |PyPI| image:: https://badge.fury.io/py/peakdetect.svg 11 | :target: https://pypi.org/project/peakdetect 12 | .. |Build| image:: https://travis-ci.org/Anaxilaus/peakdetect.svg?branch=master 13 | :target: https://travis-ci.org/Anaxilaus/peakdetect 14 | .. |Python Versions| image:: https://img.shields.io/badge/python-2%20and%203-blue.svg 15 | :target: https://github.com/Anaxilaus/peakdetect/tree/master/.travis.yml 16 | .. _`Markdown Documentation`: https://github.com/avhn/peakdetect/tree/master/README.md 17 | -------------------------------------------------------------------------------- /peakdetect/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from .peakdetect import * 4 | -------------------------------------------------------------------------------- /peakdetect/peakdetect.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from math import pi, log 4 | import numpy as np 5 | from scipy import fft, ifft 6 | from scipy.optimize import curve_fit 7 | from scipy.signal import cspline1d_eval, cspline1d 8 | 9 | __all__ = [ 10 | "peakdetect", 11 | "peakdetect_fft", 12 | "peakdetect_parabola", 13 | "peakdetect_sine", 14 | "peakdetect_sine_locked", 15 | "peakdetect_spline", 16 | "peakdetect_zero_crossing", 17 | "zero_crossings", 18 | "zero_crossings_sine_fit" 19 | ] 20 | 21 | 22 | def _datacheck_peakdetect(x_axis, y_axis): 23 | if x_axis is None: 24 | x_axis = range(len(y_axis)) 25 | 26 | if len(y_axis) != len(x_axis): 27 | raise ValueError( 28 | "Input vectors y_axis and x_axis must have same length") 29 | 30 | # needs to be a numpy array 31 | y_axis = np.array(y_axis) 32 | x_axis = np.array(x_axis) 33 | return x_axis, y_axis 34 | 35 | 36 | def _pad(fft_data, pad_len): 37 | """ 38 | Pads fft data to interpolate in time domain 39 | 40 | keyword arguments: 41 | fft_data -- the fft 42 | pad_len -- By how many times the time resolution should be increased by 43 | 44 | return: padded list 45 | """ 46 | l = len(fft_data) 47 | n = _n(l * pad_len) 48 | fft_data = list(fft_data) 49 | 50 | return fft_data[:l // 2] + [0] * (2**n-l) + fft_data[l // 2:] 51 | 52 | 53 | def _n(x): 54 | """ 55 | Find the smallest value for n, which fulfils 2**n >= x 56 | 57 | keyword arguments: 58 | x -- the value, which 2**n must surpass 59 | 60 | return: the integer n 61 | """ 62 | return int(log(x)/log(2)) + 1 63 | 64 | 65 | def _peakdetect_parabola_fitter(raw_peaks, x_axis, y_axis, points): 66 | """ 67 | Performs the actual parabola fitting for the peakdetect_parabola function. 68 | 69 | keyword arguments: 70 | raw_peaks -- A list of either the maxima or the minima peaks, as given 71 | by the peakdetect functions, with index used as x-axis 72 | 73 | x_axis -- A numpy array of all the x values 74 | 75 | y_axis -- A numpy array of all the y values 76 | 77 | points -- How many points around the peak should be used during curve 78 | fitting, must be odd. 
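For orientation, a minimal sketch of what is done per raw peak (it mirrors
    the code below; x_data/y_data are the 'points' samples around the peak):

        func = lambda x, a, tau, c: a * ((x - tau) ** 2) + c
        popt, pcov = curve_fit(func, x_data, y_data, p0=(a, tau, c))
        x_peak, y_peak = popt[1:3]  # refined peak position (tau) and value (c)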
79 | 80 | 81 | return: A list giving all the peaks and the fitted waveform, format: 82 | [[x, y, [fitted_x, fitted_y]]] 83 | 84 | """ 85 | func = lambda x, a, tau, c: a * ((x - tau) ** 2) + c 86 | fitted_peaks = [] 87 | distance = abs(x_axis[raw_peaks[1][0]] - x_axis[raw_peaks[0][0]]) / 4 88 | for peak in raw_peaks: 89 | index = peak[0] 90 | x_data = x_axis[index - points // 2: index + points // 2 + 1] 91 | y_data = y_axis[index - points // 2: index + points // 2 + 1] 92 | # get a first approximation of tau (peak position in time) 93 | tau = x_axis[index] 94 | # get a first approximation of peak amplitude 95 | c = peak[1] 96 | a = np.sign(c) * (-1) * (np.sqrt(abs(c))/distance)**2 97 | """Derived from ABC formula to result in a solution where A=(rot(c)/t)**2""" 98 | 99 | # build list of approximations 100 | p0 = (a, tau, c) 101 | popt, pcov = curve_fit(func, x_data, y_data, p0) 102 | # retrieve tau and c i.e x and y value of peak 103 | x, y = popt[1:3] 104 | 105 | # create a high resolution data set for the fitted waveform 106 | x2 = np.linspace(x_data[0], x_data[-1], points * 10) 107 | y2 = func(x2, *popt) 108 | 109 | fitted_peaks.append([x, y, [x2, y2]]) 110 | 111 | return fitted_peaks 112 | 113 | 114 | def peakdetect(y_axis, x_axis=None, lookahead=200, delta=0): 115 | """ 116 | Converted from/based on a MATLAB script at: 117 | http://billauer.co.il/peakdet.html 118 | 119 | function for detecting local maxima and minima in a signal. 120 | Discovers peaks by searching for values which are surrounded by lower 121 | or larger values for maxima and minima respectively 122 | 123 | keyword arguments: 124 | y_axis -- A list containing the signal over which to find peaks 125 | 126 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 127 | in the return to specify the position of the peaks. If omitted an 128 | index of the y_axis is used. 129 | (default: None) 130 | 131 | lookahead -- distance to look ahead from a peak candidate to determine if 132 | it is the actual peak 133 | (default: 200) 134 | '(samples / period) / f' where '4 >= f >= 1.25' might be a good value 135 | 136 | delta -- this specifies a minimum difference between a peak and 137 | the following points, before a peak may be considered a peak. Useful 138 | to hinder the function from picking up false peaks towards to end of 139 | the signal. To work well delta should be set to delta >= RMSnoise * 5. 140 | (default: 0) 141 | When omitted delta function causes a 20% decrease in speed. 142 | When used Correctly it can double the speed of the function 143 | 144 | 145 | return: two lists [max_peaks, min_peaks] containing the positive and 146 | negative peaks respectively. 
Each cell of the lists contains a tuple 147 | of: (position, peak_value) 148 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 149 | results to unpack one of the lists into x, y coordinates do: 150 | x, y = zip(*max_peaks) 151 | """ 152 | 153 | max_peaks = [] 154 | min_peaks = [] 155 | dump = [] # Used to pop the first hit which almost always is false 156 | 157 | # check input data 158 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 159 | # store data length for later use 160 | length = len(y_axis) 161 | 162 | # perform some checks 163 | if lookahead < 1: 164 | raise ValueError("Lookahead must be '1' or above in value") 165 | if not (np.isscalar(delta) and delta >= 0): 166 | raise ValueError("delta must be a positive number") 167 | 168 | # maxima and minima candidates are temporarily stored in 169 | # mx and mn respectively 170 | mn, mx = np.Inf, -np.Inf 171 | 172 | # Only detect peak if there is 'lookahead' amount of points after it 173 | for index, (x, y) in enumerate(zip(x_axis[:-lookahead], 174 | y_axis[:-lookahead])): 175 | if y > mx: 176 | mx = y 177 | mxpos = x 178 | if y < mn: 179 | mn = y 180 | mnpos = x 181 | 182 | # look for max 183 | if y < mx-delta and mx != np.Inf: 184 | # Maxima peak candidate found 185 | # look ahead in signal to ensure that this is a peak and not jitter 186 | if y_axis[index:index+lookahead].max() < mx: 187 | max_peaks.append([mxpos, mx]) 188 | dump.append(True) 189 | # set algorithm to only find minima now 190 | mx = np.Inf 191 | mn = np.Inf 192 | if index+lookahead >= length: 193 | # end is within lookahead no more peaks can be found 194 | break 195 | continue 196 | # else: # slows shit down this does 197 | # mx = ahead 198 | # mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)] 199 | 200 | # look for min 201 | if y > mn+delta and mn != -np.Inf: 202 | # Minima peak candidate found 203 | # look ahead in signal to ensure that this is a peak and not jitter 204 | if y_axis[index:index+lookahead].min() > mn: 205 | min_peaks.append([mnpos, mn]) 206 | dump.append(False) 207 | # set algorithm to only find maxima now 208 | mn = -np.Inf 209 | mx = -np.Inf 210 | if index+lookahead >= length: 211 | # end is within lookahead no more peaks can be found 212 | break 213 | # else: # slows shit down this does 214 | # mn = ahead 215 | # mnpos = x_axis[np.where(y_axis[index:index+lookahead]==mn)] 216 | 217 | # Remove the false hit on the first value of the y_axis 218 | try: 219 | if dump[0]: 220 | max_peaks.pop(0) 221 | else: 222 | min_peaks.pop(0) 223 | del dump 224 | except IndexError: 225 | # no peaks were found, should the function return empty lists? 226 | pass 227 | 228 | return [max_peaks, min_peaks] 229 | 230 | 231 | def peakdetect_fft(y_axis, x_axis, pad_len = 20): 232 | """ 233 | Performs a FFT calculation on the data and zero-pads the results to 234 | increase the time domain resolution after performing the inverse fft and 235 | send the data to the 'peakdetect' function for peak 236 | detection. 237 | 238 | Omitting the x_axis is forbidden as it would make the resulting x_axis 239 | value silly if it was returned as the index 50.234 or similar. 240 | 241 | Will find at least 1 less peak then the 'peakdetect_zero_crossing' 242 | function, but should result in a more precise value of the peak as 243 | resolution has been increased. Some peaks are lost in an attempt to 244 | minimize spectral leakage by calculating the fft between two zero 245 | crossings for n amount of signal periods. 
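As a rough sketch, the interpolation step amounts to the following (using the
    module's _pad helper, which is equivalent to the inline padd/n lambdas
    below; y is assumed to cover a whole number of periods):

        spectrum = fft(y)
        padded = _pad(spectrum, pad_len)            # zero-pad around the Nyquist bin
        scale = len(padded) / float(len(spectrum))  # compensate the amplitude drop
        y_interpolated = ifft(padded).real * scale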
246 | 247 | The biggest time eater in this function is the ifft and thereafter it's 248 | the 'peakdetect' function which takes only half the time of the ifft. 249 | Speed improvements could include to check if 2**n points could be used for 250 | fft and ifft or change the 'peakdetect' to the 'peakdetect_zero_crossing', 251 | which is maybe 10 times faster than 'peakdetct'. The pro of 'peakdetect' 252 | is that it results in one less lost peak. It should also be noted that the 253 | time used by the ifft function can change greatly depending on the input. 254 | 255 | keyword arguments: 256 | y_axis -- A list containing the signal over which to find peaks 257 | 258 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 259 | in the return to specify the position of the peaks. 260 | 261 | pad_len -- By how many times the time resolution should be 262 | increased by, e.g. 1 doubles the resolution. The amount is rounded up 263 | to the nearest 2**n amount 264 | (default: 20) 265 | 266 | 267 | return: two lists [max_peaks, min_peaks] containing the positive and 268 | negative peaks respectively. Each cell of the lists contains a tuple 269 | of: (position, peak_value) 270 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 271 | results to unpack one of the lists into x, y coordinates do: 272 | x, y = zip(*max_peaks) 273 | """ 274 | # check input data 275 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 276 | zero_indices = zero_crossings(y_axis, window_len = 11) 277 | # select a n amount of periods 278 | last_indice = - 1 - (1 - len(zero_indices) & 1) 279 | ### 280 | # Calculate the fft between the first and last zero crossing 281 | # this method could be ignored if the beginning and the end of the signal 282 | # are unnecessary as any errors induced from not using whole periods 283 | # should mainly manifest in the beginning and the end of the signal, but 284 | # not in the rest of the signal 285 | # this is also unnecessary if the given data is an amount of whole periods 286 | ### 287 | fft_data = fft(y_axis[zero_indices[0]:zero_indices[last_indice]]) 288 | padd = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:] 289 | n = lambda x: int(log(x)/log(2)) + 1 290 | # pads to 2**n amount of samples 291 | fft_padded = padd(list(fft_data), 2 ** 292 | n(len(fft_data) * pad_len) - len(fft_data)) 293 | 294 | # There is amplitude decrease directly proportional to the sample increase 295 | sf = len(fft_padded) / float(len(fft_data)) 296 | # There might be a leakage giving the result an imaginary component 297 | # Return only the real component 298 | y_axis_ifft = ifft(fft_padded).real * sf # (pad_len + 1) 299 | x_axis_ifft = np.linspace( 300 | x_axis[zero_indices[0]], x_axis[zero_indices[last_indice]], 301 | len(y_axis_ifft)) 302 | # get the peaks to the interpolated waveform 303 | max_peaks, min_peaks = peakdetect(y_axis_ifft, x_axis_ifft, 500, 304 | delta = abs(np.diff(y_axis).max() * 2)) 305 | # max_peaks, min_peaks = peakdetect_zero_crossing(y_axis_ifft, x_axis_ifft) 306 | 307 | # store one 20th of a period as waveform data 308 | data_len = int(np.diff(zero_indices).mean()) / 10 309 | data_len += 1 - data_len & 1 310 | 311 | return [max_peaks, min_peaks] 312 | 313 | 314 | def peakdetect_parabola(y_axis, x_axis, points = 31): 315 | """ 316 | Function for detecting local maxima and minima in a signal. 317 | Discovers peaks by fitting the model function: y = k (x - tau) ** 2 + m 318 | to the peaks. 
The amount of points used in the fitting is set by the 319 | points argument. 320 | 321 | Omitting the x_axis is forbidden as it would make the resulting x_axis 322 | value silly, if it was returned as index 50.234 or similar. 323 | 324 | will find the same amount of peaks as the 'peakdetect_zero_crossing' 325 | function, but might result in a more precise value of the peak. 326 | 327 | keyword arguments: 328 | y_axis -- A list containing the signal over which to find peaks 329 | 330 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 331 | in the return to specify the position of the peaks. 332 | 333 | points -- How many points around the peak should be used during curve 334 | fitting (default: 31) 335 | 336 | 337 | return: two lists [max_peaks, min_peaks] containing the positive and 338 | negative peaks respectively. Each cell of the lists contains a tuple 339 | of: (position, peak_value) 340 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 341 | results to unpack one of the lists into x, y coordinates do: 342 | x, y = zip(*max_peaks) 343 | """ 344 | 345 | # check input data 346 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 347 | # make the points argument odd 348 | points += 1 - points % 2 349 | # points += 1 - int(points) & 1 slower when int conversion needed 350 | 351 | # get raw peaks 352 | max_raw, min_raw = peakdetect_zero_crossing(y_axis) 353 | 354 | max_ = _peakdetect_parabola_fitter(max_raw, x_axis, y_axis, points) 355 | min_ = _peakdetect_parabola_fitter(min_raw, x_axis, y_axis, points) 356 | 357 | max_peaks = map(lambda x: [x[0], x[1]], max_) 358 | # max_fitted = map(lambda x: x[-1], max_) 359 | min_peaks = map(lambda x: [x[0], x[1]], min_) 360 | # min_fitted = map(lambda x: x[-1], min_) 361 | 362 | return [max_peaks, min_peaks] 363 | 364 | 365 | def peakdetect_sine(y_axis, x_axis, points=31, lock_frequency=False): 366 | """ 367 | Function for detecting local maxima and minima in a signal. 368 | Discovers peaks by fitting the model function: 369 | y = A * sin(2 * pi * f * (x - tau)) to the peaks. The amount of points used 370 | in the fitting is set by the points argument. 371 | 372 | Omitting the x_axis is forbidden as it would make the resulting x_axis 373 | value silly if it was returned as index 50.234 or similar. 374 | 375 | will find the same amount of peaks as the 'peakdetect_zero_crossing' 376 | function, but might result in a more precise value of the peak. 377 | 378 | The function might have some problems if the sine wave has a 379 | non-negligible total angle i.e. a k*x component, as this messes with the 380 | internal offset calculation of the peaks, might be fixed by fitting a 381 | y = k * x + m function to the peaks for offset calculation. 382 | 383 | keyword arguments: 384 | y_axis -- A list containing the signal over which to find peaks 385 | 386 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 387 | in the return to specify the position of the peaks. 388 | 389 | points -- How many points around the peak should be used during curve 390 | fitting (default: 31) 391 | 392 | lock_frequency -- Specifies if the frequency argument of the model 393 | function should be locked to the value calculated from the raw peaks 394 | or if optimization process may tinker with it. 395 | (default: False) 396 | 397 | 398 | return: two lists [max_peaks, min_peaks] containing the positive and 399 | negative peaks respectively. 
Each cell of the lists contains a tuple 400 | of: (position, peak_value) 401 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 402 | results to unpack one of the lists into x, y coordinates do: 403 | x, y = zip(*max_peaks) 404 | """ 405 | # check input data 406 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 407 | # make the points argument odd 408 | points += 1 - points % 2 409 | # points += 1 - int(points) & 1 slower when int conversion needed 410 | 411 | # get raw peaks 412 | max_raw, min_raw = peakdetect_zero_crossing(y_axis) 413 | 414 | # get global offset 415 | offset = np.mean([np.mean(max_raw, 0)[1], np.mean(min_raw, 0)[1]]) 416 | # fitting a k * x + m function to the peaks might be better 417 | # offset_func = lambda x, k, m: k * x + m 418 | 419 | # calculate an approximate frequency of the signal 420 | Hz_h_peak = np.diff(zip(*max_raw)[0]).mean() 421 | Hz_l_peak = np.diff(zip(*min_raw)[0]).mean() 422 | Hz = 1 / np.mean([Hz_h_peak, Hz_l_peak]) 423 | 424 | # model function 425 | # if cosine is used then tau could equal the x position of the peak 426 | # if sine were to be used then tau would be the first zero crossing 427 | if lock_frequency: 428 | func = lambda x_ax, A, tau: A * np.sin( 429 | 2 * pi * Hz * (x_ax - tau) + pi / 2) 430 | else: 431 | func = lambda x_ax, A, Hz, tau: A * np.sin( 432 | 2 * pi * Hz * (x_ax - tau) + pi / 2) 433 | # func = lambda x_ax, A, Hz, tau: A * np.cos(2 * pi * Hz * (x_ax - tau)) 434 | 435 | # get peaks 436 | fitted_peaks = [] 437 | for raw_peaks in [max_raw, min_raw]: 438 | peak_data = [] 439 | for peak in raw_peaks: 440 | index = peak[0] 441 | x_data = x_axis[index - points // 2: index + points // 2 + 1] 442 | y_data = y_axis[index - points // 2: index + points // 2 + 1] 443 | # get a first approximation of tau (peak position in time) 444 | tau = x_axis[index] 445 | # get a first approximation of peak amplitude 446 | A = peak[1] 447 | 448 | # build list of approximations 449 | if lock_frequency: 450 | p0 = (A, tau) 451 | else: 452 | p0 = (A, Hz, tau) 453 | 454 | # subtract offset from wave-shape 455 | y_data -= offset 456 | popt, pcov = curve_fit(func, x_data, y_data, p0) 457 | # retrieve tau and A i.e x and y value of peak 458 | x = popt[-1] 459 | y = popt[0] 460 | 461 | # create a high resolution data set for the fitted waveform 462 | x2 = np.linspace(x_data[0], x_data[-1], points * 10) 463 | y2 = func(x2, *popt) 464 | 465 | # add the offset to the results 466 | y += offset 467 | y2 += offset 468 | y_data += offset 469 | 470 | peak_data.append([x, y, [x2, y2]]) 471 | 472 | fitted_peaks.append(peak_data) 473 | 474 | # structure date for output 475 | max_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[0]) 476 | # max_fitted = map(lambda x: x[-1], fitted_peaks[0]) 477 | min_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[1]) 478 | # min_fitted = map(lambda x: x[-1], fitted_peaks[1]) 479 | 480 | return [max_peaks, min_peaks] 481 | 482 | 483 | def peakdetect_sine_locked(y_axis, x_axis, points = 31): 484 | """ 485 | Convenience function for calling the 'peakdetect_sine' function with 486 | the lock_frequency argument as True. 487 | 488 | keyword arguments: 489 | y_axis -- A list containing the signal over which to find peaks 490 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 491 | in the return to specify the position of the peaks. 
492 | points -- How many points around the peak should be used during curve 493 | fitting (default: 31) 494 | 495 | return: see the function 'peakdetect_sine' 496 | """ 497 | return peakdetect_sine(y_axis, x_axis, points, True) 498 | 499 | 500 | def peakdetect_spline(y_axis, x_axis, pad_len=20): 501 | """ 502 | Performs a b-spline interpolation on the data to increase resolution and 503 | send the data to the 'peakdetect_zero_crossing' function for peak 504 | detection. 505 | 506 | Omitting the x_axis is forbidden as it would make the resulting x_axis 507 | value silly if it was returned as the index 50.234 or similar. 508 | 509 | will find the same amount of peaks as the 'peakdetect_zero_crossing' 510 | function, but might result in a more precise value of the peak. 511 | 512 | keyword arguments: 513 | y_axis -- A list containing the signal over which to find peaks 514 | 515 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 516 | in the return to specify the position of the peaks. 517 | x-axis must be equally spaced. 518 | 519 | pad_len -- By how many times the time resolution should be increased by, 520 | e.g. 1 doubles the resolution. 521 | (default: 20) 522 | 523 | 524 | return: two lists [max_peaks, min_peaks] containing the positive and 525 | negative peaks respectively. Each cell of the lists contains a tuple 526 | of: (position, peak_value) 527 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 528 | results to unpack one of the lists into x, y coordinates do: 529 | x, y = zip(*max_peaks) 530 | """ 531 | # check input data 532 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 533 | # could perform a check if x_axis is equally spaced 534 | # if np.std(np.diff(x_axis)) > 1e-15: raise ValueError 535 | # perform spline interpolations 536 | dx = x_axis[1] - x_axis[0] 537 | x_interpolated = np.linspace(x_axis.min(), x_axis.max(), len(x_axis) * (pad_len + 1)) 538 | cj = cspline1d(y_axis) 539 | y_interpolated = cspline1d_eval(cj, x_interpolated, dx=dx,x0=x_axis[0]) 540 | # get peaks 541 | max_peaks, min_peaks = peakdetect_zero_crossing(y_interpolated, x_interpolated) 542 | 543 | return [max_peaks, min_peaks] 544 | 545 | 546 | def peakdetect_zero_crossing(y_axis, x_axis = None, window = 11): 547 | """ 548 | Function for detecting local maxima and minima in a signal. 549 | Discovers peaks by dividing the signal into bins and retrieving the 550 | maximum and minimum value of each the even and odd bins respectively. 551 | Division into bins is performed by smoothing the curve and finding the 552 | zero crossings. 553 | 554 | Suitable for repeatable signals, where some noise is tolerated. Executes 555 | faster than 'peakdetect', although this function will break if the offset 556 | of the signal is too large. It should also be noted that the first and 557 | last peak will probably not be found, as this function only can find peaks 558 | between the first and last zero crossing. 559 | 560 | keyword arguments: 561 | y_axis -- A list containing the signal over which to find peaks 562 | 563 | x_axis -- A x-axis whose values correspond to the y_axis list 564 | and is used in the return to specify the position of the peaks. If 565 | omitted an index of the y_axis is used. 566 | (default: None) 567 | 568 | window -- the dimension of the smoothing window; should be an odd integer 569 | (default: 11) 570 | 571 | 572 | return: two lists [max_peaks, min_peaks] containing the positive and 573 | negative peaks respectively. 
Each cell of the lists contains a tuple 574 | of: (position, peak_value) 575 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 576 | results to unpack one of the lists into x, y coordinates do: 577 | x, y = zip(*max_peaks) 578 | """ 579 | # check input data 580 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 581 | 582 | zero_indices = zero_crossings(y_axis, window_len = window) 583 | period_lengths = np.diff(zero_indices) 584 | 585 | bins_y = [y_axis[index:index + diff] for index, diff in 586 | zip(zero_indices, period_lengths)] 587 | bins_x = [x_axis[index:index + diff] for index, diff in 588 | zip(zero_indices, period_lengths)] 589 | 590 | even_bins_y = bins_y[::2] 591 | odd_bins_y = bins_y[1::2] 592 | even_bins_x = bins_x[::2] 593 | odd_bins_x = bins_x[1::2] 594 | hi_peaks_x = [] 595 | lo_peaks_x = [] 596 | 597 | # check if even bin contains maxima 598 | if abs(even_bins_y[0].max()) > abs(even_bins_y[0].min()): 599 | hi_peaks = [bin.max() for bin in even_bins_y] 600 | lo_peaks = [bin.min() for bin in odd_bins_y] 601 | # get x values for peak 602 | for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, hi_peaks): 603 | hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 604 | for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, lo_peaks): 605 | lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 606 | else: 607 | hi_peaks = [bin.max() for bin in odd_bins_y] 608 | lo_peaks = [bin.min() for bin in even_bins_y] 609 | # get x values for peak 610 | for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, hi_peaks): 611 | hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 612 | for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, lo_peaks): 613 | lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 614 | 615 | max_peaks = [[x, y] for x,y in zip(hi_peaks_x, hi_peaks)] 616 | min_peaks = [[x, y] for x,y in zip(lo_peaks_x, lo_peaks)] 617 | 618 | return [max_peaks, min_peaks] 619 | 620 | 621 | def _smooth(x, window_len=11, window="hanning"): 622 | """ 623 | smooth the data using a window of the requested size. 624 | 625 | This method is based on the convolution of a scaled window on the signal. 626 | The signal is prepared by introducing reflected copies of the signal 627 | (with the window size) in both ends so that transient parts are minimized 628 | in the beginning and end part of the output signal. 
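For illustration, a hand-worked sketch of the reflection padding with
    window_len=3 (note that the last sample is mirrored inclusively):

        x = [1, 2, 3, 4, 5]
        s = np.r_[x[2:0:-1], x, x[-1:-3:-1]]  # -> [3, 2, 1, 2, 3, 4, 5, 5, 4]

    Because of this padding the convolved output is longer than the input,
    which is why callers such as 'zero_crossings' slice the result back to
    the original length.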
629 | 630 | keyword arguments: 631 | x -- the input signal 632 | 633 | window_len -- the dimension of the smoothing window; should be an odd 634 | integer (default: 11) 635 | 636 | window -- the type of window from 'flat', 'hanning', 'hamming', 637 | 'bartlett', 'blackman', where flat is a moving average 638 | (default: 'hanning') 639 | 640 | 641 | return: the smoothed signal 642 | 643 | example: 644 | 645 | t = linspace(-2,2,0.1) 646 | x = sin(t)+randn(len(t))*0.1 647 | y = _smooth(x) 648 | 649 | see also: 650 | 651 | numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, 652 | numpy.convolve, scipy.signal.lfilter 653 | """ 654 | 655 | if x.ndim != 1: 656 | raise ValueError("smooth only accepts 1 dimension arrays.") 657 | 658 | if x.size < window_len: 659 | raise ValueError("Input vector needs to be bigger than window size.") 660 | 661 | if window_len<3: 662 | return x 663 | # declare valid windows in a dictionary 664 | window_funcs = { 665 | "flat": lambda _len: np.ones(_len, "d"), 666 | "hanning": np.hanning, 667 | "hamming": np.hamming, 668 | "bartlett": np.bartlett, 669 | "blackman": np.blackman 670 | } 671 | 672 | s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]] 673 | try: 674 | w = window_funcs[window](window_len) 675 | except KeyError: 676 | raise ValueError( 677 | "Window is not one of '{0}', '{1}', '{2}', '{3}', '{4}'".format( 678 | *window_funcs.keys())) 679 | 680 | y = np.convolve(w / w.sum(), s, mode = "valid") 681 | 682 | return y 683 | 684 | 685 | def zero_crossings(y_axis, window_len = 11, 686 | window_f="hanning", offset_corrected=False): 687 | """ 688 | Algorithm to find zero crossings. Smooths the curve and finds the 689 | zero-crossings by looking for a sign change. 690 | 691 | 692 | keyword arguments: 693 | y_axis -- A list containing the signal over which to find zero-crossings 694 | 695 | window_len -- the dimension of the smoothing window; should be an odd 696 | integer (default: 11) 697 | 698 | window_f -- the type of window from 'flat', 'hanning', 'hamming', 699 | 'bartlett', 'blackman' (default: 'hanning') 700 | 701 | offset_corrected -- Used for recursive calling to remove offset when needed 702 | 703 | 704 | return: the index for each zero-crossing 705 | """ 706 | # smooth the curve 707 | length = len(y_axis) 708 | 709 | # discard tail of smoothed signal 710 | y_axis = _smooth(y_axis, window_len, window_f)[:length] 711 | indices = np.where(np.diff(np.sign(y_axis)))[0] 712 | 713 | # check if zero-crossings are valid 714 | diff = np.diff(indices) 715 | if diff.std() / diff.mean() > 0.1: 716 | # Possibly bad zero crossing, see if it's offsets 717 | if ((diff[::2].std() / diff[::2].mean()) < 0.1 and 718 | (diff[1::2].std() / diff[1::2].mean()) < 0.1 and 719 | not offset_corrected): 720 | # offset present attempt to correct by subtracting the average 721 | offset = np.mean([y_axis.max(), y_axis.min()]) 722 | return zero_crossings(y_axis-offset, window_len, window_f, True) 723 | # Invalid zero crossings and the offset has been removed 724 | print(diff.std() / diff.mean()) 725 | print(np.diff(indices)) 726 | raise ValueError( 727 | "False zero-crossings found, indicates problem {0!s} or {1!s}".format( 728 | "with smoothing window", "unhandled problem with offset")) 729 | # check if any zero crossings were found 730 | if len(indices) < 1: 731 | raise ValueError("No zero crossings found") 732 | # remove offset from indices due to filter function when returning 733 | return indices - (window_len // 2 - 1) 734 | # used this to test the fft function's 
sensitivity to spectral leakage 735 | # return indices + np.asarray(30 * np.random.randn(len(indices)), int) 736 | 737 | # -- Frequency calculation -- 738 | # diff = np.diff(indices) 739 | # time_p_period = diff.mean() 740 | # 741 | # if diff.std() / time_p_period > 0.1: 742 | # raise ValueError( 743 | # "smoothing window too small, false zero-crossing found") 744 | # 745 | # #return frequency 746 | # return 1.0 / time_p_period 747 | 748 | 749 | def zero_crossings_sine_fit(y_axis, x_axis, fit_window=None, smooth_window=11): 750 | """ 751 | Detects the zero crossings of a signal by fitting a sine model function 752 | around the zero crossings: 753 | y = A * sin(2 * pi * Hz * (x - tau)) + k * x + m 754 | Only tau (the zero crossing) is varied during fitting. 755 | 756 | Offset and a linear drift of offset is accounted for by fitting a linear 757 | function the negative respective positive raw peaks of the wave-shape and 758 | the amplitude is calculated using data from the offset calculation i.e. 759 | the 'm' constant from the negative peaks is subtracted from the positive 760 | one to obtain amplitude. 761 | 762 | Frequency is calculated using the mean time between raw peaks. 763 | 764 | Algorithm seems to be sensitive to first guess e.g. a large smooth_window 765 | will give an error in the results. 766 | 767 | 768 | keyword arguments: 769 | y_axis -- A list containing the signal over which to find peaks 770 | 771 | x_axis -- A x-axis whose values correspond to the y_axis list 772 | and is used in the return to specify the position of the peaks. If 773 | omitted an index of the y_axis is used. (default: None) 774 | 775 | fit_window -- Number of points around the approximate zero crossing that 776 | should be used when fitting the sine wave. Must be small enough that 777 | no other zero crossing will be seen. If set to none then the mean 778 | distance between zero crossings will be used (default: None) 779 | 780 | smooth_window -- the dimension of the smoothing window; should be an odd 781 | integer (default: 11) 782 | 783 | 784 | return: A list containing the positions of all the zero crossings. 
785 | """ 786 | 787 | # check input data 788 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 789 | # get first guess 790 | zero_indices = zero_crossings(y_axis, window_len = smooth_window) 791 | # modify fit_window to show distance per direction 792 | if fit_window: 793 | fit_window = np.diff(zero_indices).mean() // 3 794 | else: 795 | fit_window = fit_window // 2 796 | 797 | # x_axis is a np array, use the indices to get a subset with zero crossings 798 | approx_crossings = x_axis[zero_indices] 799 | 800 | # get raw peaks for calculation of offsets and frequency 801 | raw_peaks = peakdetect_zero_crossing(y_axis, x_axis) 802 | # Use mean time between peaks for frequency 803 | ext = lambda x: list(zip(*x)[0]) 804 | _diff = map(np.diff, map(ext, raw_peaks)) 805 | 806 | Hz = 1 / np.mean(map(np.mean, _diff)) 807 | # Hz = 1 / np.diff(approx_crossings).mean() #probably bad precision 808 | 809 | # offset model function 810 | offset_func = lambda x, k, m: k * x + m 811 | k = [] 812 | m = [] 813 | amplitude = [] 814 | 815 | for peaks in raw_peaks: 816 | # get peak data as nparray 817 | x_data, y_data = map(np.asarray, zip(*peaks)) 818 | # x_data = np.asarray(x_data) 819 | # y_data = np.asarray(y_data) 820 | # calc first guess 821 | A = np.mean(y_data) 822 | p0 = (0, A) 823 | popt, pcov = curve_fit(offset_func, x_data, y_data, p0) 824 | # append results 825 | k.append(popt[0]) 826 | m.append(popt[1]) 827 | amplitude.append(abs(A)) 828 | 829 | # store offset constants 830 | p_offset = (np.mean(k), np.mean(m)) 831 | A = m[0] - m[1] 832 | # define model function to fit to zero crossing 833 | # y = A * sin(2*pi * Hz * (x - tau)) + k * x + m 834 | func = lambda x, tau: A * np.sin(2 * pi * Hz * (x - tau)) + offset_func(x, *p_offset) 835 | 836 | # get true crossings 837 | true_crossings = [] 838 | for indice, crossing in zip(zero_indices, approx_crossings): 839 | p0 = (crossing, ) 840 | subset_start = max(indice - fit_window, 0.0) 841 | subset_end = min(indice + fit_window + 1, len(x_axis) - 1.0) 842 | x_subset = np.asarray(x_axis[subset_start:subset_end]) 843 | y_subset = np.asarray(y_axis[subset_start:subset_end]) 844 | # fit 845 | popt, pcov = curve_fit(func, x_subset, y_subset, p0) 846 | 847 | true_crossings.append(popt[0]) 848 | 849 | return true_crossings 850 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from setuptools import setup 4 | 5 | reqs = list() 6 | with open('requirements.txt') as file: 7 | for line in file: 8 | line = line.strip() 9 | 10 | if line != '': 11 | reqs.append(line) 12 | 13 | README = open('README.rst').read() 14 | 15 | setup( 16 | name='peakdetect', 17 | version='1.2', 18 | description='Simple peak detection', 19 | long_description=README, 20 | packages=['peakdetect'], 21 | url='https://github.com/avhn/peakdetect', 22 | author='avhn', 23 | install_requires=reqs 24 | ) 25 | -------------------------------------------------------------------------------- /stale.yml: -------------------------------------------------------------------------------- 1 | daysUntilStale: 60 2 | daysUntilClose: 7 3 | exemptLabels: 4 | - pinned 5 | - security 6 | staleLabel: stale 7 | markComment: > 8 | This issue has been 
automatically marked as stale because it has not had 9 | recent activity. It will be closed if no further activity occurs. Thank you 10 | for your contributions. 11 | closeComment: false 12 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import numpy as np 4 | import unittest 5 | 6 | import peakdetect 7 | import waveform 8 | 9 | # generate time axis for 5 cycles @ 50 Hz 10 | linspace_standard = np.linspace(0, 0.10, 1000) 11 | linspace_peakdetect = np.linspace(0, 0.10, 10000) 12 | 13 | 14 | def prng(): 15 | """ 16 | A numpy random number generator with a known starting state 17 | 18 | return: a random number generator 19 | """ 20 | return np.random.RandomState(773889874) 21 | 22 | 23 | def _write_log(file, header, message): 24 | with open(file, "ab") as f: 25 | f.write(header) 26 | f.write("\n") 27 | f.writelines(message) 28 | f.write("\n") 29 | f.write("\n") 30 | 31 | 32 | def _calculate_missmatch(received, expected): 33 | """ 34 | Calculates the mean mismatch between received and expected data 35 | 36 | keyword arguments: 37 | received -- [[time of peak], [ampl of peak]] 38 | expected -- [[time of peak], [ampl of peak]] 39 | 40 | return (time mismatch, ampl mismatch) 41 | """ 42 | #t_diff = np.abs(np.asarray(received[0]) - expected[0]) 43 | t_diff = np.asarray(received[0]) - expected[0] 44 | a_diff = np.abs(np.asarray(received[1]) - expected[1]) 45 | 46 | 47 | #t_diff /= np.abs(expected[0]) time error in absolute terms 48 | a_diff /= np.abs(expected[1]) 49 | 50 | return (t_diff, a_diff) 51 | 52 | 53 | def _log_diff(t_max, y_max, 54 | t_min, y_min, 55 | t_max_expected, y_max_expected, 56 | t_min_expected, y_min_expected, 57 | file, name 58 | ): 59 | """ 60 | keyword arguments: 61 | t_max -- time of maxima 62 | y_max -- amplitude of maxima 63 | t_min -- time of minima 64 | y_min -- amplitude of maxima 65 | t_max_expected -- expected time of maxima 66 | y_max_expected -- expected amplitude of maxima 67 | t_min_expected -- expected time of minima 68 | y_min_expected -- expected amplitude of maxima 69 | file -- log file to write to 70 | name -- name of the test performed 71 | """ 72 | t_diff_h, a_diff_h = _calculate_missmatch([t_max, y_max], 73 | [t_max_expected, y_max_expected]) 74 | 75 | 76 | t_diff_l, a_diff_l = _calculate_missmatch([t_min, y_min], 77 | [t_min_expected, y_min_expected]) 78 | 79 | #data = ["\t{0:.2e}\t{1:.2e}\t{2:.2e}\t{3:.2e}".format(*d) for d in 80 | # [t_diff_h, t_diff_l, a_diff_h, a_diff_l] 81 | # ] 82 | frt = "val:{0} error:{1:.2e}" 83 | data = ["\t{0}".format("\t".join(map(frt.format, val, err))) for val, err in 84 | [(t_max, t_diff_h), 85 | (t_min, t_diff_l), 86 | (y_max, a_diff_h), 87 | (y_min, a_diff_l)] 88 | ] 89 | 90 | _write_log(file, name, "\n".join(data)) 91 | 92 | 93 | def _is_close(max_p, min_p, 94 | expected_max, expected_min, 95 | atol_time, tol_ampl, 96 | file, name): 97 | """ 98 | Determines if the peaks are within the given tolerance 99 | 100 | keyword arguments: 101 | max_p -- location and value of maxima 102 | min_p -- location and value of minima 103 | expected_max -- expected location and value of maxima 104 | expected_min -- expected location and value of minima 105 | atol_time -- absolute tolerance of location of vertex 106 | tol_ampl -- relative tolerance of value of vertex 107 | file -- log file to write to 108 | name -- name of the test performed 109 | """ 110 | if len(max_p) == 5: 111 
| t_max_expected, y_max_expected = zip(*expected_max) 112 | else: 113 | if abs(max_p[0][0] - expected_max[0][0]) > 0.001: 114 | t_max_expected, y_max_expected = zip(*expected_max[1:]) 115 | else: 116 | t_max_expected, y_max_expected = zip(*expected_max[:-1]) 117 | 118 | if len(min_p) == 5: 119 | t_min_expected, y_min_expected = zip(*expected_min) 120 | else: 121 | t_min_expected, y_min_expected = zip(*expected_min[:-1]) 122 | 123 | t_max, y_max = zip(*max_p) 124 | t_min, y_min = zip(*min_p) 125 | 126 | t_max_close = np.isclose(t_max, t_max_expected, atol=atol_time, rtol=1e-12) 127 | y_max_close = np.isclose(y_max, y_max_expected, tol_ampl) 128 | t_min_close = np.isclose(t_min, t_min_expected, atol=atol_time, rtol=1e-12) 129 | y_min_close = np.isclose(y_min, y_min_expected, tol_ampl) 130 | 131 | _log_diff(t_max, y_max, t_min, y_min, 132 | t_max_expected, y_max_expected, 133 | t_min_expected, y_min_expected, 134 | file, name) 135 | 136 | return t_max_close, y_max_close, t_min_close, y_min_close 137 | 138 | 139 | class Test_analytic_wfm(unittest.TestCase): 140 | def test_ACV1(self): 141 | # compare with previous lambda implementation 142 | old = waveform._ACV_A1_L(linspace_standard) 143 | acv = waveform.ACV_A1(linspace_standard) 144 | 145 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 146 | 147 | def test_ACV2(self): 148 | # compare with previous lambda implementation 149 | old = waveform._ACV_A2_L(linspace_standard) 150 | acv = waveform.ACV_A2(linspace_standard) 151 | 152 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 153 | 154 | def test_ACV3(self): 155 | # compare with previous lambda implementation 156 | old = waveform._ACV_A3_L(linspace_standard) 157 | acv = waveform.ACV_A3(linspace_standard) 158 | 159 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 160 | 161 | def test_ACV4(self): 162 | # compare with previous lambda implementation 163 | old = waveform._ACV_A4_L(linspace_standard) 164 | acv = waveform.ACV_A4(linspace_standard) 165 | 166 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 167 | 168 | def test_ACV5(self): 169 | # compare with previous lambda implementation 170 | old = waveform._ACV_A5_L(linspace_standard) 171 | acv = waveform.ACV_A5(linspace_standard) 172 | 173 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 174 | 175 | def test_ACV6(self): 176 | # compare with previous lambda implementation 177 | old = waveform._ACV_A6_L(linspace_standard) 178 | acv = waveform.ACV_A6(linspace_standard) 179 | 180 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 181 | 182 | def test_ACV7(self): 183 | num = np.linspace(0, 20, 1000) 184 | old = waveform._ACV_A7_OLD(num) 185 | acv = waveform.ACV_A7(num) 186 | 187 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 188 | 189 | def test_ACV8(self): 190 | num = np.linspace(0, 3150, 10000) 191 | old = waveform._ACV_A8_OLD(num) 192 | acv = waveform.ACV_A8(num) 193 | 194 | self.assertTrue(np.allclose(acv, old, rtol=1e-9)) 195 | 196 | 197 | class TestPeakdetectTemplate(unittest.TestCase): 198 | func = None 199 | file = "Mismatch data.txt" 200 | name = "template" 201 | args = [] 202 | kwargs = {} 203 | msg_t = "Time of {0!s} not within tolerance:\n\t{1}" 204 | msg_y = "Amplitude of {0!s} not within tolerance:\n\t{1}" 205 | 206 | def _test_peak_template(self, waveform, 207 | expected_max, expected_min, 208 | wav_name, 209 | atol_time = 1e-5, tol_ampl = 1e-5): 210 | 211 | """ 212 | keyword arguments: 213 | waveform -- a function that given x can generate a test waveform 214 | expected_max -- position and amplitude where maxima are 
expected 215 | expected_min -- position and amplitude where minima are expected 216 | wav_name -- Name of the test waveform 217 | atol_time -- absolute tolerance for position of vertex (default: 1e-5) 218 | tol_ampl -- relative tolerance for position of vertex (default: 1e-5) 219 | """ 220 | 221 | y = waveform(linspace_peakdetect) 222 | max_p, min_p = self.func(y, linspace_peakdetect, 223 | *self.args, **self.kwargs 224 | ) 225 | # check if the correct amount of peaks were discovered 226 | self.assertIn(len(max_p), [4,5]) 227 | self.assertIn(len(min_p), [4,5]) 228 | 229 | # 230 | # check if position and amplitude is within 0.001% which is approx the 231 | # numeric uncertainty from the amount of samples used 232 | # 233 | t_max_close, y_max_close, t_min_close, y_min_close = _is_close(max_p, 234 | min_p, 235 | expected_max, 236 | expected_min, 237 | atol_time, tol_ampl, 238 | self.file, "{0}: {1}".format(wav_name, self.name)) 239 | 240 | # assert if values are outside of tolerance 241 | self.assertTrue(np.all(t_max_close), 242 | msg=self.msg_t.format("maxima", t_max_close)) 243 | self.assertTrue(np.all(y_max_close), 244 | msg=self.msg_y.format("maxima", y_max_close)) 245 | self.assertTrue(np.all(t_min_close), 246 | msg=self.msg_t.format("minima", t_min_close)) 247 | self.assertTrue(np.all(y_min_close), 248 | msg=self.msg_y.format("minima", y_min_close)) 249 | 250 | def test_peak_ACV1(self): 251 | peak_pos = 1000*np.sqrt(2) # 1414.2135623730951 252 | peak_neg = -peak_pos 253 | expected_max = [ 254 | (0.005, peak_pos), 255 | (0.025, peak_pos), 256 | (0.045, peak_pos), 257 | (0.065, peak_pos), 258 | (0.085, peak_pos) 259 | ] 260 | expected_min = [ 261 | (0.015, peak_neg), 262 | (0.035, peak_neg), 263 | (0.055, peak_neg), 264 | (0.075, peak_neg), 265 | (0.095, peak_neg) 266 | ] 267 | atol_time = 1e-5 268 | tol_ampl = 1e-6 269 | 270 | self._test_peak_template(waveform.ACV_A1, 271 | expected_max, expected_min, 272 | "ACV1", 273 | atol_time, tol_ampl) 274 | 275 | def test_peak_ACV2(self): 276 | peak_pos = 1000*np.sqrt(2) + 500 # 1414.2135623730951 + 500 277 | peak_neg = (-1000*np.sqrt(2)) + 500 # -914.2135623730951 278 | expected_max = [ 279 | (0.005, peak_pos), 280 | (0.025, peak_pos), 281 | (0.045, peak_pos), 282 | (0.065, peak_pos), 283 | (0.085, peak_pos) 284 | ] 285 | expected_min = [ 286 | (0.015, peak_neg), 287 | (0.035, peak_neg), 288 | (0.055, peak_neg), 289 | (0.075, peak_neg), 290 | (0.095, peak_neg) 291 | ] 292 | atol_time = 1e-5 293 | tol_ampl = 2e-6 294 | 295 | self._test_peak_template(waveform.ACV_A2, 296 | expected_max, expected_min, 297 | "ACV2", 298 | atol_time, tol_ampl) 299 | 300 | def test_peak_ACV3(self): 301 | """ 302 | Sine wave with a 3rd overtone 303 | 304 | WolframAlpha solution 305 | 306 | max{y = sin(100 pi x)+0.05 sin(400 pi x+(2 pi)/3)}~~ 307 | sin(6.28319 n+1.51306)-0.05 sin(25.1327 n+5.00505) 308 | at x~~0.00481623+0.02 n for integer n 309 | 310 | min{y = sin(100 pi x)+0.05 sin(400 pi x+(2 pi)/3)}~~ 311 | 0.05 sin(6.55488-25.1327 n)-sin(1.37692-6.28319 n) 312 | at x~~-0.00438287+0.02 n for integer n 313 | 314 | Derivative for 50 Hz in 2 alternative forms 315 | y = 100pi*cos(100pi*x) - 25pi*cos(400pi*x)-0.3464*50*pi*sin(400pi*x) 316 | y = 100pi*cos(100pi*x) + 20pi*cos(400pi*x + 2*pi/3) 317 | 318 | root 0 = 1/(50 * pi) * (pi*0 - 0.68846026579266880983) 319 | The exact solution according to WolframAlpha - I haven't the foggiest 320 | (tan^(-1)(root of 321 | {#1^2-3&, 11 #2^8-8 #1 #2^7-8 #2^6+56 #1 #2^5+70 #2^4-56 #1 #2^3-48 #2^2+8 #1 #2-9&}(x) 322 | near x = 
-0.822751)+pi n) / (50 * pi) 323 | 324 | 325 | root 1 = 1/(50 * pi) * (pi*0 + 0.75653155241276430710) 326 | 327 | period = 0.02 328 | """ 329 | 330 | base = 1000*np.sqrt(2) 331 | 332 | # def peak_pos(n): 333 | # return base * (np.sin(6.28319 * n + 1.51306) 334 | # -0.05*np.sin(25.1327 * n + 5.00505)) 335 | # def peak_neg(n): 336 | # return base * (0.05 * np.sin(6.55488 - 25.1327 * n) 337 | # - np.sin(1.37692 - 6.28319 * n)) 338 | 339 | def peak_pos(n): 340 | return base * (np.sin(2*np.pi * n + 1.51306) 341 | -0.05*np.sin(8*np.pi * n + 5.00505)) 342 | 343 | def peak_neg(n): 344 | return base * (0.05 * np.sin(6.55488 - 8*np.pi * n) 345 | - np.sin(1.37692 - 2*np.pi * n)) 346 | t_max = [ 347 | 0.75653155241276430710/(50*np.pi)+0.00,#0.004816229446859069 348 | 0.75653155241276430710/(50*np.pi)+0.02,#0.024816229446859069 349 | 0.75653155241276430710/(50*np.pi)+0.04,#0.044816229446859069 350 | 0.75653155241276430710/(50*np.pi)+0.06,#0.064816229446859069 351 | 0.75653155241276430710/(50*np.pi)+0.08 #0.084816229446859069 352 | ] 353 | t_min = [ 354 | -0.68846026579266880983/(50*np.pi)+0.02,#0.015617125823069466 355 | -0.68846026579266880983/(50*np.pi)+0.04,#0.035617125823069466 356 | -0.68846026579266880983/(50*np.pi)+0.06,#0.055617125823069466 357 | -0.68846026579266880983/(50*np.pi)+0.08,#0.075617125823069466 358 | -0.68846026579266880983/(50*np.pi)+0.10 #0.095617125823069466 359 | ] 360 | 361 | expected_max = [ 362 | (t_max[0], waveform.ACV_A3(t_max[0])), 363 | (t_max[1], waveform.ACV_A3(t_max[1])), 364 | (t_max[2], waveform.ACV_A3(t_max[2])), 365 | (t_max[3], waveform.ACV_A3(t_max[3])), 366 | (t_max[4], waveform.ACV_A3(t_max[4])), 367 | ] 368 | expected_min = [ 369 | (t_min[0], waveform.ACV_A3(t_min[0])), 370 | (t_min[1], waveform.ACV_A3(t_min[1])), 371 | (t_min[2], waveform.ACV_A3(t_min[2])), 372 | (t_min[3], waveform.ACV_A3(t_min[3])), 373 | (t_min[4], waveform.ACV_A3(t_min[4])), 374 | ] 375 | 376 | atol_time = 1e-5 377 | tol_ampl = 2e-6 378 | # reduced tolerance since the expected values are only approximated 379 | 380 | self._test_peak_template(waveform.ACV_A3, 381 | expected_max, expected_min, 382 | "ACV3", 383 | atol_time, tol_ampl) 384 | 385 | def test_peak_ACV4(self): 386 | """ 387 | Sine wave with a 4th overtone 388 | 389 | Expected data is from a numerical solution using 1e8 samples 390 | The numerical solution used about 2 GB memory and required 64-bit 391 | python 392 | 393 | Test is currently disabled as it pushes time index forward enough to 394 | change what peaks are discovers by peakdetect_fft, such that the last 395 | maxima is lost instead of the first one, which is expected from all the 396 | other functions 397 | """ 398 | expected_max = [ 399 | (0.0059351920593519207, 1409.2119572886963), 400 | (0.025935191259351911, 1409.2119572887088), 401 | (0.045935191459351918, 1409.2119572887223), 402 | (0.065935191659351911, 1409.2119572887243), 403 | (0.085935191859351917, 1409.2119572887166) 404 | ] 405 | expected_min = [ 406 | (0.015935191159351911, -1409.2119572886984), 407 | (0.035935191359351915, -1409.2119572887166), 408 | (0.055935191559351914, -1409.2119572887245), 409 | (0.075935191759351914, -1409.2119572887223), 410 | (0.09593519195935192, -1409.2119572887068) 411 | ] 412 | atol_time = 1e-5 413 | tol_ampl = 2.5e-6 414 | # reduced tolerance since the expected values are only approximated 415 | 416 | self._test_peak_template(waveform.ACV_A4, 417 | expected_max, expected_min, 418 | "ACV4", 419 | atol_time, tol_ampl) 420 | 421 | def test_peak_ACV5(self): 422 | """ 423 | 
Realistic triangle wave 424 | 425 | Easy enough to solve, but here is the numerical solution from 1e8 426 | samples. Numerical solution used about 2 GB memory and required 427 | 64-bit python 428 | 429 | expected_max = [ 430 | [0.0050000000500000008, 1598.0613254815967] 431 | [0.025000000250000001, 1598.0613254815778], 432 | [0.045000000450000008, 1598.0613254815346], 433 | [0.064999999650000001, 1598.0613254815594], 434 | [0.084999999849999994, 1598.0613254815908] 435 | ] 436 | expected_min = [ 437 | [0.015000000150000001, -1598.0613254815908], 438 | [0.035000000350000005, -1598.0613254815594], 439 | [0.054999999549999998, -1598.0613254815346], 440 | [0.074999999750000004, -1598.0613254815778], 441 | [0.094999999949999997, -1598.0613254815967] 442 | ] 443 | """ 444 | 445 | peak_pos = 1130*np.sqrt(2) # 1598.0613254815976 446 | peak_neg = -1130*np.sqrt(2) # -1598.0613254815967 447 | expected_max = [ 448 | (0.005, peak_pos), 449 | (0.025, peak_pos), 450 | (0.045, peak_pos), 451 | (0.065, peak_pos), 452 | (0.085, peak_pos) 453 | ] 454 | expected_min = [ 455 | (0.015, peak_neg), 456 | (0.035, peak_neg), 457 | (0.055, peak_neg), 458 | (0.075, peak_neg), 459 | (0.095, peak_neg) 460 | ] 461 | atol_time = 1e-5 462 | tol_ampl = 4e-6 463 | 464 | self._test_peak_template(waveform.ACV_A5, 465 | expected_max, expected_min, 466 | "ACV5", 467 | atol_time, tol_ampl) 468 | 469 | def test_peak_ACV6(self): 470 | """ 471 | Realistic triangle wave 472 | 473 | Easy enough to solve, but here is the numerical solution from 1e8 474 | samples. Numerical solution used about 2 GB memory and required 475 | 64-bit python 476 | 477 | expected_max = [ 478 | [0.0050000000500000008, 1485.6313472729362], 479 | [0.025000000250000001, 1485.6313472729255], 480 | [0.045000000450000008, 1485.6313472729012], 481 | [0.064999999650000001, 1485.6313472729153], 482 | [0.084999999849999994, 1485.6313472729323] 483 | ] 484 | expected_min = [ 485 | [0.015000000150000001, -1485.6313472729323], 486 | [0.035000000350000005, -1485.6313472729153], 487 | [0.054999999549999998, -1485.6313472729012], 488 | [0.074999999750000004, -1485.6313472729255], 489 | [0.094999999949999997, -1485.6313472729362] 490 | ] 491 | """ 492 | peak_pos = 1050.5*np.sqrt(2) # 1485.6313472729364 493 | peak_neg = -1050.5*np.sqrt(2) # 1485.6313472729255 494 | expected_max = [ 495 | (0.005, peak_pos), 496 | (0.025, peak_pos), 497 | (0.045, peak_pos), 498 | (0.065, peak_pos), 499 | (0.085, peak_pos) 500 | ] 501 | expected_min = [ 502 | (0.015, peak_neg), 503 | (0.035, peak_neg), 504 | (0.055, peak_neg), 505 | (0.075, peak_neg), 506 | (0.095, peak_neg) 507 | ] 508 | atol_time = 1e-5 509 | tol_ampl = 2.5e-6 510 | 511 | self._test_peak_template(waveform.ACV_A6, 512 | expected_max, expected_min, 513 | "ACV6", 514 | atol_time, tol_ampl) 515 | 516 | 517 | class Test_peakdetect(TestPeakdetectTemplate): 518 | name = "peakdetect" 519 | 520 | def __init__(self, *args, **kwargs): 521 | super(Test_peakdetect, self).__init__(*args, **kwargs) 522 | self.func = peakdetect.peakdetect 523 | 524 | 525 | class Test_peakdetect_fft(TestPeakdetectTemplate): 526 | name = "peakdetect_fft" 527 | 528 | def __init__(self, *args, **kwargs): 529 | super(Test_peakdetect_fft, self).__init__(*args, **kwargs) 530 | self.func = peakdetect.peakdetect_fft 531 | 532 | 533 | class Test_peakdetect_parabola(TestPeakdetectTemplate): 534 | name = "peakdetect_parabola" 535 | 536 | def __init__(self, *args, **kwargs): 537 | super(Test_peakdetect_parabola, self).__init__(*args, **kwargs) 538 | self.func = 
peakdetect.peakdetect_parabola 539 | 540 | 541 | class Test_peakdetect_sine(TestPeakdetectTemplate): 542 | name = "peakdetect_sine" 543 | 544 | def __init__(self, *args, **kwargs): 545 | super(Test_peakdetect_sine, self).__init__(*args, **kwargs) 546 | self.func = peakdetect.peakdetect_sine 547 | 548 | 549 | class Test_peakdetect_sine_locked(TestPeakdetectTemplate): 550 | name = "peakdetect_sine_locked" 551 | 552 | def __init__(self, *args, **kwargs): 553 | super(Test_peakdetect_sine_locked, self).__init__(*args, **kwargs) 554 | self.func = peakdetect.peakdetect_sine_locked 555 | 556 | 557 | class Test_peakdetect_spline(TestPeakdetectTemplate): 558 | name = "peakdetect_spline" 559 | 560 | def __init__(self, *args, **kwargs): 561 | super(Test_peakdetect_spline, self).__init__(*args, **kwargs) 562 | self.func = peakdetect.peakdetect_spline 563 | 564 | 565 | class Test_peakdetect_zero_crossing(TestPeakdetectTemplate): 566 | name = "peakdetect_zero_crossing" 567 | 568 | def __init__(self, *args, **kwargs): 569 | super(Test_peakdetect_zero_crossing, self).__init__(*args, **kwargs) 570 | self.func = peakdetect.peakdetect_zero_crossing 571 | 572 | 573 | class Test_peakdetect_misc(unittest.TestCase): 574 | def test__pad(self): 575 | data = [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1] 576 | pad_len = 2 577 | pad = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:] 578 | expected = pad(list(data), 2 ** 579 | peakdetect._n(len(data) * pad_len) - len(data)) 580 | received = peakdetect._pad(data, pad_len) 581 | 582 | self.assertListEqual(received, expected) 583 | def test__n(self): 584 | self.assertEqual(2**peakdetect._n(1000), 1024) 585 | 586 | def test_zero_crossings(self): 587 | y = waveform.ACV_A1(linspace_peakdetect) 588 | expected_indice = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000] 589 | indice = peakdetect.zero_crossings(y, 50) 590 | msg = "index:{0:d} should be within 1 of expected:{1:d}" 591 | for rec, exp in zip(indice, expected_indice): 592 | self.assertAlmostEqual(rec, exp, delta=1, msg=msg.format(rec, exp)) 593 | 594 | 595 | if __name__ == "__main__": 596 | tests_to_run = [ 597 | # Test_analytic_wfm, 598 | Test_peakdetect, 599 | Test_peakdetect_parabola, 600 | Test_peakdetect_fft, 601 | # Test_peakdetect_sine, #sine tests disabled pending rework 602 | # Test_peakdetect_sine_locked, 603 | Test_peakdetect_spline, 604 | Test_peakdetect_zero_crossing, 605 | Test_peakdetect_misc 606 | ] 607 | 608 | suites_list = [unittest.TestLoader().loadTestsFromTestCase(test_class) for test_class in tests_to_run] 609 | big_suite = unittest.TestSuite(suites_list) 610 | unittest.TextTestRunner(verbosity=2).run(big_suite) -------------------------------------------------------------------------------- /waveform.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from math import pi, sqrt 4 | import numpy as np 5 | 6 | __all__ = [ 7 | 'ACV_A1', 8 | 'ACV_A2', 9 | 'ACV_A3', 10 | 'ACV_A4', 11 | 'ACV_A5', 12 | 'ACV_A6', 13 | 'ACV_A7', 14 | 'ACV_A8' 15 | ] 16 | 17 | # Heavy-side step function 18 | H_num = lambda t: 1 if t > 0 else 0 19 | H = lambda T: np.asarray([1 if t > 0 else 0 for t in T]) 20 | 21 | 22 | # pure sine 23 | def ACV_A1(T, Hz=50): 24 | """ 25 | Generate a pure sine wave at a specified frequency 26 | 27 | keyword arguments: 28 | T -- time points to generate the waveform given in seconds 29 | Hz -- The desired frequency of the signal (default:50) 30 | """ 31 | ampl = 1000 32 | T = np.asarray(T, dtype=np.float64) 33 | return ampl 
/waveform.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from math import pi, sqrt
4 | import numpy as np
5 | 
6 | __all__ = [
7 |     'ACV_A1',
8 |     'ACV_A2',
9 |     'ACV_A3',
10 |     'ACV_A4',
11 |     'ACV_A5',
12 |     'ACV_A6',
13 |     'ACV_A7',
14 |     'ACV_A8'
15 | ]
16 | 
17 | # Heaviside step function
18 | H_num = lambda t: 1 if t > 0 else 0
19 | H = lambda T: np.asarray([1 if t > 0 else 0 for t in T])
20 | 
21 | 
22 | # pure sine
23 | def ACV_A1(T, Hz=50):
24 |     """
25 |     Generate a pure sine wave at a specified frequency
26 | 
27 |     keyword arguments:
28 |     T -- time points to generate the waveform given in seconds
29 |     Hz -- The desired frequency of the signal (default:50)
30 |     """
31 |     ampl = 1000
32 |     T = np.asarray(T, dtype=np.float64)
33 |     return ampl * sqrt(2) * np.sin(2*pi*Hz * T)
34 | 
35 | 
36 | def ACV_A2(T, Hz=50):
37 |     """
38 |     Generate a pure sine wave with a DC offset at a specified frequency
39 | 
40 |     keyword arguments:
41 |     T -- time points to generate the waveform given in seconds
42 |     Hz -- The desired frequency of the signal (default:50)
43 |     """
44 |     ampl = 1000
45 |     offset = 500
46 |     T = np.asarray(T, dtype=np.float64)
47 |     return ampl * sqrt(2) * np.sin(2*pi*Hz * T) + offset
48 | 
49 | 
50 | def ACV_A3(T, Hz=50):
51 |     """
52 |     Generate a fundamental with a 3rd overtone
53 | 
54 |     keyword arguments:
55 |     T -- time points to generate the waveform given in seconds
56 |     Hz -- The desired frequency of the signal (default:50)
57 |     """
58 |     ampl = 1000
59 |     T = np.asarray(T, dtype=np.float64)
60 |     main_wave = np.sin(2*pi*Hz * T)
61 |     harmonic_wave = 0.05 * np.sin(2*pi*Hz * T * 4 + pi * 2 / 3)
62 |     return ampl * sqrt(2) * (main_wave + harmonic_wave)
63 | 
64 | 
65 | def ACV_A4(T, Hz=50):
66 |     """
67 |     Generate a fundamental with a 4th overtone
68 | 
69 |     keyword arguments:
70 |     T -- time points to generate the waveform given in seconds
71 |     Hz -- The desired frequency of the signal (default:50)
72 |     """
73 |     ampl = 1000
74 |     T = np.asarray(T, dtype=np.float64)
75 |     main_wave = np.sin(2*pi*Hz * T)
76 |     harmonic_wave = 0.07 * np.sin(2*pi*Hz * T * 5 + pi * 22 / 18)
77 |     return ampl * sqrt(2) * (main_wave + harmonic_wave)
78 | 
79 | 
80 | def ACV_A5(T, Hz=50):
81 |     """
82 |     Generate a realistic triangle wave
83 | 
84 |     keyword arguments:
85 |     T -- time points to generate the waveform given in seconds
86 |     Hz -- The desired frequency of the signal (default:50)
87 |     """
88 |     ampl = 1000
89 |     T = np.asarray(T, dtype=np.float64)
90 |     wave_1 = np.sin(2*pi*Hz * T)
91 |     wave_2 = 0.05 * np.sin(2*pi*Hz * T * 3 - pi)
92 |     wave_3 = 0.05 * np.sin(2*pi*Hz * T * 5)
93 |     wave_4 = 0.02 * np.sin(2*pi*Hz * T * 7 - pi)
94 |     wave_5 = 0.01 * np.sin(2*pi*Hz * T * 9)
95 |     return ampl * sqrt(2) * (wave_1 + wave_2 + wave_3 + wave_4 + wave_5)
96 | 
97 | 
98 | def ACV_A6(T, Hz=50):
99 |     """
100 |     Generate a realistic triangle wave with weaker harmonic content than ACV_A5
101 | 
102 |     keyword arguments:
103 |     T -- time points to generate the waveform given in seconds
104 |     Hz -- The desired frequency of the signal (default:50)
105 |     """
106 |     ampl = 1000
107 |     T = np.asarray(T, dtype=np.float64)
108 |     wave_1 = np.sin(2*pi*Hz * T)
109 |     wave_2 = 0.02 * np.sin(2*pi*Hz * T * 3 - pi)
110 |     wave_3 = 0.02 * np.sin(2*pi*Hz * T * 5)
111 |     wave_4 = 0.0015 * np.sin(2*pi*Hz * T * 7 - pi)
112 |     wave_5 = 0.009 * np.sin(2*pi*Hz * T * 9)
113 |     return ampl * sqrt(2) * (wave_1 + wave_2 + wave_3 + wave_4 + wave_5)
114 | 
115 | 
116 | def ACV_A7(T, Hz=50):
117 |     """
118 |     Generate a growing sine wave: the amplitude starts at 0, reaches 0.9 of
119 |     full amplitude after 250 cycles, then increases linearly to full
120 |     amplitude at 500 cycles, after which the signal is 0
121 | 
122 |     The frequency is locked to 50 Hz and the signal is 0 for t > 10 s
123 | 
124 |     keyword arguments:
125 |     T -- time points to generate the waveform given in seconds
126 |     Hz -- The desired frequency of the signal (default:50)
127 |     """
128 |     ampl = 1000
129 |     Hz = 50
130 |     T = np.asarray(T, dtype=np.float64)
131 |     wave_main = np.sin(2*pi*Hz * T)
132 |     step_func = (0.9 * T / 5 * H(5-T) + H(T-5) * H(10-T) * (0.9 + 0.1 * (T-5) / 5))
133 |     return ampl * sqrt(2) * wave_main * step_func
134 | 
135 | def ACV_A8(T, Hz=50):
136 |     """
137 |     Generate a growing sine wave, which reaches 100 times the amplitude at
138 |     500 cycles
139 | 
140 |     The Hz argument is not implemented: the underlying sine is sin(T), i.e. a
141 |     frequency of 1/(2*pi) ~ 0.159 Hz, and the signal is 0 for T > 1000*pi
142 | 
143 |     keyword arguments:
144 |     T -- time points to generate the waveform given in seconds
145 |     Hz -- The desired frequency of the signal (default:50)
146 |     """
147 |     ampl = 1000
148 |     Hz = 50
149 |     T = np.asarray(T, dtype=np.float64)
150 |     wave_main = np.sin(T)
151 |     step_func = T / (10 * pi) * H(10 - T / (2*pi*Hz))
152 |     return ampl * sqrt(2) * wave_main * step_func
153 | 
154 | 
155 | _ACV_A1_L = lambda T, Hz=50: 1000 * sqrt(2) * np.sin(2*pi*Hz * T)
156 | 
157 | _ACV_A2_L = lambda T, Hz=50: 1000 * sqrt(2) * np.sin(2*pi*Hz * T) + 500
158 | 
159 | _ACV_A3_L = lambda T, Hz=50: 1000 * sqrt(2) * (np.sin(2*pi*Hz * T) +
160 |     0.05 * np.sin(2*pi*Hz * T * 4 + pi * 2 / 3))
161 | 
162 | _ACV_A4_L = lambda T, Hz=50: (1000 * sqrt(2) * (np.sin(2*pi*Hz * T) +
163 |     0.07 * np.sin(2*pi*Hz * T * 5 + pi * 22 / 18)))
164 | 
165 | # Realistic triangle
166 | _ACV_A5_L = lambda T, Hz=50: (1000 * sqrt(2) * (np.sin(2*pi*Hz * T) +
167 |     0.05 * np.sin(2*pi*Hz * T * 3 - pi) +
168 |     0.05 * np.sin(2*pi*Hz * T * 5) +
169 |     0.02 * np.sin(2*pi*Hz * T * 7 - pi) +
170 |     0.01 * np.sin(2*pi*Hz * T * 9)))
171 | 
172 | _ACV_A6_L = lambda T, Hz=50: (1000 * sqrt(2) * (np.sin(2*pi*Hz * T) +
173 |     0.02 * np.sin(2*pi*Hz * T * 3 - pi) +
174 |     0.02 * np.sin(2*pi*Hz * T * 5) +
175 |     0.0015 * np.sin(2*pi*Hz * T * 7 - pi) +
176 |     0.009 * np.sin(2*pi*Hz * T * 9)))
177 | 
178 | # A7 & A8 are converted so that an input of 16*pi corresponds to an input of 0.25 in the current version
179 | _ACV_A7_OLD = lambda T: [1000 * sqrt(2) * np.sin(100 * pi * t) *
180 |     (0.9 * t / 5 * H_num(5-t) + H_num(t-5) * H_num(10-t) * (0.9 + 0.1 * (t-5) / 5)) for t in T]
181 | _ACV_A8_OLD = lambda T: [1000 * sqrt(2) * np.sin(t) *
182 |     t / (10 * pi) * H_num(10 - t / (100 * pi)) for t in T]
183 | 
--------------------------------------------------------------------------------
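Putting the two modules together: waveform.py supplies the analytic test signals and peakdetect supplies the detectors the suite exercises. A minimal end-to-end sketch, not part of the repository: the time grid below is a hypothetical stand-in for the linspace_peakdetect array defined earlier in test.py, and the script assumes it is run from the repository root so that waveform.py is importable.

```python
import numpy as np

import peakdetect
import waveform

# Five cycles of the 50 Hz sine from ACV_A1, sampled on a 10000-point grid
# (a guessed grid, standing in for test.py's linspace_peakdetect).
t = np.linspace(0, 0.1, 10000)
y = waveform.ACV_A1(t, Hz=50)

# lookahead is measured in samples; 500 is comfortably below the
# 2000 samples per 50 Hz period on this grid. delta is left at its default.
maxima, minima = peakdetect.peakdetect(y, t, lookahead=500)

# Each entry is an [x, y] pair; every maximum should sit near 1000 * sqrt(2).
for x, value in maxima:
    print("maximum of {0:.1f} at t = {1:.4f} s".format(value, x))
```

ACV_A2 through ACV_A8 can be swapped in the same way to exercise offsets, added harmonics, and growing envelopes.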