├── README.md
├── VMD.py
└── VMD_test.py

/README.md:
--------------------------------------------------------------------------------
# VMD_python

Variational Mode Decomposition for Python

This is a Python implementation of Variational Mode Decomposition (VMD).

Authors of the original method: Konstantin Dragomiretskiy and Dominique Zosso

## Input and Parameters:

---------------------
signal - the time-domain signal (1D) to be decomposed

alpha - the balancing parameter of the data-fidelity constraint

tau - time-step of the dual ascent (pick 0 for noise-slack)

K - the number of modes to be recovered

DC - true if the first mode is put and kept at DC (0 frequency)

init - 0 = all omegas start at 0

- 1 = all omegas start uniformly distributed

- 2 = all omegas initialized randomly

tol - tolerance of the convergence criterion; typically around 1e-6


## Output:
-------
u - the collection of decomposed modes

u_hat - spectra of the modes

omega - estimated mode center frequencies

## When using this code, please cite the paper:
-----------------------------------------------

K. Dragomiretskiy and D. Zosso, "Variational Mode Decomposition," IEEE Transactions on Signal Processing, vol. 62, no. 3, pp. 531-544, 2014.
DOI: http://dx.doi.org/10.1109/TSP.2013.2288675

--------------------------------------------------------------------------------
/VMD.py:
--------------------------------------------------------------------------------
def VMD(signal, alpha, tau, K, DC, init, tol):
    # ---------------------
    # signal - the time-domain signal (1D) to be decomposed
    # alpha  - the balancing parameter of the data-fidelity constraint
    # tau    - time-step of the dual ascent (pick 0 for noise-slack)
    # K      - the number of modes to be recovered
    # DC     - true if the first mode is put and kept at DC (0 frequency)
    # init   - 0 = all omegas start at 0
    #          1 = all omegas start uniformly distributed
    #          2 = all omegas initialized randomly
    # tol    - tolerance of the convergence criterion; typically around 1e-6
    #
    # Output:
    # -------
    # u      - the collection of decomposed modes
    # u_hat  - spectra of the modes
    # omega  - estimated mode center frequencies
    #

    import numpy as np
    import math
    import matplotlib.pyplot as plt

    # Period and sampling frequency of the input signal
    save_T = len(signal)
    fs = 1 / float(save_T)

    # Extend the signal by mirroring its first and last halves
    T = save_T
    f_mirror = np.zeros(2 * T)
    f_mirror[0:T // 2] = signal[T // 2 - 1::-1]
    f_mirror[T // 2:3 * T // 2] = signal
    f_mirror[3 * T // 2:2 * T] = signal[-1:-T // 2 - 1:-1]
    f = f_mirror

    # Time domain 0 to T (of the mirrored signal)
    T = float(len(f))
    t = np.linspace(1 / T, 1, int(T), endpoint=True)

    # Spectral domain discretization
    freqs = t - 0.5 - 1 / T

    # Maximum number of iterations (if not converged yet, then it won't anyway)
    N = 500

    # For future generalizations: individual alpha for each mode
    Alpha = alpha * np.ones(K, dtype=complex)

    # Construct and center f_hat
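    # The next few lines build the one-sided spectrum that the VMD updates
    # operate on: np.fft.fft(f) returns the two-sided spectrum in standard FFT
    # order, and fftshift re-centers it so that bin k lines up with freqs[k],
    # which runs from -0.5 up to 0.5 - 1/T in normalized frequency. Zeroing the
    # first half of the shifted spectrum removes the negative frequencies,
    # leaving the positive-frequency ("analytic") part, as in the authors'
    # reference MATLAB implementation.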
    f_hat = np.fft.fftshift(np.fft.fft(f))
    # Copy before zeroing so the full spectrum f_hat is not modified in place
    f_hat_plus = np.copy(f_hat)
    f_hat_plus[0:int(T) // 2] = 0

    # Matrix keeping track of every iterate // could be discarded for memory
    u_hat_plus = np.zeros((N, len(freqs), K), dtype=complex)

    # Initialization of omega_k
    omega_plus = np.zeros((N, K), dtype=complex)

    if init == 1:
        for i in range(1, K + 1):
            omega_plus[0, i - 1] = (0.5 / K) * (i - 1)
    elif init == 2:
        # Random initialization, log-uniformly distributed between fs and 0.5
        omega_plus[0, :] = np.sort(np.exp(np.log(fs) + (np.log(0.5) - np.log(fs)) * np.random.rand(K)))
    else:
        omega_plus[0, :] = 0

    if DC:
        omega_plus[0, 0] = 0

    # Start with empty dual variables
    lamda_hat = np.zeros((N, len(freqs)), dtype=complex)

    # Other inits
    uDiff = tol + 2.2204e-16  # update step
    n = 1  # loop counter
    sum_uk = 0  # accumulator

    T = int(T)


    # ----------- Main loop for iterative updates

    while uDiff > tol and n