├── LMS&ItsVariantsReport.pdf
├── LMS and its variants_ProjectDescription.pdf
├── README.md
├── LEvinSonDurbin.m
├── SHARF.m
├── NewtonAlgorithm.m
├── NormalizedLMS.m
├── LMS.m
├── BlockLMS.m
├── FDAF_LMS.m
├── OutPutErrorModel.m
└── RLS_LMS.m

/LMS&ItsVariantsReport.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chinmaysahu/LMS-itsVariants/HEAD/LMS&ItsVariantsReport.pdf
--------------------------------------------------------------------------------
/LMS and its variants_ProjectDescription.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chinmaysahu/LMS-itsVariants/HEAD/LMS and its variants_ProjectDescription.pdf
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LMS-itsVariants
Adaptive signal processing algorithms, including LMS, normalized LMS, LMS-Newton, block LMS, frequency-domain adaptive filtering (FDAF), SHARF, output-error adaptive IIR filtering, RLS, and the Levinson-Durbin recursion, are studied and implemented in MATLAB.
--------------------------------------------------------------------------------
/LEvinSonDurbin.m:
--------------------------------------------------------------------------------
% Levinson-Durbin recursion: estimate the AR coefficients of an AR(1) process
mcN = 1;                        % number of Monte Carlo runs
N = 10000;                      % samples per run

b = 1;
a = [1 -0.8];                   % true AR parameters of the plant
s = 3;                          % order of the recursion

c_hat_array = zeros(s+1,mcN);
for mc_loop = 1:mcN
    x = randn(N,1);             % white excitation
    d = filter(b,a,x);          % AR process to be modelled
    r = autocorr(d);            % normalized sample autocorrelation, lag 0 at r(1)
                                % (autocorr is provided by the Econometrics Toolbox)
    y = zeros(s,1);             % reflection (PARCOR) coefficients
    y(1) = r(2);
    beta = 1;                   % normalized prediction-error power
    c = r(2);                   % first-order predictor coefficient
    g = r(2);                   % reflection coefficient
    % order-recursive Levinson-Durbin update
    for k = 2:s
        beta = (1-g*g)*beta;
        g = (r(k+1) - c.'*r(2:k))/beta;
        c = [g; c - g*c(k-1:-1:1)];
        y(k) = g;
    end
    c_hat_array(:,mc_loop) = [1; -c(s:-1:1)];   % estimated [1 a1 ... as]
end
mean(c_hat_array,2)             % average estimate over the Monte Carlo runs
--------------------------------------------------------------------------------
/SHARF.m:
--------------------------------------------------------------------------------
% SHARF (Simple Hyperstable Adaptive Recursive Filter) algorithm
mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run

b = 1;
a = [1 0.8 0.64 0.512];         % true plant denominator coefficients
c_hat = [0 0 0 0]';             % initial coefficient estimates
s = length(c_hat);
mu = 0.03;                      % step size

% Monte Carlo loop
e = zeros(mcN,N);
for mc_loop = 1:mcN
    c_hat = [0 0 0 0]';         % restart the estimate for every run
    x = randn(N,1);
    d = filter(b,a,x);
    % adaptation iterations
    for LMS_loop = s:N-s
        u = zeros(s,1);         % regressor: current input and past outputs
        u(1) = x(LMS_loop);
        u(2:s) = -d(LMS_loop-1:-1:LMS_loop-s+1);
        e(mc_loop,LMS_loop) = d(LMS_loop) - u.'*c_hat;

        c_hat = c_hat + 2*mu*e(mc_loop,LMS_loop)*u;
    end
end
MSE = mean(e(:,s:N).^2);        % learning curve averaged over the runs
if mcN==1
    MSE = e(:,s:N).^2;
end
plot(s:N,db(MSE))
--------------------------------------------------------------------------------
/NewtonAlgorithm.m:
--------------------------------------------------------------------------------
% LMS-Newton algorithm example (FIR system identification)

mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run

a = 1;
b = [1 0.2 0 -0.8];             % true FIR plant coefficients
% bn = [1 -0.8];                % (optional colored-input experiment)

b_hat = [0 0 0 0]';             % initial coefficient estimates
s = length(b_hat);

mu = 0.05;                      % step size

% Monte Carlo loop
e = zeros(mcN,N);
for mc_loop = 1:mcN
    b_hat = zeros(s,1);         % restart the estimate for every run
    x = randn(N,1);
    % x = filter(bn,1,w)/100;   % (optional colored input, unused)

    d = filter(b,a,x);

    % LMS-Newton iterations: the update is premultiplied by the inverse input
    % autocorrelation matrix, which is the identity for unit-variance white input
    for LMS_loop = s:N
        x_e = x(LMS_loop:-1:LMS_loop-s+1);
        e(mc_loop,LMS_loop) = d(LMS_loop) - b_hat'*x_e;
        b_hat = b_hat + 2*mu*eye(s)*x_e*e(mc_loop,LMS_loop);
    end

    b_hat                       % display the estimate after each run
    % plot(s:N,db(e(s:N).^2))

end

MSE = mean(e(:,s:N).^2);        % learning curve averaged over the runs
box on
hold on
plot(s:N,db(MSE))
--------------------------------------------------------------------------------
/NormalizedLMS.m:
--------------------------------------------------------------------------------
% Normalized LMS example (FIR system identification)

mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run

a = 1;
b = [1 0.2 0 -0.8];             % true FIR plant coefficients

b_hat = [0 0 0 0]';             % initial coefficient estimates
s = length(b_hat);
Rxx = eye(s);                   % assumed input autocorrelation (unit-variance white input)
mu = 1;                         % unnormalized step size
norm_mu = mu/((s+1)*trace(Rxx)); % step size normalized by the estimated input power

% Monte Carlo loop
e = zeros(mcN,N);
for mc_loop = 1:mcN
    b_hat = zeros(s,1);         % restart the estimate for every run
    x = randn(N,1);
    d = filter(b,a,x);
    % [~,R]=corrmtx(x,10);      % (optional: data-driven normalization)
    % norm_mu=mu/((s+1)*trace(R));
    % NLMS iterations
    for LMS_loop = s:N
        x_e = x(LMS_loop:-1:LMS_loop-s+1);
        e(mc_loop,LMS_loop) = d(LMS_loop) - b_hat'*x_e;
        b_hat = b_hat + 2*norm_mu*x_e*e(mc_loop,LMS_loop);
    end
    b_hat                       % display the estimate after each run
    % plot(s:N,db(e(s:N).^2))
end
MSE = mean(e(:,s:N).^2);        % learning curve averaged over the runs
box on
hold on
plot(s:N,db(MSE))
--------------------------------------------------------------------------------
/LMS.m:
--------------------------------------------------------------------------------
% LMS example (FIR identification of an IIR plant)

mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run

b = 1;
a = [1 -0.8];                   % true IIR plant coefficients
% b_hat = [0 0]';               % initial coefficient estimates, L = 2
% b_hat = [0 0 0 0]';           % initial coefficient estimates, L = 4
b_hat = zeros(26,1);            % initial coefficient estimates, L = 26 taps
s = length(b_hat);

mu = 0.01;                      % step size

% Monte Carlo loop
e = zeros(mcN,N);
for mc_loop = 1:mcN
    b_hat = zeros(s,1);         % restart the estimate for every run
    x = randn(N,1);
    d = filter(b,a,x);
    % LMS iterations
    for LMS_loop = s:N
        x_e = x(LMS_loop:-1:LMS_loop-s+1);
        e(mc_loop,LMS_loop) = d(LMS_loop) - b_hat'*x_e;
        b_hat = b_hat + 2*mu*x_e*e(mc_loop,LMS_loop);
    end
    b_hat                       % display the estimate after each run
    % plot(s:N,db(e(s:N).^2))
end

MSE = mean(e(:,s:N).^2);        % learning curve averaged over the runs

% plot(s:N,db(MSE))

% d_hat=filter(b_hat,1,x);
% figure
% freqz(b,a);
% figure
% freqz(b_hat,1);
% figure
% impz(b,a);                    % impulse response of the true IIR plant
figure
impz(b_hat,1);                  % impulse response of the identified FIR model
--------------------------------------------------------------------------------
/BlockLMS.m:
--------------------------------------------------------------------------------
% Block LMS example (FIR system identification)

mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run

a = 1;
b = [1 0.2 0 -0.8];             % true FIR plant coefficients
%% Block LMS
b_hat = [0 0 0 0]';             % initial coefficient estimates
s = length(b_hat);              % number of new samples per block iteration
M = 4;                          % block size
mu = 0.05;                      % step size
% Monte Carlo loop
e = zeros(M,N,mcN);
for mc_loop = 1:mcN
    b_hat = [0 0 0 0]';         % restart the estimate for every run
    x = randn(N,1);
    d = filter(b,a,x);
    % block LMS iterations
    %for LMS_loop = (1+M/s):N/s-M
    for BLOCK_loop = s:N/M-s
        % build the M x s data matrix for the current block
        x_b = zeros(M,length(b_hat));
        for i = 1:M
            for j = 1:length(b_hat)
                x_b(i,j) = x(BLOCK_loop*s+i-j);
            end
        end
        % block error and block update (gradient averaged over the M samples)
        e(:,BLOCK_loop,mc_loop) = d(BLOCK_loop*s:1:BLOCK_loop*s+M-1) - x_b*b_hat;
        b_hat = b_hat + 2*mu/M*x_b.'*e(:,BLOCK_loop,mc_loop);
    end
end

MSE = mean(mean(e(:,:,:).^2,3));    % learning curve averaged over block samples and runs
if mcN==1
    MSE = mean(e(:,:,mc_loop).^2);
end
box on
hold on
plot(s:N,db(MSE(s:N)))
--------------------------------------------------------------------------------
/FDAF_LMS.m:
--------------------------------------------------------------------------------
% Frequency-domain adaptive filter (FDAF) LMS example, overlap-save, unconstrained

mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run

a = 1;
b = [1 0.2 0 -0.8];             % true FIR plant coefficients
b_hat = [0 0 0 0]';             % initial time-domain coefficient estimates
s = length(b_hat);              % number of new samples per block
M = 4;                          % block size
padding = 2*s;                  % FFT length (two blocks, overlap-save)
mu = 0.01;                      % step size

% Monte Carlo loop
e = zeros(s,N/M,mcN);
b_hat_array = zeros(s,mcN);
for mc_loop = 1:mcN
    b_hat = [0 0 0 0]';         % restart the estimate for every run
    B_hat = fft(b_hat,padding); % frequency-domain weights
    x = randn(N,1);
    d = filter(b,a,x);
    for BLOCK_loop = s:N/M-s
        % FFT of the previous and current input blocks (overlap-save)
        X_e = diag(fft(x(BLOCK_loop*s-s:1:BLOCK_loop*s+s-1)));
        y = ifft(X_e*B_hat);
        % keep only the last s output samples (valid overlap-save output)
        e(:,BLOCK_loop,mc_loop) = d(BLOCK_loop*s:1:BLOCK_loop*s+s-1) - y(padding-s+1:padding);
        e_padded = zeros(padding,1);
        e_padded(padding-s+1:padding) = e(:,BLOCK_loop,mc_loop);
        % unconstrained frequency-domain gradient (no gradient constraint applied)
        PHI = X_e'*fft(e_padded,padding);
        B_hat = B_hat + mu*PHI;
    end
    b_hat = ifft(B_hat);        % back to the time domain
    b_hat_array(:,mc_loop) = b_hat(1:s)
end
mean(b_hat_array,2)             % average estimate over the Monte Carlo runs
MSE = mean(mean(e.^2,3),1);     % learning curve averaged over block samples and runs
if mcN==1
    MSE = mean(e(:,:,1).^2,1);
end
plot(s:N/M,db(MSE(s:N/M)))
--------------------------------------------------------------------------------
/OutPutErrorModel.m:
--------------------------------------------------------------------------------
% Output error model (adaptive IIR filter with a filtered-gradient update)
mcN = 50;                       % number of Monte Carlo runs
N = 10000;                      % samples per run
a = [1 0.8 0.64 0.5120];        % true plant denominator coefficients
% a = [1 0.4 0.4*.4 .4*.4*.4];
b = 1;
c_hat = [0.1 0.1 0.1 0.1]';     % initial coefficient estimates
s = length(c_hat);
mu = 0.005;                     % step size
% Monte Carlo loop
e = zeros(mcN,N);
for mc_loop = 1:mcN
    c_hat = [0.1 0.1 0.1 0.1]'; % restart the estimate for every run
    x = randn(N,1);
    d = filter(b,a,x);
    beta_alpha = zeros(s,N);    % filtered gradient regressors
    % adaptation iterations
    for LMS_loop = s+1:N-s-1
        u = zeros(s,1);         % regressor: current input and past outputs
        u(1) = x(LMS_loop);
        u(2:s) = -d(LMS_loop-1:-1:LMS_loop-s+1);
        e(mc_loop,LMS_loop) = d(LMS_loop) - u.'*c_hat;

        % filter the regressor through the current denominator estimate
        beta_alpha(1,LMS_loop) = -x(LMS_loop);
        beta_alpha(2:s,LMS_loop) = d(LMS_loop-1:-1:LMS_loop-s+1);

        for p = 1:s-1
            beta_alpha(1,LMS_loop) = beta_alpha(1,LMS_loop) - c_hat(p+1)*beta_alpha(1,LMS_loop-p);
        end
        for z = 2:s
            for p = 1:s-1
                beta_alpha(z,LMS_loop) = beta_alpha(z,LMS_loop) - c_hat(p+1)*beta_alpha(z,LMS_loop-p);
            end
        end

        del = 2*e(mc_loop,LMS_loop)*beta_alpha(:,LMS_loop);   % gradient estimate
        c_hat = c_hat - 2*mu*del;
    end

end
MSE = mean(e(:,s:N).^2);        % learning curve averaged over the runs
if mcN==1
    MSE = e(:,s:N).^2;
end
plot(s:N,db(MSE))
--------------------------------------------------------------------------------
/RLS_LMS.m:
--------------------------------------------------------------------------------
%% System identification using the recursive least squares (RLS) and least mean squares (LMS) algorithms
%% Start
clc;
clear all;
close all;
N = 1000;                       % Number of samples
Bits = 2;                       % PSK modulation order
SNR = 10;                       % Noise level in dB
h = [0.9 0.2 0.5 -0.7];         % Plant impulse response
data = randi([0 Bits-1],1,N);   % Random symbols for the input data
x = real(pskmod(data,Bits));    % Phase shift keying (PSK) modulation
r = filter(h,1,x);              % Input passed through the system h
d = awgn(r,SNR);                % Additive white Gaussian noise at the defined SNR
%% LMS parameters
etac = 1e-3;                    % Learning rate (step size) of LMS
Wlms = zeros(size(h));          % Initial weights of LMS
V = zeros(1,length(h));         % Input frame (regressor) of LMS
%% RLS parameters
p = 3;                          % Filter order (p+1 taps)
lamda = 1;                      % Forgetting factor
sigma = 1;                      % Initial inverse-correlation scaling
Wrls = Wlms';                   % Initial weights of RLS
P = (sigma^-1)*eye(p+1);        % Initial inverse correlation matrix
U = zeros(size(Wrls));          % Input frame (regressor) of RLS
NWDc = zeros(1,N); NWD = zeros(1,N);            % preallocate the learning curves
y = zeros(1,N); alpha = zeros(1,N); g = zeros(p+1,N);
for n = 1:N

    %% LMS
    V(1,2:end) = V(1,1:end-1);                  % Shift the input frame window
    V(1,1) = x(n);                              % Newest input sample for LMS

    yc = Wlms*V';                               % Output of LMS
    ec = d(n) - yc;                             % Instantaneous error of LMS
    Wlms = Wlms + etac*ec*V;                    % Weight update rule of LMS
    %% RLS
    U(2:end,1) = U(1:end-1,1);                  % Shift the input frame window
    U(1,1) = x(n);                              % Newest input sample for RLS

    y(n) = U'*Wrls;                             % Output of RLS
    alpha(n) = d(n) - y(n);                     % Instantaneous (a priori) error of RLS
    g(:,n) = P*U*((lamda + U'*P*U).^-1);        % Gain vector
    P = (lamda^-1)*P - g(:,n)*U'*(lamda^-1)*P;  % Inverse correlation matrix update
    Wrls = Wrls + alpha(n)*g(:,n);              % Weight update rule of RLS

    %% Normalized weight difference (NWD)
    NWDc(n) = norm(Wlms-h)./norm(h);            % Normalized weight difference of LMS
    NWD(n) = norm(Wrls'-h)./norm(h);            % Normalized weight difference of RLS
end
%% Cost function plots
figure
fsize = 14;                                     % plot text font size
plot(10*log10(NWDc),'b','linewidth',4)
hold on
plot(10*log10(NWD),'r','linewidth',4)
lgh = legend(strcat('Least mean square (LMS):',int2str(SNR),' (dB)'),strcat('Recursive least square (RLS):',int2str(SNR),' (dB)'),'Location','NorthEast');
grid minor
xlabel('Iterations','FontName','Times New Roman','FontSize',fsize);
ylabel('Normalized weight difference (NWD) in dB','FontName','Times New Roman','FontSize',fsize);
title('Cost function (NWD vs iterations)','FontName','Times New Roman','FontSize',6*fsize/5);
set(lgh,'FontName','Times New Roman','FontSize',fsize)
set(gca,'FontName','Times New Roman','FontSize',fsize)
saveas(gcf,'LMS_RLS_Comparison.png','png')
[h;Wrls';Wlms]                                  % compare the true h with the RLS and LMS estimates
--------------------------------------------------------------------------------
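
The FIR identification scripts above (LMS.m, NormalizedLMS.m, NewtonAlgorithm.m, BlockLMS.m) all repeat the same sample-by-sample update loop. Below is a minimal sketch of that core loop wrapped as a reusable function; it is not part of the repository, and the function name lms_identify and its argument list are illustrative only. It assumes a real-valued column-vector input and an FIR adaptive model, as in the scripts above.

function [w, e] = lms_identify(x, d, L, mu)
% LMS_IDENTIFY  Minimal LMS system-identification sketch (hypothetical helper,
% not part of this repository). x and d are column vectors, L is the number of
% adaptive FIR taps, and mu is the step size; returns the final tap vector w
% and the a priori error sequence e.
    w = zeros(L,1);                 % initial tap estimates
    e = zeros(size(x));             % a priori error sequence
    for n = L:length(x)
        u    = x(n:-1:n-L+1);       % regressor: L most recent input samples
        e(n) = d(n) - w'*u;         % a priori estimation error
        w    = w + 2*mu*u*e(n);     % same update form as in LMS.m
    end
end

Saved as lms_identify.m, a call such as x = randn(1e4,1); d = filter([1 0.2 0 -0.8],1,x); [w,e] = lms_identify(x,d,4,0.01); should drive w toward [1 0.2 0 -0.8]', and db(e.^2) gives the kind of learning curve the scripts above plot.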