├── Continuous_model
│   ├── ODE_Solvers.m
│   ├── main_model.m
│   ├── Matrix_Vec.m
│   ├── main_model_with_inte.m
│   ├── TV_Params.m
│   ├── model_repo.m
│   ├── model_repo_with_inte.m
│   └── AFs.m
├── Discrete_model
│   ├── Vectorb.m
│   ├── MatrixA.m
│   └── main.m
└── README.md

--------------------------------------------------------------------------------
/Continuous_model/ODE_Solvers.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guijiejie/Zeroing-Neural-Network-Toolbox/HEAD/Continuous_model/ODE_Solvers.m
--------------------------------------------------------------------------------
/Discrete_model/Vectorb.m:
--------------------------------------------------------------------------------
function output = Vectorb(t, x)
% Time-varying right-hand-side vector b(t) of the discrete example (x is unused).
output = [exp(-1i*5*t);
          exp(-1i*10*t);
          exp(-1i*5*t);
          exp(-1i*10*t);
          exp(-1i*5*t);
          exp(-1i*10*t);
          exp(-1i*10*t)+1];
--------------------------------------------------------------------------------
/Discrete_model/MatrixA.m:
--------------------------------------------------------------------------------
function y = MatrixA(t, x)
% Time-varying coefficient matrix A(t) of the discrete example (x is unused).
y = [ 5.6495 -1.1408 -0.3991 -2.8239 -0.4783 -0.2294 -1i*sin(5*t);
     -1.1408  4.7393 -0.6880  2.1276 -1.9964  1.6156 -1i*cos(5*t);
     -0.3991 -0.6880  3.3049  0.6073  0.0071 -0.8164 -1i*sin(5*t);
     -2.8239  2.1276  0.6073  9.0250 -5.7920 -1.6932 -1i*cos(5*t);
     -0.4783 -1.9964  0.0071 -5.7920  8.6618  1.7437 -1i*sin(5*t);
     -0.2294  1.6156 -0.8164 -1.6932  1.7437  1.5558 -1i*cos(5*t);
      1i*sin(5*t) 1i*cos(5*t) 1i*sin(5*t) 1i*cos(5*t) 1i*sin(5*t) 1i*cos(5*t) 0];
--------------------------------------------------------------------------------
/Discrete_model/main.m:
--------------------------------------------------------------------------------
%% Code implementation of
%% "Complex-Valued Discrete-Time Neural Dynamics for Perturbed Time-Dependent Complex Quadratic Programming With Applications"
%% Equ. 14

clear;
clc;
close all;

m = 7;
tau = 0.0001;
tf = 10;

% 7x1 random complex-valued initial state
z = rand(m,1) + 1i*rand(m,1);
t = 0:tau:tf;

% One correction step so that the iteration starts from (approximately) the
% solution of S(t(1))*z = q(t(1)).
S = MatrixA(t(1));
q = Vectorb(t(1));
z = z - pinv(S)*(S*z-q);

Fnorm = zeros(length(t), 1);
zdata = zeros(length(t), 1);

inte = zeros(m, 1);
for k = 2 : length(t)
    TTprev = t(k-1);
    Sprev = MatrixA(TTprev);
    qprev = Vectorb(TTprev);

    TT = t(k);
    S = MatrixA(TT);
    q = Vectorb(TT);

    % Discrete update (Equ. 14): a one-step correction with an accumulated-error
    % (integral) term to suppress perturbations.
    z = z + pinv(S) * (-(S-Sprev)*z + (q-qprev) - (S*z-q) - inte);
    err = S*z - q;
    inte = inte + err;

    zdata(k) = z(7);
    Fnorm(k) = norm(S*z-q);
    k   % display iteration progress
end

figure(1)
plot(Fnorm(2:end), 'LineWidth', 2);
hold on;

figure(2)
plot3(real(zdata), imag(zdata), t);
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ZNN_Toolbox

Code implementation of various zeroing neural networks (ZNNs) and gradient neural networks (GNNs). By default, an unconstrained quadratic minimization problem is used as the example.

## Environments

- MATLAB R2021b
- MATLAB Optimization Toolbox

## Run

### Continuous model without integration

- Use MATLAB's `ode45` to run continuous models without integration: `main_model.m`.
  `[t, x] = ode45(@model.OZNN, tspan, x0, options, AF, hyper_params, gamma, noise_info);`
- Use the fourth-order Runge-Kutta solver to run continuous models: `main_model.m`.
  `[t, x] = ODE.RK4(@model.OZNN, tspan, iter_gap, x0, AF, hyper_params, gamma, noise_info);`

### Continuous model with integration

- Use MATLAB's `ode45` to run continuous models with integration: `main_model_with_inte.m`.
  `[t, x] = ode45(@model.NTZNNAF, tspan, x0, options, AF, AF_params, hyperparams, noise_info);`
- Use the fourth-order Runge-Kutta solver to run continuous models with integration: `main_model_with_inte.m`.
  `[t, x] = ODE.RK4_Inte(@model.NTZNN, tspan, iter_gap, x0, AF, AF_params, hyperparams, noise_info);`
--------------------------------------------------------------------------------
/Continuous_model/main_model.m:
--------------------------------------------------------------------------------
clc
clear
format long;

%% For complex-valued problems, x0 must be initialized as a complex-valued vector.
% x0 = [-1.1-1i; -1.2+1i; 2; 1];   % For OZNN and OGNN
x0 = [-1.1; -1.2];
% x0 = [-1.122511280687204; -1.236883085813983; 2; 1; 0; 0; 0; 0];
gamma = 5;
tspan = [0, 2];
iter_gap = 0.01;

%% Construct activation functions
AF = 'hs';
hyper_params = [3, 0.5, 2, 0.5];

%% Noise definition [kind, strength] (kind: 0 = noise free, 1 = constant, 2 = linear, 3 = random)
noise_info = [1, 0];

%% Model definition
model = model_repo;
ODE = ODE_Solvers;
options = odeset();
[t, x] = ode45(@model.OZNN, tspan, x0, options, AF, hyper_params, gamma, noise_info);
% [t, x] = ODE.RK4(@model.OZNN, tspan, iter_gap, x0, AF, hyper_params, gamma, noise_info);

%% Residual error computation
Mat_Vec = Matrix_Vec;
nerr = zeros(length(t), 1);
for j = 1:length(t)
    T = t(j);
    D = Mat_Vec.D(T);
    w = Mat_Vec.w(T);
    X = x(j, 1:length(w));
    Err = D*X.' + w;
    nerr(j) = norm(Err);
end

%% Plot results
% figure
set(gca, 'FontSize', 14)
plot(t, nerr, 'LineWidth', 2);
xlabel('{\itt} (s)')
ylabel('||{\itE}(t)||_F')
hold on;
--------------------------------------------------------------------------------
/Continuous_model/Matrix_Vec.m:
--------------------------------------------------------------------------------
function output = Matrix_Vec
output.D = @Matrix_D;
output.w = @Vector_w;
output.DotD = @Diff_D;
output.Dotw = @Diff_w;
end

%% A static linear equation example (only for GNN)
% function D = Matrix_D(t)
% D = [6.51243521061066, 2.74891421134180;
%      2.74891421134180, 1.36445000945816];
% end
%
% function w = Vector_w(t)
% w = ones(2,1);
% end

%% A dynamic linear equation example
function D = Matrix_D(t)
D = [sin(t) cos(t); -cos(t) sin(t)];
end

function w = Vector_w(t)
w = [-3*sin(5*t) -cos(4*t)]';
end

%% A dynamic complex-valued linear equation example
% function D = Matrix_D(t)
% D = [2+sin(5*t),   exp(5*1i*t), -sin(5*t)*1i, -cos(5*t)*1i;
%      exp(-5*1i*t), 2+sin(5*t),  -cos(5*t)*1i,  sin(5*t)*1i;
%      sin(5*t)*1i,  cos(5*t)*1i,  0,            0;
%      cos(5*t)*1i, -sin(5*t)*1i,  0,            0];
% end
%
% function w = Vector_w(t)
% w = [exp(-5*1i*t), exp(-10*1i*t), exp(-10*1i*t), exp(-20*1i*t)]';
% end

%% Compute the time derivatives of D(t) and w(t) symbolically
function output = Diff_D(t)
syms u;
Dot_D = diff(Matrix_D(u), u);
output = double(subs(Dot_D, u, t));
end

function output = Diff_w(t)
syms u;
Dot_w = diff(Vector_w(u), u);
output = double(subs(Dot_w, u, t));
end
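
%% A user-defined example (illustrative sketch): any time-varying pair D(t), w(t)
%% can be plugged in by replacing Matrix_D and Vector_w above, as long as the
%% function signatures stay the same and only one definition of each is left
%% uncommented. The 3x3 system below is hypothetical.
% function D = Matrix_D(t)
% D = [2+cos(t), sin(t),   0;
%      sin(t),   2,        cos(t);
%      0,        cos(t),   2+sin(t)];
% end
%
% function w = Vector_w(t)
% w = [sin(2*t); cos(2*t); 1];
% end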
--------------------------------------------------------------------------------
/Continuous_model/main_model_with_inte.m:
--------------------------------------------------------------------------------
clc
clear
format long;

%% For complex-valued problems, x0 must be initialized as a complex-valued vector.
%% The state is augmented: the first half of x0 is the initial x, the second half
%% is the initial value of the error integral.
% x0 = [-1.1-1i; -1.2+1i; 2; 1; 0; 0; 0; 0];
x0 = randn(4,1);
tspan = [0, 10];
iter_gap = 0.1;

%% Construct activation functions
AF = ["linear", "linear"];
AF_params_one = [2, 0.5, 2, 0.5];
AF_params_two = [2, 3, 2, 0.5];
AF_params = [AF_params_one; AF_params_two];

%% Define hyperparameters
gamma = 10;
mu = 10;
hyperparams = [gamma, mu];

%% Noise definition [kind, strength] (kind: 0 = noise free, 1 = constant, 2 = linear, 3 = random)
noise_info = [0, 0];

%% Model definition
model = model_repo_with_inte;
ODE = ODE_Solvers;
options = odeset();
[t, x] = ode45(@model.NTZNNAF, tspan, x0, options, AF, AF_params, hyperparams, noise_info);
% [t, x] = ODE.RK4_Inte(@model.NTZNN, tspan, iter_gap, x0, AF, AF_params, hyperparams, noise_info);
% [t, x] = ODE.RK4_Inte(@model.NTGNN, tspan, iter_gap, x0, AF, AF_params, hyperparams, noise_info);

%% Residual error computation
Mat_Vec = Matrix_Vec;
nerr = zeros(length(t), 1);
for j = 1:length(t)
    T = t(j);
    D = Mat_Vec.D(T);
    w = Mat_Vec.w(T);
    X = x(j, 1:length(w));
    Err = D*X.' + w;
    nerr(j) = norm(Err);
end

%% Plot results
% figure
set(gca, 'FontSize', 14)
plot(t, nerr, 'LineWidth', 2);
xlabel('{\itt} (s)')
ylabel('||{\itE}(t)||_F')
hold on;
--------------------------------------------------------------------------------
/Continuous_model/TV_Params.m:
--------------------------------------------------------------------------------
function output = TV_Params
output.vp1 = @VPF1;
output.vp2 = @VPF2;
output.vp3 = @VPF3;
output.vp4 = @VPF4;
output.vp5 = @VPF5;
output.nac = @NAC;
end

% Varying-Parameter RNN Activated by Finite-Time Functions for Solving Joint-Drift Problems of Redundant Robot Manipulators
function out = VPF1(t, p)
assert(p > 0, 'Parameter p is not in the feasible range (p > 0).')
out = t^p + p;
end

% Varying-Parameter RNN Activated by Finite-Time Functions for Solving Joint-Drift Problems of Redundant Robot Manipulators
function out = VPF2(t, p)
assert(p > 0, 'Parameter p is not in the feasible range (p > 0).')
out = p^t + p;
end

% A New Varying-Parameter Convergent-Differential Neural-Network for Solving Time-Varying Convex QP Problem Constrained by Linear-Equality
function out = VPF3(t, p)
assert(p > 0, 'Parameter p is not in the feasible range (p > 0).')
out = p*exp(t);
end

% A Parameter-Changing and Complex-Valued Zeroing Neural-Network for Finding Solution of Time-Varying Complex Linear Matrix Equations in Finite Time
function out = VPF4(t, p)
assert(p > 0, 'Parameter p is not in the feasible range (p > 0).')
if p > 0 && p <= 1
    out = t^p + p;
else
    out = p^t + 2*p*t + p;
end
end

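% Usage sketch for the handles returned by TV_Params (illustrative only; these
% time-varying parameters are not wired into the shipped models). A varying gain
% could replace the constant gamma in Model_OZNN, e.g.
%   TV = TV_Params;
%   gamma_t = TV.vp3(t, 5);     % gamma(t) = 5*exp(t)
%   dotX = pinv(D)*(-gamma_t*err - dot_D*x - dot_w);
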
% A Parameter-Changing and Complex-Valued Zeroing Neural-Network for Finding Solution of Time-Varying Complex Linear Matrix Equations in Finite Time
function out = VPF5(t, p)
assert(p > 0, 'Parameter p is not in the feasible range (p > 0).')
if p > 0 && p <= 1
    out = p*exp(t);
else
    out = p^t + 2*p*t + p;
end
end

% Norm-Based Adaptive Coefficient ZNN for Solving the Time-Dependent Algebraic Riccati Equation
function out = NAC(E, eta, zeta)
assert((eta > 0 && zeta > 1), 'Parameter eta or zeta is not in the feasible range (eta > 0 and zeta > 1).')
out = (norm(E)^eta) + zeta;
end
--------------------------------------------------------------------------------
/Continuous_model/model_repo.m:
--------------------------------------------------------------------------------
function output = model_repo
output.OZNN = @Model_OZNN;
output.OGNN = @Model_OGNN;
end

function output = AF_Select(AF_Name, err, t, hyper_params)
AF = AFs;
if strcmp(AF_Name, 'linear')
    output = AF.linear(err);
elseif strcmp(AF_Name, 'powerQ')
    a = hyper_params(1);
    q = hyper_params(2);
    output = AF.powerQ(err, a, q);
elseif strcmp(AF_Name, 'power')
    output = AF.power(err);
elseif strcmp(AF_Name, 'PTC')
    Conv_time = hyper_params(1);
    output = AF.PTC(err, t, Conv_time);
elseif strcmp(AF_Name, 'hs')
    zeta = hyper_params(1);
    output = AF.hs(err, zeta);
elseif strcmp(AF_Name, 'ps')
    zeta = hyper_params(1);
    m = hyper_params(2);
    output = AF.ps(err, zeta, m);
elseif strcmp(AF_Name, 'bs')
    zeta = hyper_params(1);
    output = AF.bs(err, zeta);
elseif strcmp(AF_Name, 'sbp')
    r = hyper_params(1);
    output = AF.sbp(err, r);
elseif strcmp(AF_Name, 'wsbp')
    r = hyper_params(1);
    k1 = hyper_params(2);
    k2 = hyper_params(3);
    k3 = hyper_params(4);
    output = AF.wsbp(err, r, k1, k2, k3);
elseif strcmp(AF_Name, 'VAF')
    eta = hyper_params(1);
    w = hyper_params(2);
    a1 = hyper_params(3);
    a2 = hyper_params(4);
    a3 = hyper_params(5);
    a4 = hyper_params(6);
    output = AF.VAF(err, eta, w, a1, a2, a3, a4);
elseif strcmp(AF_Name, 'TAF')
    eta = hyper_params(1);
    k1 = hyper_params(2);
    k2 = hyper_params(3);
    k3 = hyper_params(4);
    output = AF.TAF(err, eta, k1, k2, k3);
elseif strcmp(AF_Name, 'bound')
    b = hyper_params(1);
    output = AF.bound(err, b);
elseif strcmp(AF_Name, 'ball')
    b = hyper_params(1);
    output = AF.ball(err, b);
elseif strcmp(AF_Name, 'tp')
    a = hyper_params(1);
    p = hyper_params(2);
    output = AF.tp(err, a, p);
elseif strcmp(AF_Name, 'BiP')
    a = hyper_params(1);
    p = hyper_params(2);
    q = hyper_params(3);
    output = AF.BiP(err, a, p, q);
end
end

%% System noise definition
function output = Noises(kind, Ele_length, t, strength)
assert(ismember(kind, [0, 1, 2, 3]), 'Noise kind is not supported (kind = 0 (noise free), 1 (constant), 2 (linear time-varying), or 3 (random)).')
% Noise free
if kind == 0
    output = 0;
% Constant noise
elseif kind == 1
    output = strength;
% Linear noise
elseif kind == 2
    output = strength * t;
% Bounded random noise
elseif kind == 3
    output = strength * rand(Ele_length, 1);
else
    output = 0;
end
end

%% ZNN with activation function
function output = Model_OZNN(t, x, AF, hyper_params, gamma, noise_setting)
Mat_Vec = Matrix_Vec;
D = Mat_Vec.D(t);
w = Mat_Vec.w(t);
dot_D = Mat_Vec.DotD(t);
dot_w = Mat_Vec.Dotw(t);

err = D * x + w;

noise_kind = noise_setting(1);
noise_strength = noise_setting(2);
Ele_length = length(x);

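% For a complex-valued residual, the real-valued activation Phi is applied
% elementwise to the real and imaginary parts and then recombined,
%   Phi_C(E) = Phi(Re(E)) + 1i*Phi(Im(E)),
% which is what the branch below implements.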
% Complex-valued activation is implemented according to
% "Nonlinearly Activated Neural Network for Solving Time-Varying Complex Sylvester Equation" (Equ. 7) and
% "Nonconvex and Bound Constraint Zeroing Neural Network for Solving Time-Varying Complex-Valued Quadratic Programming Problem" (Equ. 8).
if ~isreal(err)
    err_real = AF_Select(AF, real(err), t, hyper_params);
    err_imag = AF_Select(AF, imag(err), t, hyper_params);
    err = err_real + 1i*err_imag;
else
    err = AF_Select(AF, err, t, hyper_params);
end

% ZNN design formula: dot(E) = -gamma*Phi(E), which for E = D*x + w gives
% dot(x) = pinv(D)*(-gamma*Phi(E) - dot(D)*x - dot(w)) (plus injected noise).
dotX = pinv(D)*(-gamma*err - dot_D*x - dot_w + Noises(noise_kind, Ele_length, t, noise_strength));
output = dotX;
t   % display integration progress
end

%% GNN model with activation function
function output = Model_OGNN(t, x, AF, hyper_params, gamma, noise_setting)
Mat_Vec = Matrix_Vec;
D = Mat_Vec.D(t);
w = Mat_Vec.w(t);

err = D * x + w;

noise_kind = noise_setting(1);
noise_strength = noise_setting(2);
Ele_length = length(x);

if ~isreal(err)
    err_real = AF_Select(AF, real(err), t, hyper_params);
    err_imag = AF_Select(AF, imag(err), t, hyper_params);
    err = err_real + 1i*err_imag;
else
    err = AF_Select(AF, err, t, hyper_params);
end

% GNN design formula: dot(x) = -gamma*D'*Phi(E), i.e., (activated) gradient
% descent on 0.5*||D*x + w||^2 (plus injected noise).
dotX = -gamma * D' * err + Noises(noise_kind, Ele_length, t, noise_strength);
output = dotX;
t   % display integration progress
end
--------------------------------------------------------------------------------
/Continuous_model/model_repo_with_inte.m:
--------------------------------------------------------------------------------
function output = model_repo_with_inte
output.NTZNN = @Model_NTZNN;
output.NTZNNAF = @Model_AFNTZNN;
output.NTGNN = @Model_NTGNN;
end

function output = AF_Select(AF_Name, err, t, hyper_params)
AF = AFs;
if strcmp(AF_Name, 'linear')
    output = AF.linear(err);
elseif strcmp(AF_Name, 'powerQ')
    a = hyper_params(1);
    q = hyper_params(2);
    output = AF.powerQ(err, a, q);
elseif strcmp(AF_Name, 'power')
    output = AF.power(err);
elseif strcmp(AF_Name, 'PTC')
    Conv_time = hyper_params(1);
    output = AF.PTC(err, t, Conv_time);
elseif strcmp(AF_Name, 'hs')
    zeta = hyper_params(1);
    output = AF.hs(err, zeta);
elseif strcmp(AF_Name, 'ps')
    zeta = hyper_params(1);
    m = hyper_params(2);
    output = AF.ps(err, zeta, m);
elseif strcmp(AF_Name, 'bs')
    zeta = hyper_params(1);
    output = AF.bs(err, zeta);
elseif strcmp(AF_Name, 'sbp')
    r = hyper_params(1);
    output = AF.sbp(err, r);
elseif strcmp(AF_Name, 'wsbp')
    r = hyper_params(1);
    k1 = hyper_params(2);
    k2 = hyper_params(3);
    k3 = hyper_params(4);
    output = AF.wsbp(err, r, k1, k2, k3);
elseif strcmp(AF_Name, 'VAF')
    eta = hyper_params(1);
    w = hyper_params(2);
    a1 = hyper_params(3);
    a2 = hyper_params(4);
    a3 = hyper_params(5);
    a4 = hyper_params(6);
    output = AF.VAF(err, eta, w, a1, a2, a3, a4);
elseif strcmp(AF_Name, 'TAF')
    eta = hyper_params(1);
    k1 = hyper_params(2);
    k2 = hyper_params(3);
    k3 = hyper_params(4);
    output = AF.TAF(err, eta, k1, k2, k3);
elseif strcmp(AF_Name, 'bound')
    b = hyper_params(1);
    output = AF.bound(err, b);
elseif strcmp(AF_Name, 'ball')
    b = hyper_params(1);
    output = AF.ball(err, b);
elseif strcmp(AF_Name, 'tp')
    a = hyper_params(1);
    p = hyper_params(2);
    output = AF.tp(err, a, p);
elseif strcmp(AF_Name, 'BiP')
    a = hyper_params(1);
    p = hyper_params(2);
    q = hyper_params(3);
    output = AF.BiP(err, a, p, q);
end
end

%% System noise definition
function output = Noises(kind, Ele_length, t, strength)
assert(ismember(kind, [0, 1, 2, 3]), 'Noise kind is not supported (kind = 0 (noise free), 1 (constant), 2 (linear time-varying), or 3 (random)).')
% Noise free
if kind == 0
    output = 0;
% Constant noise
elseif kind == 1
    output = strength;
% Linear noise
elseif kind == 2
    output = strength * t;
% Bounded random noise
elseif kind == 3
    output = strength * rand(Ele_length, 1);
else
    output = 0;
end
end

%% ZNN with integration and activation functions
function output = Model_AFNTZNN(t, x, AF, AF_params, hyperparams, noise_setting)
Mat_Vec = Matrix_Vec;
D = Mat_Vec.D(t);
w = Mat_Vec.w(t);
dot_D = Mat_Vec.DotD(t);
dot_w = Mat_Vec.Dotw(t);

% The ODE state is augmented: the first half is x, the second half is the
% accumulated error integral.
x_length = length(x);
half = x_length/2;
x_cur = x(1:half);
inte = x(half+1:x_length);

err = D * x_cur + w;
gamma = hyperparams(1);
mu = hyperparams(2);

noise_kind = noise_setting(1);
noise_strength = noise_setting(2);
Ele_length = length(x_cur);

AF_One = AF(1);
AF_Two = AF(2);
AF_Params_One = AF_params(1,:);
AF_Params_Two = AF_params(2,:);
if ~isreal(err)
    err_real = AF_Select(AF_One, real(err), t, AF_Params_One);
    err_imag = AF_Select(AF_One, imag(err), t, AF_Params_One);
    inte_real = AF_Select(AF_Two, real(err + gamma*inte), t, AF_Params_Two);
    inte_imag = AF_Select(AF_Two, imag(err + gamma*inte), t, AF_Params_Two);
    err_AF = err_real + 1i*err_imag;
    inte_AF = inte_real + 1i*inte_imag;
else
    % err is kept unactivated here so that the appended integral state
    % accumulates the raw residual, consistent with the complex-valued branch.
    err_AF = AF_Select(AF_One, err, t, AF_Params_One);
    inte_AF = AF_Select(AF_Two, err + gamma*inte, t, AF_Params_Two);
end

dotX = pinv(D)*(-gamma * err_AF - dot_D*x_cur - dot_w - mu * inte_AF + Noises(noise_kind, Ele_length, t, noise_strength));
output = [dotX; err];
norm(err)   % display residual norm
t           % display integration progress
end

%% ZNN with integration
function output = Model_NTZNN(t, x, AF, AF_params, hyperparams, noise_setting)
Mat_Vec = Matrix_Vec;
D = Mat_Vec.D(t);
w = Mat_Vec.w(t);
dot_D = Mat_Vec.DotD(t);
dot_w = Mat_Vec.Dotw(t);

x_length = length(x);
half = x_length/2;
x_cur = x(1:half);
inte = x(half+1:x_length);

err = D * x_cur + w;
gamma = hyperparams(1);
mu = hyperparams(2);

noise_kind = noise_setting(1);
noise_strength = noise_setting(2);
Ele_length = length(x_cur);

% Noise-tolerant ZNN design:
% dot(x) = pinv(D)*(-gamma*E - dot(D)*x - dot(w) - mu*int(E)) (plus injected noise).
dotX = pinv(D)*(-gamma*err - dot_D*x_cur - dot_w - mu*inte + Noises(noise_kind, Ele_length, t, noise_strength));
output = [dotX; err];
t   % display integration progress
end

%% GNN with integration
function output = Model_NTGNN(t, x, AF, AF_params, hyperparams, noise_setting)
Mat_Vec = Matrix_Vec;
D = Mat_Vec.D(t);
w = Mat_Vec.w(t);

x_length = length(x);
half = x_length/2;
x_cur = x(1:half);
inte = x(half+1:x_length);

err = D * x_cur + w;
gamma = hyperparams(1);
mu = hyperparams(2);

noise_kind = noise_setting(1);
noise_strength = noise_setting(2);
Ele_length = length(x_cur);

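% PI-type GNN design as implemented below: the update direction combines a
% proportional term and the accumulated error integral,
%   dot(x) = -D'*(gamma*E + mu*int(E)) (plus injected noise).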
dotX = - D'*(gamma * err + mu * inte) + Noises(noise_kind, Ele_length, t, noise_strength);
output = [dotX; err];
t   % display integration progress
end
--------------------------------------------------------------------------------
/Continuous_model/AFs.m:
--------------------------------------------------------------------------------
function output = AFs
output.linear = @Linear;
output.powerQ = @Power_Q;
output.power = @PowerSum;
output.PTC = @PTC;
output.hs = @Hyperbolic_Sine;
output.ps = @Power_Sigmoid;
output.bs = @Bipolar_sigmoid;
output.sbp = @Sign_Bi_Power;
output.wsbp = @Weighted_Sign_Bi_Power;
output.VAF = @Versatile_AF;
output.TAF = @Tunable_AF;
output.bound = @Bound;
output.ball = @Ball;
output.tp = @Tanh_Power;
output.BiP = @Bi_Power;
output.tl = @Tanh_Linear;
end

% RNN Models for Dynamic Matrix Inversion: A Control-Theoretical Perspective
function out = Linear(E)
for i = 1:length(E)
    out(i) = E(i);
end
out = out';
end

% Saturation-Allowed Neural Dynamics Applied to Perturbed Time-Dependent System of Linear Equations and Robots
function out = PowerSum(E)
for i = 1:length(E)
    out(i) = E(i) + (E(i))^3 + (E(i))^5;
end
out = out';
end

% Finite-Time and Predefined-Time Convergence Design for Zeroing Neural Network: Theorem, Method, and Verification
% Section 4-B
function out = Power_Q(E, a, q)
assert(a > 0, 'Parameter a is not in the feasible range (a > 0).')
assert(q > 0, 'Parameter q is not in the feasible range (q > 0).')
for i = 1:length(E)
    out(i) = a*(E(i).^q);
end
out = out';
end

% Finite-Time and Predefined-Time Convergence Design for Zeroing Neural Network: Theorem, Method, and Verification
% Section 4-B
function out = Tanh_Linear(E, a, xi)
assert(a > 0, 'Parameter a is not in the feasible range (a > 0).')
assert(xi > 0, 'Parameter xi is not in the feasible range (xi > 0).')
for i = 1:length(E)
    out(i) = a*tanh(xi*E(i));
end
out = out';
end

% RNN Models for Dynamic Matrix Inversion: A Control-Theoretical Perspective
function out = Hyperbolic_Sine(E, zeta)
assert(zeta >= 1, 'Parameter zeta is not in the feasible range (zeta >= 1).')
for i = 1 : length(E)
    out(i) = (exp(zeta*E(i)) - exp(-zeta*E(i)));
end
out = out';
end

% Reference: Zeroing Neural Networks: Finite-time Convergence Design,
% Analysis and Applications (P15)
function out = Power_Sigmoid(E, zeta, m)
assert(zeta >= 2, 'Parameter zeta is not in the feasible range (zeta >= 2).')
assert((mod(m, 2) ~= 0) && m >= 3, 'Parameter m is not in the feasible range (m >= 3 and m must be an odd number).')
for i = 1 : length(E)
    if abs(E(i)) < 1
        out(i) = ((1+exp(-zeta))/(1-exp(-zeta))) * ((1-exp(-zeta*E(i)))/(1+exp(-zeta*E(i))));
    else
        out(i) = (E(i))^m;
    end
end
out = out';
end

% Reference: Zeroing Neural Networks: Finite-time Convergence Design,
% Analysis and Applications (P15)
function out = Bipolar_sigmoid(E, zeta)
assert(zeta >= 2, 'Parameter zeta is not in the feasible range (zeta >= 2).')
for i = 1 : length(E)
    out(i) = (1-exp(-zeta*E(i)))/(1+exp(-zeta*E(i)));
end
out = out';
end

% A Strictly Predefined-Time Convergent Neural Solution to Equality- and Inequality-Constrained Time-Variant Quadratic Programming
function out = PTC(E, t, Conv_time)
assert(Conv_time > 0, 'Parameter Conv_time is not in the feasible range (Conv_time > 0).')
for i = 1 : length(E)
    if t < Conv_time
        out(i) = (exp(E(i))-1)./((Conv_time-t).*exp(E(i)));
    else
        out(i) = E(i);
    end
end
out = out';
end

% Reference: Zeroing Neural Networks: Finite-time Convergence Design,
% Analysis and Applications (P42)
function out = Sign_Bi_Power(E, r)
assert(r > 0 && r < 1, 'Parameter r is not in the feasible range (0 < r < 1).')
for i = 1 : length(E)
    out(i) = 0.5*(abs(E(i))^r + abs(E(i))^(1/r)) * sign(E(i));
end
out = out';
end

% Reference: Zeroing Neural Networks: Finite-time Convergence Design,
% Analysis and Applications (P174)
function out = Weighted_Sign_Bi_Power(E, r, k1, k2, k3)
assert(r > 0 && r < 1, 'Parameter r is not in the feasible range (0 < r < 1).')
assert((k1>0 && k1<1) && (k2>0 && k2<1) && (k3>0 && k3<1), 'Parameter k is not in the feasible range (0 < k < 1).')
for i = 1 : length(E)
    out(i) = 0.5*(k1*abs(E(i))^r + k2*abs(E(i))^(1/r)) * sign(E(i)) + 0.5*k3*E(i);
end
out = out';
end

% Reference: Zeroing Neural Networks: Finite-time Convergence Design,
% Analysis and Applications (P42)
function out = Versatile_AF(E, eta, w, a1, a2, a3, a4)
assert(eta > 0 && eta < 1, 'Parameter eta is not in the feasible range (0 < eta < 1).')
assert(w > 1, 'Parameter w is not in the feasible range (w > 1).')
assert(a1>0 && a2>0 && a3>=0 && a4>=0, 'Parameter a is not in the feasible range (a1 and a2 > 0, a3 and a4 >= 0).')
for i = 1 : length(E)
    out(i) = (a1*abs(E(i))^eta + a2*abs(E(i))^w) * sign(E(i)) + a3*E(i) + a4*sign(E(i));
end
out = out';
end

% Reference: Zeroing Neural Networks: Finite-time Convergence Design,
% Analysis and Applications (P63)
function out = Tunable_AF(E, eta, k1, k2, k3)
assert(eta > 0 && eta < 1, 'Parameter eta is not in the feasible range (0 < eta < 1).')
assert(k1 > 0 && k2 > 0 && k3 > 0, 'Parameter k is not in the feasible range (k > 0).')
for i = 1 : length(E)
    out(i) = (k1*abs(E(i))^eta + k2*abs(E(i))^(1/eta)) * sign(E(i)) + k3*E(i);
end
out = out';
end

% Reference: RNN for Solving Time-Variant Generalized Sylvester Equation With Applications to Robots and Acoustic Source Localization
function out = Bound(E, b)
assert(b > 0, 'Parameter b is not in the feasible range (b > 0).')
for i = 1 : length(E)
    if E(i) > b
        out(i) = b;
    elseif E(i) < -b
        out(i) = -b;
    else
        out(i) = E(i);
    end
end
out = out';
end

% Reference: RNN for Solving Time-Variant Generalized Sylvester Equation With Applications to Robots and Acoustic Source Localization
function out = Ball(E, b)
assert(b > 0, 'Parameter b is not in the feasible range (b > 0).')
for i = 1 : length(E)
    if norm(E) > b
        out(i) = b*(E(i)/norm(E));
    else
        out(i) = E(i);
    end
end
out = out';
end

% Finite-Time and Predefined-Time Convergence Design for Zeroing Neural Network: Theorem, Method, and Verification
% Section 4-C
function out = Tanh_Power(E, a, p)
assert(a > 0, 'Parameter a is not in the feasible range (a > 0).')
assert(p > 0, 'Parameter p is not in the feasible range (p > 0).')
for i = 1 : length(E)
    out(i) = a*tanh(E(i)^p);
end
out = out';
end

% Finite-Time and Predefined-Time Convergence Design for Zeroing Neural Network: Theorem, Method, and Verification
% Section 4-D
function out = Bi_Power(E, a, p, q)
assert(a > 0, 'Parameter a is not in the feasible range (a > 0).')
assert(p < 1, 'Parameter p is not in the feasible range (p < 1).')
assert(q > 1, 'Parameter q is not in the feasible range (q > 1).')
for i = 1 : length(E)
    out(i) = a*(E(i)^p + E(i)^q);
end
out = out';
end
--------------------------------------------------------------------------------
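
A minimal usage sketch for the activation-function handles in `AFs.m` (illustrative only; it assumes `Continuous_model` is on the MATLAB path, and the sample residual vector is arbitrary):

    AF = AFs;                                  % struct of activation-function handles
    E  = [0.5; -1.2; 3];                       % sample residual vector
    y_linear = AF.linear(E);                   % identity activation
    y_sbp    = AF.sbp(E, 0.5);                 % sign-bi-power, r = 0.5 (0 < r < 1)
    y_wsbp   = AF.wsbp(E, 0.5, 0.3, 0.3, 0.3); % weighted sign-bi-power, k1, k2, k3 in (0, 1)
    disp([y_linear, y_sbp, y_wsbp])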