├── CNOEC_report.pdf
├── Mark 21_semidef
│   ├── MeritT1.m
│   ├── Model_daq.slx
│   ├── Model_sim.slx
│   ├── Project.m
│   ├── definitivo.mat
│   ├── direct_kin_test.m
│   ├── linesearch_merit.m
│   ├── myfmincon.m
│   ├── mygradient.m
│   ├── myoptimset.m
│   ├── robot_dynamics.m
│   ├── robot_sim_cost.m
│   └── robot_sim_err.m
├── Mark 23_semidef
│   ├── MeritT1.m
│   ├── Model_daq.slx
│   ├── Model_sim.slx
│   ├── Project.m
│   ├── direct_kin_test.m
│   ├── linesearch_merit.m
│   ├── myfmincon.m
│   ├── mygradient.m
│   ├── myoptimset.m
│   ├── robot_dynamics.m
│   ├── robot_sim_cost.m
│   ├── robot_sim_err.m
│   ├── test_1_tolgrad5_tolx8.mat
│   ├── test_2_tolgrad6_tolx10.mat
│   └── test_2_tolgrad6_tolx8.mat
├── Presentazione_CNOEC.pptx
└── README.md

/CNOEC_report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/CNOEC_report.pdf
--------------------------------------------------------------------------------
/Mark 21_semidef/MeritT1.m:
--------------------------------------------------------------------------------
function [T1val] = MeritT1(fun,xk,A,b,C,d,p,q,sigma,tau,mode)
% MERITT1 Computes the value of the merit function.
%
%   INPUTS:
%           fun   = cost function
%           xk    = current point
%           A     = matrix defining linear equality constraints
%           b     = vector defining linear equality constraints
%           C     = matrix defining linear inequality constraints
%           d     = vector defining linear inequality constraints
%           p     = number of nonlinear equality constraints
%           q     = number of nonlinear inequality constraints
%           sigma = vector of weights, one for each equality constraint
%           tau   = vector of weights, one for each inequality constraint
%           mode  = 'GN':   Gauss-Newton
%                   'BFGS': BFGS
%   OUTPUTS:
%           T1val = value of the merit function

[Vk] = fun(xk);
if strcmp(mode,'GN')
    Fxk = Vk(1:end-(p+q),1);
    fxk = Fxk'*Fxk;
elseif strcmp(mode,'BFGS')
    fxk = Vk(1:end-(p+q),1);
end
if ~isempty(A)
    gxk = [A*xk-b;Vk(end-p-q+1:end-q,1)];
else
    gxk = Vk(end-p-q+1:end-q,1);
end
if ~isempty(C)
    hxk = [C*xk-d;Vk(end-q+1:end,1)];
else
    hxk = Vk(end-q+1:end,1);
end
T1val = fxk+sigma'*abs(gxk)+tau'*max(zeros(size(hxk)),-hxk);
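Usage sketch (illustration only, not a file of the repository): in 'GN' mode the function handle stacks the Gauss-Newton residuals F(x), then the p nonlinear equalities and the q nonlinear inequalities, so the l-1 merit value can be checked by hand on an assumed toy problem:

fun   = @(x) [x(1)-1; x(2)-2;   % residuals F(x), so f(x) = F(x)'*F(x)
              x(1)+x(2)-2;      % one nonlinear equality  g(x) = 0
              x(1)];            % one nonlinear inequality h(x) >= 0
xk    = [0;0];
T1val = MeritT1(fun,xk,[],[],[],[],1,1,1,1,'GN');
% f = 5, sigma'*|g| = 2, tau'*max(0,-h) = 0, hence T1val = 7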
--------------------------------------------------------------------------------
/Mark 21_semidef/Model_daq.slx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 21_semidef/Model_daq.slx
--------------------------------------------------------------------------------
/Mark 21_semidef/Model_sim.slx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 21_semidef/Model_sim.slx
--------------------------------------------------------------------------------
/Mark 21_semidef/Project.m:
--------------------------------------------------------------------------------
% Constrained numerical optimization for estimation and control.
% Project by Rodrigo Senofieni, Andrea Ghezzi and Luca Brolese.
% A.Y. 2018/2019
% Real-time estimation of the human force acting on the end-effector of a
% collaborative robot. The estimation is carried out with an MHE (Moving
% Horizon Estimation) approach, based on past records of the error between
% reference and simulated joint positions, evaluated over a sliding window
% of size M.

%%
close all
clear all
clc

%% Parameters
fs = 50;            % sampling frequency
Ts = 1/fs;          % sampling period
tau_p = 0.1;        % pole of the human TF
tau_z = 1;          % zero of the human TF
k = 100;            % gain of the human TF
m_load = 10;        % mass of the load
Fmax = 200;         % maximum force of the human; can be used in a saturation block
s = tf('s');        % initialize the TF
H = k*(1+tau_z*s)/(1+tau_p*s);  % human TF
[numH, denH] = tfdata(H, 'v');  % human TF parameters for Simulink
% Parameters of the robot
g = 9.81;           % gravitational acceleration
J1 = 10;            % inertia link 1
J2 = 10;            % inertia link 2
m1 = 50;            % mass link 1
m2 = 50;            % mass link 2
a1 = 1.2;           % length link 1
a2 = 1;             % length link 2
l1 = a1/2;          % centre of mass link 1
l2 = a2/2;          % centre of mass link 2

%% Data acquisition

sim('Model_daq');
M = 5;              % dimension of the sliding window
downsampling = 50;  % reduce sampling frequency for higher simulation speed with FFD
q_reference  = [state(1:Ts*downsampling:500,1)';    % q1
                state(1:Ts*downsampling:500,2)'];   % q2
qd_reference = [state(1:Ts*downsampling:500,3)';    % q1d
                state(1:Ts*downsampling:500,4)'];   % q2d
Fh_reference = [F_h(1:Ts*downsampling:500,1)';      % Fh_x_ref
                F_h(1:Ts*downsampling:500,2)'];     % Fh_z_ref
position_ref = [xi(1:Ts*downsampling:500,1)';       % x_ref
                xi(1:Ts*downsampling:500,2)'];      % z_ref
speed_ref    = [xid(1:Ts*downsampling:500,1)';      % xd_ref
                xid(1:Ts*downsampling:500,2)'];     % zd_ref

%% Parameters for the optimization routine
Q = eye(4);             % weight matrix, e.g. diag([1 0.8 0.6 0.4 0.2])
x0 = [0;-200];          % initialize parameter estimate [F_hx; F_hz]
scaling = Ts*downsampling;  % set up the scaling for the downsampling
dim = 500/scaling;
F_h_star = zeros(2,dim);    % initialize vector of F_h_estimated
tau = zeros(2,M);       % torque vector passed to the functions; torques are set to zero
xi_test = zeros(4,500); % vector for storing the simulated trajectories
xi0 = zeros(2,M);
xi0(:,2) = [2.18;2.72]; % initial condition for the integrators, position
xd0 = zeros(2,M);       % initial condition for the integrators, speed

%% Optimization Routine
% Linear equality constraint parameters
A = [];
b = [];
% Linear inequality constraint parameters
C = [];     % e.g. [1 0; -1 0; 0 1; 0 -1]
d = [];     % e.g. [-5000;-5000;-5000;-5000]

for i = M+1:dim
    % Initialize solver options
    myoptions               = myoptimset;
    myoptions.Hessmethod    = 'GN';
    myoptions.GN_funF       = @(x)robot_sim_err(x,q_reference(:,i-M:i-1),...
                              qd_reference(:,i-M:i-1), tau, m_load, Q);
    myoptions.gradmethod    = 'CD';
    myoptions.graddx        = 2^-15;
    myoptions.tolgrad       = 1e-7;
    myoptions.ls_nitermax   = 50;
    myoptions.xsequence     = 'on';

    % Initial condition for the integrators
    q1_in   = xi0(1,2);
    q2_in   = xi0(2,2);
    q1_d_in = xd0(1,2);
    q2_d_in = xd0(2,2);

    % Run solver
    [xstar,fxstar,niter,exitflag] = myfmincon(@(x)robot_sim_cost(x,q_reference(:,i-M:i-1),...
                                    qd_reference(:,i-M:i-1),tau, m_load, Q),...
                                    x0,A,b,C,d,0,0,myoptions);
    xstar

    % Store the obtained result of xstar
    F_h_star(:,i) = xstar;
    xi_test(:,i)  = xi_sim(:,1);

    % xi0, xd0 and error are assigned to the base workspace by robot_sim_err
    iter = i
    error

end

%% Plot the results

t = 0:0.02:9.98;
figure
subplot 221
plot(t,xi_test(1,:)), grid on
hold on
plot(t,position_ref(1,:)), grid on
legend('simulated x trajectory','reference x trajectory')

subplot 222
plot(t,xi_test(2,:)), grid on
hold on
plot(t,position_ref(2,:)), grid on
legend('simulated z trajectory','reference z trajectory')

subplot 223
plot(t,xi_test(3,:)), grid on
hold on
plot(t,speed_ref(1,:)), grid on
legend('simulated x speed','reference x speed')

subplot 224
plot(t,xi_test(4,:)), grid on
hold on
plot(t,speed_ref(2,:)), grid on
legend('simulated z speed','reference z speed')

figure
subplot 211
plot(t,F_h_star(1,:)), grid on
hold on
plot(t,Fh_reference(1,:)), grid on
legend('simulated Fh_x','reference Fh_x')

subplot 212
plot(t,F_h_star(2,:)), grid on
hold on
plot(t,Fh_reference(2,:)), grid on
legend('simulated Fh_z','reference Fh_z')
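A minimal sketch of the sliding-window indexing used by the loop above (illustration only):

M = 5;
for i = M+1:8                  % first few MHE iterations
    window = i-M:i-1;          % indices of the reference samples passed to robot_sim_err
    fprintf('i = %d -> window = [%s]\n', i, num2str(window));
end
% i = 6 -> window = [1 2 3 4 5],  i = 7 -> window = [2 3 4 5 6],  ...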
--------------------------------------------------------------------------------
/Mark 21_semidef/definitivo.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 21_semidef/definitivo.mat
--------------------------------------------------------------------------------
/Mark 21_semidef/direct_kin_test.m:
--------------------------------------------------------------------------------
function [xe, ze, x_d, z_d] = direct_kin_test(q1, q2, q1_d, q2_d)
% Robot parameters
a1 = 1.2;   % length link 1
a2 = 1;     % length link 2

% Kinematic functions
c1  = cos(q1);
s1  = sin(q1);
c2  = cos(q2);
s2  = sin(q2);
c12 = cos(q1+q2);
s12 = sin(q1+q2);

% Computation of the end-effector quantities given the measured joint
% quantities
if q1 == 0 && q2 == 0
    xe = 0;
    ze = 0;
else
    xe = 0.5 + a1*c1 + a2*c12;
    ze = a1*s1 + a2*s12;
end
% xi = [xe;ze];

%% Computation of speeds
J = [-a1*sin(q1)-a2*sin(q1+q2)  -a2*sin(q1+q2);
      a1*cos(q1)+a2*cos(q1+q2)   a2*cos(q1+q2)];

xi_d = J*[q1_d;q2_d];

x_d = xi_d(1,1);
z_d = xi_d(2,1);

end
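A quick numerical sanity check (illustration only, not in the repository): the analytic end-effector speeds should match a finite-difference approximation of the forward kinematics.

q1 = 0.7; q2 = -0.4; q1_d = 0.3; q2_d = -0.1;
[xe, ze, x_d, z_d] = direct_kin_test(q1, q2, q1_d, q2_d);
dt = 1e-6;                              % small time step for the check
[xe2, ze2] = direct_kin_test(q1+dt*q1_d, q2+dt*q2_d, 0, 0);
fd = ([xe2;ze2]-[xe;ze])/dt;            % finite-difference speed estimate
disp([fd, [x_d;z_d]])                   % the two columns should agree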
--------------------------------------------------------------------------------
/Mark 21_semidef/linesearch_merit.m:
--------------------------------------------------------------------------------
function [xkp1,fxkp1,niter,tk] = linesearch_merit(fun,fxk,DT1k,xk,pk,tkmax,beta,c,nitermax)
% LINESEARCH_MERIT Computes the value of the decision variables xkp1 at
% iteration k+1 by carrying out a back-tracking line search, trying to
% achieve a sufficient decrease of the merit function T1(xk+tk*pk), where
% pk is the search direction.
%
%   INPUTS:
%           fun      = merit function
%           fxk      = merit function evaluated at xk
%           DT1k     = directional derivative of the merit function
%           xk       = value of the decision variables at the current
%                      iterate
%           pk       = search direction
%           tkmax    = maximum step size
%           beta     = ratio tk_ip1/tk_i during the back-tracking line
%                      search (beta in (0,1))
%           c        = ratio between achieved decrease and decrease
%                      predicted by the first-order Taylor expansion,
%                      sufficient to exit the back-tracking line search
%                      algorithm
%           nitermax = maximum number of iterations
%
%   OUTPUTS:
%           xkp1     = obtained value of xk+tk*pk
%           fxkp1    = cost function evaluated at xkp1
%           niter    = number of iterations employed to satisfy the Armijo
%                      condition
%           tk       = step length

niter = 0;
tk    = tkmax;
xkp1  = xk+tk*pk;
fxkp1 = fun(xkp1);

while isnan(fxkp1) || (fxkp1 > fxk+tk*c*DT1k && niter < nitermax)
    tk    = beta*tk;
    xkp1  = xk+tk*pk;
    fxkp1 = fun(xkp1);
    niter = niter+1;
end
end
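A one-dimensional illustration of the Armijo back-tracking logic (toy example, not part of the repository):

fun  = @(x) (x-2).^2;       % merit function with minimum at x = 2
xk   = 0; pk = 1;           % current point and (descent) search direction
fxk  = fun(xk);             % = 4
DT1k = -4;                  % directional derivative of fun at xk along pk
[xkp1,fxkp1,niter,tk] = linesearch_merit(fun,fxk,DT1k,xk,pk,1,0.8,0.1,20);
% tk = 1 is accepted immediately: fun(1) = 1 <= 4 + 1*0.1*(-4) = 3.6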
--------------------------------------------------------------------------------
/Mark 21_semidef/myfmincon.m:
--------------------------------------------------------------------------------
function [xstar,fxstar,niter,exitflag,xsequence] = myfmincon(fun,x0,A,b,C,d,p,q,myoptions)
% MYFMINCON Attempts to solve the problem:
%                       min  f(x)
%                        x
%                  s.t.  A*x  = b
%                        C*x >= d
%                        g(x) = 0
%                        h(x) >= 0
% and, if successful, returns a local minimizer xstar and the related local
% optimum fxstar=f(xstar). The solver employs a sequential quadratic
% programming (SQP) optimization scheme and back-tracking line search with
% an l-1 norm merit function and the Armijo condition.
%
%   INPUTS:
%           fun       = function providing the scalar cost function value
%                       f(x) and the vectors of p nonlinear equality
%                       constraints g(x), g:R^n->R^p, and q nonlinear
%                       inequality constraints h(x)
%           x0        = initial guess for the optimization variables
%           A         = matrix defining linear equality constraints
%           b         = vector defining linear equality constraints
%           C         = matrix defining linear inequality constraints
%           d         = vector defining linear inequality constraints
%           p         = number of nonlinear equality constraints
%           q         = number of nonlinear inequality constraints
%           myoptions = optimization options prepared with the myoptimset
%                       command
%
%   OUTPUTS:
%           xstar     = exit value, either a local minimizer or the value
%                       of x at the last iterate
%           fxstar    = cost function evaluated at xstar
%           niter     = number of employed iterations
%           exitflag  = termination condition:
%                       -2: max iterations reached, unfeasible point
%                       -1: max. number of iterations reached
%                        1: local minimum possible, gradient condition
%                        2: local minimum possible, step size condition
%                        3: local minimum possible, cost decrease condition
%           xsequence = sequence of iterates {xk} (only if option
%                       xsequence is set to 'on')

%% Initialization
n = length(x0);
k = 0;
deltaxk_rel = 1;
deltaf_rel  = 1;
lambdak = zeros(p+size(A,1),1);
muk     = zeros(q+size(C,1),1);
sigmak  = zeros(p+size(A,1),1);
tauk    = zeros(q+size(C,1),1);
eq_constr_max   = 0;
ineq_constr_min = 0;

if ~isempty(myoptions.outputfcn)
    outputfun = myoptions.outputfcn;
end
if strcmp(myoptions.display,'Iter')
    fprintf('Iteration NormGrad Cost Equality Inequality Rel. cost Rel. x Line-search\r')
end

% Start sequence of optimization variables at each iteration
xsequence = [];
if strcmp(myoptions.xsequence,'on')
    xsequence = [xsequence, x0];
end

%% Iterations
if strcmp(myoptions.Hessmethod,'GN')        % Constrained Gauss-Newton method
    % Compute cost and constraints and their gradients
    funF = myoptions.GN_funF;
    xk   = x0;
    [Vk,gradVk] = mygradient(funF,x0,myoptions.gradmethod,myoptions.graddx);
    Fxk     = Vk(1:end-(p+q),1);
    gradFxk = gradVk(:,1:end-(p+q));
    if ~isempty(A)
        gxk    = [A*xk-b;Vk(end-p-q+1:end-q,1)];
        gradgk = [A',gradVk(:,end-p-q+1:end-q,1)];
    else
        gxk    = Vk(end-p-q+1:end-q,1);
        gradgk = gradVk(:,end-p-q+1:end-q,1);
    end
    if ~isempty(C)
        hxk    = [C*xk-d;Vk(end-q+1:end,1)];
        gradhk = [C',gradVk(:,end-q+1:end,1)];
    else
        hxk    = Vk(end-q+1:end,1);
        gradhk = gradVk(:,end-q+1:end,1);
    end
    fxk     = Fxk'*Fxk;
    gradfxk = 2*gradFxk*Fxk;

    % Compute gradient of the Lagrange function
    gradLagr = gradfxk-gradgk*lambdak-gradhk*muk;

    % Compute worst-case equality and inequality constraints (for feedback
    % and termination conditions)
    if ~isempty(gxk)
        eq_constr_max = max(abs(gxk));
    end
    if ~isempty(hxk)
        ineq_constr_min = min(hxk);
    end

    % Feedback and output function
    if strcmp(myoptions.display,'Iter')
        fprintf('%9.0f %7.5e %6.5e %6.5e %6.5e %6.5e %6.5e %4.0f\r',...
            k,norm(gradLagr),fxk,eq_constr_max,ineq_constr_min,deltaf_rel,deltaxk_rel,0)
    end
    if ~isempty(myoptions.outputfcn)
        outputfun(xk);
    end

    % Solver iterations
    while norm(gradLagr) > myoptions.tolgrad...
            && k < myoptions.nitermax...
            && deltaxk_rel > myoptions.tolx...
            && deltaf_rel > myoptions.tolfun...
            || (max(eq_constr_max,-ineq_constr_min) > myoptions.tolconstr)

        % Compute search direction
        Hk = 2*(gradFxk*gradFxk')+myoptions.GN_sigma*eye(n);
        [pk,~,~,~,LagMult] = quadprog(Hk,gradfxk,-gradhk',hxk,gradgk',-gxk,[],[],[],myoptions.QPoptions);
        lambda_tilde = -LagMult.eqlin;
        mu_tilde     = LagMult.ineqlin;
        delta_lambda = lambda_tilde-lambdak;
        delta_mu     = mu_tilde-muk;

        % Update merit function
        sigmak = max(abs(lambda_tilde),(sigmak+abs(lambda_tilde))/2);
        tauk   = max(abs(mu_tilde),(tauk+abs(mu_tilde))/2);
        T1fun  = @(x)MeritT1(funF,x,A,b,C,d,p,q,sigmak,tauk,'GN');

        % Compute current merit function value
        T1k = T1fun(xk);

        % Compute directional derivative of the merit function
        ind_h_viol  = hxk<=0;
        gradhk_viol = gradhk(:,ind_h_viol);
        tauk_viol   = tauk(ind_h_viol,1);
        DT1k        = gradfxk'*pk-sigmak'*abs(gxk)-tauk_viol'*gradhk_viol'*pk;

        % Line search with merit function and directional derivative
        [xkp1,fxkp1,niter_LS,tk] = linesearch_merit(T1fun,T1k,DT1k,xk,pk,...
            myoptions.ls_tkmax,myoptions.ls_beta,...
            myoptions.ls_c,myoptions.ls_nitermax);

        % Update relative changes of cost and optimization variables
        deltaxk_rel = norm(xkp1-xk)/max(eps,norm(xk));
        deltaf_rel  = abs(fxkp1-fxk)/max(eps,abs(fxk));

        % Update primal and dual variables
        k       = k+1;
        xk      = xkp1;
        lambdak = lambdak+tk*delta_lambda;
        muk     = muk+tk*delta_mu;

        % Compute new cost, constraints, and their gradients
        [Vk,gradVk] = mygradient(funF,xk,myoptions.gradmethod,myoptions.graddx);
        Fxk     = Vk(1:end-(p+q),1);
        gradFxk = gradVk(:,1:end-(p+q));
        fxk     = Fxk'*Fxk;
        gradfxk = 2*gradFxk*Fxk;

        if ~isempty(A)
            gxk    = [A*xk-b;Vk(end-p-q+1:end-q,1)];
            gradgk = [A',gradVk(:,end-p-q+1:end-q,1)];
        else
            gxk    = Vk(end-p-q+1:end-q,1);
            gradgk = gradVk(:,end-p-q+1:end-q,1);
        end
        if ~isempty(C)
            hxk    = [C*xk-d;Vk(end-q+1:end,1)];
            gradhk = [C',gradVk(:,end-q+1:end,1)];
        else
            hxk    = Vk(end-q+1:end,1);
            gradhk = gradVk(:,end-q+1:end,1);
        end

        % Compute gradient of the Lagrangian
        gradLagr = gradfxk-gradgk*lambdak-gradhk*muk;

        % Compute worst-case equality and inequality constraints (for feedback
        % and termination conditions)
        if ~isempty(gxk)
            eq_constr_max = max(abs(gxk));
        end
        if ~isempty(hxk)
            ineq_constr_min = min(hxk);
        end

        % Feedback and output function
        if strcmp(myoptions.display,'Iter')
            fprintf('%9.0f %7.5e %6.5e %6.5e %6.5e %6.5e %6.5e %4.0f\r',...
                k,norm(gradLagr),fxk,eq_constr_max,ineq_constr_min,deltaf_rel,deltaxk_rel,niter_LS)
        end
        if ~isempty(myoptions.outputfcn)
            outputfun(xk);
        end

        % Store sequence of optimization variables at each iteration
        if strcmp(myoptions.xsequence,'on')
            xsequence = [xsequence, xk];
        end
    end
elseif strcmp(myoptions.Hessmethod,'BFGS')  % BFGS method
    % Compute cost and constraints and their gradients
    xk = x0;
    Hk = 1e-4*eye(n);
    [Vk,gradVk] = mygradient(fun,x0,myoptions.gradmethod,myoptions.graddx);
    fxk     = Vk(1:end-(p+q),1);
    gradfxk = gradVk(:,1:end-(p+q));
    if ~isempty(A)
        gxk    = [A*xk-b;Vk(end-p-q+1:end-q,1)];
        gradgk = [A',gradVk(:,end-p-q+1:end-q,1)];
    else
        gxk    = Vk(end-p-q+1:end-q,1);
        gradgk = gradVk(:,end-p-q+1:end-q,1);
    end
    if ~isempty(C)
        hxk    = [C*xk-d;Vk(end-q+1:end,1)];
        gradhk = [C',gradVk(:,end-q+1:end,1)];
    else
        hxk    = Vk(end-q+1:end,1);
        gradhk = gradVk(:,end-q+1:end,1);
    end

    % Compute gradient of the Lagrange function
    gradLagr = gradfxk-gradgk*lambdak-gradhk*muk;

    % Compute worst-case equality and inequality constraints (for feedback
    % and termination conditions)
    if ~isempty(gxk)
        eq_constr_max = max(abs(gxk));
    end
    if ~isempty(hxk)
        ineq_constr_min = min(hxk);
    end

    % Feedback and output function
    if strcmp(myoptions.display,'Iter')
        fprintf('%9.0f %7.5e %6.5e %6.5e %6.5e %6.5e %6.5e %4.0f\r',...
            k,norm(gradLagr),fxk,eq_constr_max,ineq_constr_min,deltaf_rel,deltaxk_rel,0)
    end
    if ~isempty(myoptions.outputfcn)
        outputfun(xk);
    end

    % Solver iterations
    while norm(gradLagr) > myoptions.tolgrad...
            && k < myoptions.nitermax...
            && deltaxk_rel > myoptions.tolx...
            && deltaf_rel > myoptions.tolfun...
            || (max(eq_constr_max,-ineq_constr_min) > myoptions.tolconstr)

        % Compute new search direction
        [pk,~,~,~,LagMult] = quadprog(Hk,gradfxk,-gradhk',hxk,gradgk',-gxk,[],[],[],myoptions.QPoptions);
        lambda_tilde = -LagMult.eqlin;
        mu_tilde     = LagMult.ineqlin;
        delta_lambda = lambda_tilde-lambdak;
        delta_mu     = mu_tilde-muk;

        % Update merit function weights
        sigmak = max(abs(lambda_tilde),(sigmak+abs(lambda_tilde))/2);
        tauk   = max(abs(mu_tilde),(tauk+abs(mu_tilde))/2);
        T1fun  = @(x)MeritT1(fun,x,A,b,C,d,p,q,sigmak,tauk,'BFGS');
        T1k    = T1fun(xk);

        % Compute directional derivative
        ind_h_viol  = hxk<=0;
        gradhk_viol = gradhk(:,ind_h_viol);
        tauk_viol   = tauk(ind_h_viol,1);
        DT1k        = gradfxk'*pk-sigmak'*abs(gxk)-tauk_viol'*gradhk_viol'*pk;

        % Line search with merit function
        [xkp1,fxkp1,niter_LS,tk] = linesearch_merit(T1fun,T1k,DT1k,xk,pk,...
            myoptions.ls_tkmax,myoptions.ls_beta,...
            myoptions.ls_c,myoptions.ls_nitermax);
        deltaxk_rel = norm(xkp1-xk)/max(eps,norm(xk));
        deltaf_rel  = abs(fxkp1-fxk)/max(eps,abs(fxk));
        lambdakp1   = lambdak+tk*delta_lambda;
        mukp1       = muk+tk*delta_mu;

        % Compute new cost, constraints and their gradients
        [Vkp1,gradVkp1] = mygradient(fun,xkp1,myoptions.gradmethod,myoptions.graddx);
        fxkp1     = Vkp1(1:end-(p+q),1);
        gradfxkp1 = gradVkp1(:,1:end-(p+q));
        if ~isempty(A)
            gxkp1    = [A*xkp1-b;Vkp1(end-p-q+1:end-q,1)];
            gradgkp1 = [A',gradVkp1(:,end-p-q+1:end-q,1)];
        else
            gxkp1    = Vkp1(end-p-q+1:end-q,1);
            gradgkp1 = gradVkp1(:,end-p-q+1:end-q,1);
        end
        if ~isempty(C)
            hxkp1    = [C*xkp1-d;Vkp1(end-q+1:end,1)];
            gradhkp1 = [C',gradVkp1(:,end-q+1:end,1)];
        else
            hxkp1    = Vkp1(end-q+1:end,1);
            gradhkp1 = gradVkp1(:,end-q+1:end,1);
        end

        % Compute new gradient of the Lagrangian
        gradLagrkp1 = gradfxkp1-gradgkp1*lambdakp1-gradhkp1*mukp1;

        % Compute gradient of the Lagrangian with previous xk and new
        % multipliers (for BFGS update)
        gradLagrk_kp1 = gradfxk-gradgk*lambdakp1-gradhk*mukp1;

        % Update Hessian estimate with BFGS rule
        y = gradLagrkp1-gradLagrk_kp1;
        s = xkp1-xk;
        if max(abs(s))>0
            if y'*s <= myoptions.BFGS_gamma*(s'*Hk*s)
                y = y+(myoptions.BFGS_gamma*s'*Hk*s-s'*y)/(s'*Hk*s-s'*y)*(Hk*s-y);
            end
            Hk = Hk-(Hk*(s*s')*Hk)/(s'*Hk*s)+(y*y')/(s'*y);
            Hk = 0.5*(Hk+Hk');
        end

        % Update variables
        k        = k+1;
        xk       = xkp1;
        fxk      = fxkp1;
        gradfxk  = gradfxkp1;
        gxk      = gxkp1;
        gradgk   = gradgkp1;
        hxk      = hxkp1;
        gradhk   = gradhkp1;
        gradLagr = gradLagrkp1;
        lambdak  = lambdakp1;
        muk      = mukp1;

        % Compute worst-case equality and inequality constraints (for feedback
        % and termination conditions)
        if ~isempty(gxk)
            eq_constr_max = max(abs(gxk));
        end
        if ~isempty(hxk)
            ineq_constr_min = min(hxk);
        end

        % Feedback and output function
        if strcmp(myoptions.display,'Iter')
            fprintf('%9.0f %7.5e %6.5e %6.5e %6.5e %6.5e %6.5e %4.0f\r',...
                k,norm(gradLagr),fxk,eq_constr_max,ineq_constr_min,deltaf_rel,deltaxk_rel,niter_LS)
        end
        if ~isempty(myoptions.outputfcn)
            outputfun(xk);
        end

        % Store sequence of optimization variables at each iteration
        if strcmp(myoptions.xsequence,'on')
            xsequence = [xsequence, xk];
        end
    end
end

%% Termination
xstar  = xk;
fxstar = fxk;
if max(eq_constr_max,-ineq_constr_min) <= myoptions.tolconstr
    if norm(gradLagr) <= myoptions.tolgrad
        exitflag = 1;
        if strcmp(myoptions.display,'Iter')
            fprintf('Local minimum possible, directional derivative smaller than tolerance. Constraints satisfied.\r')
        end
    elseif k >= myoptions.nitermax
        exitflag = -1;
        if strcmp(myoptions.display,'Iter')
            fprintf('Maximum number of iterations reached. Constraints satisfied.\r')
        end
    elseif deltaxk_rel <= myoptions.tolx
        exitflag = 2;
        if strcmp(myoptions.display,'Iter')
            fprintf('Local minimum possible, relative step size smaller than tolerance. Constraints satisfied.\r')
        end
    elseif deltaf_rel <= myoptions.tolfun
        exitflag = 3;
        if strcmp(myoptions.display,'Iter')
            fprintf('Local minimum possible, relative cost decrease smaller than tolerance. Constraints satisfied.\r')
        end
    end
elseif max(eq_constr_max,-ineq_constr_min) > myoptions.tolconstr
    if k >= myoptions.nitermax
        exitflag = -2;
        if strcmp(myoptions.display,'Iter')
            fprintf('Maximum number of iterations reached. Constraints not satisfied.\r')
        end
    end
end
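A minimal smoke test of the solver on an assumed toy constrained problem (illustration only; requires quadprog from the Optimization Toolbox):

myoptions            = myoptimset;
myoptions.Hessmethod = 'BFGS';
myoptions.gradmethod = 'CD';
myoptions.graddx     = 2^-17;
% min (x1-2)^2 + (x2-1)^2   s.t.   h(x) = 1 - x1^2 - x2^2 >= 0   (unit disk)
fun = @(x) [(x(1)-2)^2 + (x(2)-1)^2;    % cost f(x)
            1 - x(1)^2 - x(2)^2];       % nonlinear inequality h(x) >= 0
[xstar,fxstar,niter,exitflag] = myfmincon(fun,[0.5;0.5],[],[],[],[],0,1,myoptions);
% expected: xstar close to [2;1]/sqrt(5), the projection of [2;1] onto the disk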
--------------------------------------------------------------------------------
/Mark 21_semidef/mygradient.m:
--------------------------------------------------------------------------------
function [fxk,gradient] = mygradient(fun,xk,method,dx)
% MYGRADIENT Computes the gradient (i.e. the Jacobian transpose) of a
% given function, using one of several possible methods.
%   INPUTS:
%           fun      = function whose gradient shall be evaluated (in
%                      general a vector of N functions)
%           xk       = value of the n-dimensional input argument at which
%                      to evaluate the gradient (dfun(x)/dx)'
%           method   = string indicating the differentiation method: 'FD'
%                      (Forward Finite Differences), 'CD' (Central Finite
%                      Differences), 'UP' (User Provided), 'IM'
%                      (Imaginary-part trick).
%                      If 'UP' is used, function fun(x) shall return as
%                      second output the Jacobian dfun(x)/dx evaluated at
%                      x = xk.
%           dx       = perturbation step used in the finite-difference
%                      approximation
%
%   OUTPUTS:
%           fxk      = value of fun(xk)
%           gradient = n-by-N matrix containing in each column the partial
%                      derivatives of the corresponding element of function
%                      fun(x) with respect to each component of x,
%                      evaluated at xk

if strcmp(method,'UP')
    [fxk,gradient] = fun(xk);
elseif strcmp(method,'FD')
    n = length(xk);
    p = zeros(n,1);
    fxk = fun(xk);
    N = length(fxk);
    gradient = zeros(n,N);
    p(1,1) = dx;
    gradient(1,:) = (fun(xk+p)-fun(xk))'/dx;
    for ind = 2:n
        p(ind-1,1) = 0;
        p(ind,1) = dx;
        gradient(ind,:) = (fun(xk+p)-fun(xk))'/dx;
    end
elseif strcmp(method,'CD')
    n = length(xk);
    p = zeros(n,1);
    fxk = fun(xk);
    N = length(fxk);
    gradient = zeros(n,N);
    p(1,1) = dx;
    gradient(1,:) = (fun(xk+p)-fun(xk-p))'/(2*dx);
    for ind = 2:n
        p(ind-1,1) = 0;
        p(ind,1) = dx;
        gradient(ind,:) = (fun(xk+p)-fun(xk-p))'/(2*dx);
    end
elseif strcmp(method,'IM')
    n = length(xk);
    p = zeros(n,1);
    fxk = fun(xk);
    N = length(fxk);
    gradient = zeros(n,N);
    tol = 1e-100;
    p(1,1) = 1j*tol;
    gradient(1,:) = imag(fun(xk+p))'/tol;
    for ind = 2:n
        p(ind-1,1) = 0;
        p(ind,1) = 1j*tol;
        gradient(ind,:) = imag(fun(xk+p))'/tol;
    end
end
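A small accuracy comparison of the three built-in differentiation methods on an assumed test function (illustration only):

f  = @(x) [sin(x(1))*x(2); x(1)^2];     % test function, R^2 -> R^2
xk = [1;2];
Jt = [cos(1)*2  2;                      % analytic (dfun/dx)', n-by-N
      sin(1)    0];
[~,gFD] = mygradient(f,xk,'FD',2^-26);
[~,gCD] = mygradient(f,xk,'CD',2^-17);
[~,gIM] = mygradient(f,xk,'IM',0);      % dx is unused by 'IM'
disp([norm(gFD-Jt) norm(gCD-Jt) norm(gIM-Jt)])  % 'IM' is accurate to machine precision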
--------------------------------------------------------------------------------
/Mark 21_semidef/myoptimset.m:
--------------------------------------------------------------------------------
function myoptions = myoptimset
% MYOPTIMSET Sets default options for the user-developed nonlinear
% programming routines, both unconstrained and constrained.
%
%   OUTPUTS:
%           myoptions = structure containing all options required by the
%                       NLP solvers
%
%   %% General options
%   myoptions.display    = 'Iter';  % display iteration output
%   myoptions.xsequence  = 'off';   % store sequence of points {xk}
%   myoptions.tolgrad    = 1e-6;    % termination tolerance on the norm of
%                                   % the directional derivative
%   myoptions.tolx       = 1e-12;   % termination tolerance on the relative
%                                   % change of optimization variables
%   myoptions.tolfun     = 1e-12;   % termination tolerance on the relative
%                                   % improvement of the cost function
%   myoptions.tolconstr  = 1e-6;    % constraint satisfaction tolerance
%   myoptions.nitermax   = 50;      % maximum number of iterations
%
%   %% Differentiation options
%   myoptions.gradmethod = 'FD';    % method for gradient computation:
%                                   % FD = Forward Differences
%                                   % CD = Central Differences
%                                   % IM = Imaginary-part trick
%                                   % UP = User Provided
%   myoptions.graddx     = 2^-26;   % perturbation for gradient computation
%                                   % (use 2^-26 for FD, 2^-17 for CD)
%
%   %% Line search options
%   myoptions.ls_tkmax    = 1;      % maximum step size
%   myoptions.ls_beta     = 0.8;    % beta scaling factor for back-tracking
%                                   % line search
%   myoptions.ls_c        = 0.1;    % c coefficient for back-tracking line
%                                   % search
%   myoptions.ls_nitermax = 20;     % max. number of back-tracking line
%                                   % search iterations
%
%   %% Quasi-Newton method options
%   myoptions.Hessmethod = 'BFGS';  % method for Hessian computation:
%                                   % Exact = exact Newton; f(x) shall
%                                   %         provide as second output the
%                                   %         gradient of f and as third
%                                   %         output the Hessian of f
%                                   % BFGS  = BFGS algorithm
%                                   % GN    = Gauss-Newton algorithm
%                                   % SD    = steepest descent
%   myoptions.BFGS_gamma = 1e-1;    % gamma factor for Powell's trick in
%                                   % the BFGS algorithm

%% General options
myoptions.display   = 'Iter';   % Display iteration output
myoptions.xsequence = 'off';    % Store sequence of points {xk}
myoptions.tolgrad   = 1e-6;     % Termination tolerance on the norm
                                % of the directional derivative
myoptions.tolconstr = 1e-6;     % Constraint satisfaction tolerance
myoptions.tolx      = 1e-12;    % Termination tolerance on the relative
                                % change of optimization variables
myoptions.tolfun    = 1e-12;    % Termination tolerance on the relative
                                % improvement of the cost function
myoptions.nitermax  = 50;       % Maximum number of iterations
myoptions.outputfcn = [];       % Handle for output function

%% Differentiation options
myoptions.gradmethod = 'FD';    % Method for gradient computation:
                                % FD = Forward Differences
                                % CD = Central Differences
                                % IM = Imaginary-part trick
                                % UP = User Provided
myoptions.graddx     = 2^-26;   % Perturbation for gradient computation
                                % (use 2^-26 for FD, 2^-17 for CD)

%% Line search options
myoptions.ls_tkmax    = 1;      % Maximum step size
myoptions.ls_beta     = 0.8;    % Beta scaling factor for back-tracking
                                % line search
myoptions.ls_c        = 0.1;    % c coefficient for back-tracking line
                                % search
myoptions.ls_nitermax = 20;     % Max. number of back-tracking line search
                                % iterations

%% Quasi-Newton method options
myoptions.Hessmethod = 'BFGS';  % Method for Hessian computation:
                                % Exact = exact Newton; the cost function
                                %         is expected to provide as second
                                %         output the gradient and as third
                                %         output the Hessian matrix
                                % BFGS  = BFGS algorithm
                                % GN    = Gauss-Newton algorithm
                                % SD    = Steepest Descent
myoptions.BFGS_gamma = 1e-1;    % Gamma factor for Powell's trick in the
                                % BFGS algorithm
myoptions.GN_funF    = [];      % Function providing the value of F in the
                                % Gauss-Newton method, assuming
                                % f(x)=F(x)'*F(x)
myoptions.GN_sigma   = 0;       % Coefficient to ensure the Hessian is
                                % positive definite in the Gauss-Newton
                                % method (H=GradF*GradF'+eye(n)*sigma)
myoptions.QPoptions  = ...
    optimset('Display','none','Algorithm','interior-point-convex');    % options for the QP solver in SQP methods
end
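A typical override pattern for Gauss-Newton runs (illustration only; the residual function below is an assumed toy example):

myoptions            = myoptimset;          % start from the defaults above
myoptions.Hessmethod = 'GN';                % GN also requires GN_funF:
myoptions.GN_funF    = @(x) [x(1)-1; 10*(x(2)-x(1)^2)];     % residuals F(x)
myoptions.gradmethod = 'CD';
myoptions.graddx     = 2^-17;               % suggested perturbation for 'CD'
fcost = @(x) sum(myoptions.GN_funF(x).^2);  % cost consistent with f(x) = F(x)'*F(x)
[xstar,fxstar] = myfmincon(fcost,[-1;1],[],[],[],[],0,0,myoptions);     % -> xstar ~ [1;1]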
--------------------------------------------------------------------------------
/Mark 21_semidef/robot_dynamics.m:
--------------------------------------------------------------------------------
function [q1_dd, q2_dd] = robot_dynamics(tau, state, d)

F_hx   = d(1);
F_hz   = d(2);
m_load = d(3);
q1   = state(1);
q2   = state(2);
q1_d = state(3);
q2_d = state(4);

% Robot parameters
g   = 9.81;
J1  = 10;       % inertia link 1
J2  = 10;       % inertia link 2
m1  = 50;       % mass link 1
m2  = 50;       % mass link 2
a1  = 1.2;      % length link 1
a2  = 1;        % length link 2
ac1 = a1/2;     % centre of mass link 1
ac2 = a2/2;     % centre of mass link 2

% Kinematic functions
c1  = cos(q1);
s1  = sin(q1);
c2  = cos(q2);
s2  = sin(q2);
c12 = cos(q1+q2);
s12 = sin(q1+q2);

J = [-a1*sin(q1)-a2*sin(q1+q2)  -a2*sin(q1+q2);
      a1*cos(q1)+a2*cos(q1+q2)   a2*cos(q1+q2)];

M_tot = [m1*ac1^2+m2*(a1^2+ac2^2+2*a1*ac2*c2)+J1+J2  m2*(ac2^2+a1*ac2*c2)+J2;
         m2*(ac2^2+a1*ac2*c2)+J2                     m2*ac2^2+J2];

% Coriolis/centrifugal terms, currently neglected:
% C_tot = [-a1*q2_d*s2*ac2*m2  -a1*q1_d*s2*ac2*m2;
%          -a1*q1_d*s2*ac2*m2   0];
% (the term - C_tot*[q1_d;q2_d] would enter the equation of motion below)

g_tot = [(m1*ac1+m2*a1)*g*c1+m2*g*ac2*c12;
         m2*g*ac2*c12];

% Equation of motion of the 2-DOF manipulator
q_dd  = M_tot\(tau + J'*[F_hx;F_hz - m_load*g] - g_tot);
q1_dd = q_dd(1);
q2_dd = q_dd(2);

end
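A static-equilibrium sanity check (illustration only): with the torques set to the gravity vector, no human force and no load, the arm should not accelerate from rest.

q1 = pi/4; q2 = -pi/3;
state = [q1; q2; 0; 0];                 % at rest
g = 9.81; m1 = 50; m2 = 50; a1 = 1.2; ac1 = 0.6; ac2 = 0.5;
g_tot = [(m1*ac1+m2*a1)*g*cos(q1) + m2*g*ac2*cos(q1+q2);
          m2*g*ac2*cos(q1+q2)];         % gravity torque, as in the function
[q1_dd, q2_dd] = robot_dynamics(g_tot, state, [0;0;0]);
disp([q1_dd, q2_dd])                    % both should be ~0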
--------------------------------------------------------------------------------
/Mark 21_semidef/robot_sim_cost.m:
--------------------------------------------------------------------------------
function [cost, q_sim] = robot_sim_cost(x,q_meas,qd_ref,tau,m_load,Q)

% Compute the quadratic cost of the error obtained after the simulation
[err_vec, q_sim] = robot_sim_err(x, q_meas, qd_ref, tau, m_load, Q);
cost = sum(err_vec.*err_vec);

end
--------------------------------------------------------------------------------
/Mark 21_semidef/robot_sim_err.m:
--------------------------------------------------------------------------------
function [err_vec,q_sim] = robot_sim_err(x, q_reference, qd_reference, tau, m_load, Q)

%% Initialize simulation parameters

F_h = [x(1,1); x(2,1)];         % set the optimization variables as [Fh_x;Fh_z]
scaling = size(q_reference,2);  % number of samples
disturbances = zeros(5,4);
t_sim = 0:0.02:0.08;

for j = 1:5                     % 5 = dimension of the sliding window M
    disturbances(j,2:3) = F_h';
    disturbances(j,4)   = m_load;
end
disturbances(:,1) = t_sim';
dist_sim = disturbances;                % pass the disturbance matrix to Simulink
assignin('base', 'dist_sim', dist_sim); % through the base workspace

tau0 = [t_sim' tau'];
tau_sim = tau0;
assignin('base', 'tau_sim', tau_sim);   % same for the torque matrix

% Simulate the model

sim('Model_sim');
q_sim  = [q1_sim q2_sim]';
qd_sim = [q1d_sim q2d_sim]';

%% Simulation of the trajectories

xi_test = zeros(4,5);
for j = 1:5
    [xe, ze, xe_d, ze_d] = direct_kin_test(q_sim(1,j),q_sim(2,j),...
        qd_sim(1,j), qd_sim(2,j));      % first two rows positions, last two speeds
    xi_test(:,j) = [xe, ze, xe_d, ze_d]';
end

%% Stack errors in one vector

q_ref   = [q_reference; qd_reference];
q_test  = [q_sim; qd_sim];
err     = Q*(q_ref-q_test);
err_vec = [err(1,:) err(2,:) err(3,:) err(4,:)]'/sqrt(scaling);

%% Assign the parameters to the base workspace

assignin('base', 'xi0', q_sim);
assignin('base', 'xd0', qd_sim);
assignin('base', 'error', err);
assignin('base', 'xi_sim', xi_test);
end
--------------------------------------------------------------------------------
/Mark 23_semidef/MeritT1.m:
--------------------------------------------------------------------------------
function [T1val] = MeritT1(fun,xk,A,b,C,d,p,q,sigma,tau,mode)
% MERITT1 Computes the value of the merit function.
%
%   INPUTS:
%           fun   = cost function
%           xk    = current point
%           A     = matrix defining linear equality constraints
%           b     = vector defining linear equality constraints
%           C     = matrix defining linear inequality constraints
%           d     = vector defining linear inequality constraints
%           p     = number of nonlinear equality constraints
%           q     = number of nonlinear inequality constraints
%           sigma = vector of weights, one for each equality constraint
%           tau   = vector of weights, one for each inequality constraint
%           mode  = 'GN':   Gauss-Newton
%                   'BFGS': BFGS
%   OUTPUTS:
%           T1val = value of the merit function

[Vk] = fun(xk);
if strcmp(mode,'GN')
    Fxk = Vk(1:end-(p+q),1);
    fxk = Fxk'*Fxk;
elseif strcmp(mode,'BFGS')
    fxk = Vk(1:end-(p+q),1);
end
if ~isempty(A)
    gxk = [A*xk-b;Vk(end-p-q+1:end-q,1)];
else
    gxk = Vk(end-p-q+1:end-q,1);
end
if ~isempty(C)
    hxk = [C*xk-d;Vk(end-q+1:end,1)];
else
    hxk = Vk(end-q+1:end,1);
end
T1val = fxk+sigma'*abs(gxk)+tau'*max(zeros(size(hxk)),-hxk);
--------------------------------------------------------------------------------
/Mark 23_semidef/Model_daq.slx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 23_semidef/Model_daq.slx
--------------------------------------------------------------------------------
/Mark 23_semidef/Model_sim.slx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 23_semidef/Model_sim.slx
--------------------------------------------------------------------------------
/Mark 23_semidef/Project.m:
--------------------------------------------------------------------------------
% Constrained numerical optimization for estimation and control.
% Project by Rodrigo Senofieni, Andrea Ghezzi and Luca Brolese.
% A.Y. 2018/2019
% Real-time optimal control of the torques of a robot helping a human to
% lift a load. The optimal control problem is solved over a sliding window.

%%
close all
clear all
clc

%% Parameters
fs = 50;            % sampling frequency
Ts = 1/fs;          % sampling period
tau_p = 0.1;        % pole of the human TF
tau_z = 1;          % zero of the human TF
k = 100;            % gain of the human TF
m_load = 10;        % mass of the load
Fmax = 100;         % maximum force of the human; can be used in a saturation block
s = tf('s');        % initialize the TF
H = k*(1+tau_z*s)/(1+tau_p*s);  % human TF
[numH, denH] = tfdata(H, 'v');  % human TF parameters for Simulink
% Parameters of the robot
g = 9.81;           % gravitational acceleration
J1 = 10;            % inertia link 1
J2 = 10;            % inertia link 2
m1 = 50;            % mass link 1
m2 = 50;            % mass link 2
a1 = 1.2;           % length link 1
a2 = 1;             % length link 2
l1 = a1/2;          % centre of mass link 1
l2 = a2/2;          % centre of mass link 2

%% Data acquisition

sim('Model_daq');
M = 5;              % dimension of the sliding window
downsampling = 50;  % reduce sampling frequency for higher simulation speed with FFD
q_reference  = [q_ref(1:Ts*downsampling:500,1)';    % q1
                q_ref(1:Ts*downsampling:500,2)'];   % q2
qd_reference = [qd_ref(1:Ts*downsampling:500,1)';   % q1d
                qd_ref(1:Ts*downsampling:500,2)'];  % q2d
position_ref = [xi_ref(1:Ts*downsampling:500,1)';   % x_ref
                xi_ref(1:Ts*downsampling:500,2)'];  % z_ref
speed_ref    = [xid_ref(1:Ts*downsampling:500,1)';  % xd_ref
                xid_ref(1:Ts*downsampling:500,2)']; % zd_ref

%% Parameters for the optimization routine
Q = eye(4);             % weight matrix, e.g. diag([1 0.8 0.6 0.4 0.2])
x0 = [100;100];         % initialize the decision variables [tau1; tau2]
scaling = Ts*downsampling;  % set up the scaling for the downsampling
dim = 500/scaling;
F_h_star = zeros(2,dim);    % initialize vector of F_h_estimated
xi_test = zeros(4,500); % vector for storing the simulated trajectories
xi0 = zeros(2,M);
xi0(:,2) = [2.18;2.72]; % initial condition for the integrators, position
xd0 = zeros(2,M);       % initial condition for the integrators, speed
pos_ref = zeros(5,3);   % initialize empty vector for storing the reference position
Fh = zeros(2,500);      % empty vector for storing the resulting human force
sim_time = 0:0.02:0.08; % vector for simulation time
error_xi = zeros(5,3);  % error along the x-z directions for the human closed loop
error_xi(:,2:3) = position_ref(:,1:5)';     % initialize the error = pos_ref
error_xi(:,1) = sim_time';
%% Optimization Routine
% Linear equality constraint parameters
A = [];
b = [];
% Linear inequality constraint parameters
C = [];
d = [];
tic
for i = 1:dim-M
    % Initialize solver options
    myoptions               = myoptimset;
    myoptions.Hessmethod    = 'GN';
    myoptions.GN_funF       = @(x)robot_sim_err(x,q_reference(:,i+1:M+i),...
                              qd_reference(:,i+1:M+i),pos_ref, m_load, Q);
    myoptions.gradmethod    = 'CD';
    myoptions.graddx        = 2^-13;
    myoptions.tolgrad       = 1e-6;
    myoptions.tolx          = 1e-10;    % termination tolerance on the relative
                                        % change of the optimization variables
    myoptions.ls_nitermax   = 1e2;
    myoptions.xsequence     = 'on';

    % Initial condition for the integrators
    q1_in   = xi0(1,2);
    q2_in   = xi0(2,2);
    q1_d_in = xd0(1,2);
    q2_d_in = xd0(2,2);

    % Initialize at each cycle the position reference for Simulink
    pos_ref(:,2:3) = position_ref(:,i+1:M+i)';
    pos_ref(:,1)   = sim_time';

    % Run solver
    [xstar,fxstar,niter,exitflag] = myfmincon(@(x)robot_sim_cost(x,q_reference(:,i+1:M+i),...
                                    qd_reference(:,i+1:M+i),pos_ref, m_load, Q),...
                                    x0,A,b,C,d,0,0,myoptions);
    xstar

    % Store the obtained results
    xi_test(:,i)  = xi_sim(:,1);
    tau_star(:,i) = xstar(1:2,1);
    Fh(:,i)       = F_hu(:,1);

    % xi0, xd0 and error are assigned to the base workspace by robot_sim_err
    iter = i
    error

end
toc
%% Plot the results

t = 0:0.02:9.98;
figure
subplot 221
plot(t,xi_test(1,:)), grid on
hold on
plot(t,position_ref(1,:)), grid on
legend('simulated x trajectory','reference x trajectory')

subplot 222
plot(t,xi_test(2,:)), grid on
hold on
plot(t,position_ref(2,:)), grid on
legend('simulated z trajectory','reference z trajectory')

subplot 223
plot(t,xi_test(3,:)), grid on
hold on
plot(t,speed_ref(1,:)), grid on
legend('simulated x speed','reference x speed')

subplot 224
plot(t,xi_test(4,:)), grid on
hold on
plot(t,speed_ref(2,:)), grid on
legend('simulated z speed','reference z speed')

figure
subplot 211
plot(t,Fh(1,:)), grid on
hold on
% plot(t,Fh_reference(1,:)), grid on
% legend('simulated Fh_x','reference Fh_x')

subplot 212
plot(t,Fh(2,:)), grid on
hold on
% plot(t,Fh_reference(2,:)), grid on
% legend('simulated Fh_z','reference Fh_z')

figure
subplot 211
plot(t(:,1:216), tau_star(1,:)), grid on    % set the length of the time vector to match the dimension of tau_star
subplot 212
plot(t(:,1:216), tau_star(2,:)), grid on
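The loop above discards niter and exitflag; a sketch of how one window could check convergence and warm-start the next one (the warm start is an assumption, not in the original code):

[xstar,fxstar,niter,exitflag] = myfmincon(@(x)robot_sim_cost(x,q_reference(:,2:M+1),...
                                qd_reference(:,2:M+1),pos_ref, m_load, Q),...
                                x0,A,b,C,d,0,0,myoptions);
if exitflag < 0
    warning('Window did not converge (exitflag = %d after %d iterations)', exitflag, niter);
end
x0 = xstar;     % warm-start the next window with the current optimal torques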
--------------------------------------------------------------------------------
/Mark 23_semidef/direct_kin_test.m:
--------------------------------------------------------------------------------
function [xe, ze, x_d, z_d] = direct_kin_test(q1, q2, q1_d, q2_d)
% Robot parameters
a1 = 1.2;   % length link 1
a2 = 1;     % length link 2

% Kinematic functions
c1  = cos(q1);
s1  = sin(q1);
c2  = cos(q2);
s2  = sin(q2);
c12 = cos(q1+q2);
s12 = sin(q1+q2);

% Computation of the end-effector quantities given the measured joint
% quantities
if q1 == 0 && q2 == 0
    xe = 0;
    ze = 0;
else
    xe = 0.5 + a1*c1 + a2*c12;
    ze = a1*s1 + a2*s12;
end
% xi = [xe;ze];

%% Computation of speeds
J = [-a1*sin(q1)-a2*sin(q1+q2)  -a2*sin(q1+q2);
      a1*cos(q1)+a2*cos(q1+q2)   a2*cos(q1+q2)];

xi_d = J*[q1_d;q2_d];

x_d = xi_d(1,1);
z_d = xi_d(2,1);

end
--------------------------------------------------------------------------------
/Mark 23_semidef/linesearch_merit.m:
--------------------------------------------------------------------------------
function [xkp1,fxkp1,niter,tk] = linesearch_merit(fun,fxk,DT1k,xk,pk,tkmax,beta,c,nitermax)
% LINESEARCH_MERIT Computes the value of the decision variables xkp1 at
% iteration k+1 by carrying out a back-tracking line search, trying to
% achieve a sufficient decrease of the merit function T1(xk+tk*pk), where
% pk is the search direction.
%
%   INPUTS:
%           fun      = merit function
%           fxk      = merit function evaluated at xk
%           DT1k     = directional derivative of the merit function
%           xk       = value of the decision variables at the current
%                      iterate
%           pk       = search direction
%           tkmax    = maximum step size
%           beta     = ratio tk_ip1/tk_i during the back-tracking line
%                      search (beta in (0,1))
%           c        = ratio between achieved decrease and decrease
%                      predicted by the first-order Taylor expansion,
%                      sufficient to exit the back-tracking line search
%                      algorithm
%           nitermax = maximum number of iterations
%
%   OUTPUTS:
%           xkp1     = obtained value of xk+tk*pk
%           fxkp1    = cost function evaluated at xkp1
%           niter    = number of iterations employed to satisfy the Armijo
%                      condition
%           tk       = step length

niter = 0;
tk    = tkmax;
xkp1  = xk+tk*pk;
fxkp1 = fun(xkp1);

while isnan(fxkp1) || (fxkp1 > fxk+tk*c*DT1k && niter < nitermax)
    tk    = beta*tk;
    xkp1  = xk+tk*pk;
    fxkp1 = fun(xkp1);
    niter = niter+1;
end
end
--------------------------------------------------------------------------------
/Mark 23_semidef/myfmincon.m:
--------------------------------------------------------------------------------
function [xstar,fxstar,niter,exitflag,xsequence] = myfmincon(fun,x0,A,b,C,d,p,q,myoptions)
% MYFMINCON Attempts to solve the problem:
%                       min  f(x)
%                        x
%                  s.t.  A*x  = b
%                        C*x >= d
%                        g(x) = 0
%                        h(x) >= 0
% and, if successful, returns a local minimizer xstar and the related local
% optimum fxstar=f(xstar). The solver employs a sequential quadratic
% programming (SQP) optimization scheme and back-tracking line search with
% an l-1 norm merit function and the Armijo condition.
%
%   INPUTS:
%           fun       = function providing the scalar cost function value
%                       f(x) and the vectors of p nonlinear equality
%                       constraints g(x), g:R^n->R^p, and q nonlinear
%                       inequality constraints h(x)
%           x0        = initial guess for the optimization variables
%           A         = matrix defining linear equality constraints
%           b         = vector defining linear equality constraints
%           C         = matrix defining linear inequality constraints
%           d         = vector defining linear inequality constraints
%           p         = number of nonlinear equality constraints
%           q         = number of nonlinear inequality constraints
%           myoptions = optimization options prepared with the myoptimset
%                       command
%
%   OUTPUTS:
%           xstar     = exit value, either a local minimizer or the value
%                       of x at the last iterate
%           fxstar    = cost function evaluated at xstar
%           niter     = number of employed iterations
%           exitflag  = termination condition:
%                       -2: max iterations reached, unfeasible point
%                       -1: max. number of iterations reached
%                        1: local minimum possible, gradient condition
%                        2: local minimum possible, step size condition
%                        3: local minimum possible, cost decrease condition
%           xsequence = sequence of iterates {xk} (only if option
%                       xsequence is set to 'on')

%% Initialization
n = length(x0);
k = 0;
deltaxk_rel = 1;
deltaf_rel  = 1;
lambdak = zeros(p+size(A,1),1);
muk     = zeros(q+size(C,1),1);
sigmak  = zeros(p+size(A,1),1);
tauk    = zeros(q+size(C,1),1);
eq_constr_max   = 0;
ineq_constr_min = 0;

if ~isempty(myoptions.outputfcn)
    outputfun = myoptions.outputfcn;
end
if strcmp(myoptions.display,'Iter')
    fprintf('Iteration NormGrad Cost Equality Inequality Rel. cost Rel. x Line-search\r')
end

% Start sequence of optimization variables at each iteration
xsequence = [];
if strcmp(myoptions.xsequence,'on')
    xsequence = [xsequence, x0];
end

%% Iterations
if strcmp(myoptions.Hessmethod,'GN')        % Constrained Gauss-Newton method
    % Compute cost and constraints and their gradients
    funF = myoptions.GN_funF;
    xk   = x0;
    [Vk,gradVk] = mygradient(funF,x0,myoptions.gradmethod,myoptions.graddx);
    Fxk     = Vk(1:end-(p+q),1);
    gradFxk = gradVk(:,1:end-(p+q));
    if ~isempty(A)
        gxk    = [A*xk-b;Vk(end-p-q+1:end-q,1)];
        gradgk = [A',gradVk(:,end-p-q+1:end-q,1)];
    else
        gxk    = Vk(end-p-q+1:end-q,1);
        gradgk = gradVk(:,end-p-q+1:end-q,1);
    end
    if ~isempty(C)
        hxk    = [C*xk-d;Vk(end-q+1:end,1)];
        gradhk = [C',gradVk(:,end-q+1:end,1)];
    else
        hxk    = Vk(end-q+1:end,1);
        gradhk = gradVk(:,end-q+1:end,1);
    end
    fxk     = Fxk'*Fxk;
    gradfxk = 2*gradFxk*Fxk;

    % Compute gradient of the Lagrange function
    gradLagr = gradfxk-gradgk*lambdak-gradhk*muk;

    % Compute worst-case equality and inequality constraints (for feedback
    % and termination conditions)
    if ~isempty(gxk)
        eq_constr_max = max(abs(gxk));
    end
    if ~isempty(hxk)
        ineq_constr_min = min(hxk);
    end

    % Feedback and output function
    if strcmp(myoptions.display,'Iter')
        fprintf('%9.0f %7.5e %6.5e %6.5e %6.5e %6.5e %6.5e %4.0f\r',...
            k,norm(gradLagr),fxk,eq_constr_max,ineq_constr_min,deltaf_rel,deltaxk_rel,0)
    end
    if ~isempty(myoptions.outputfcn)
        outputfun(xk);
    end

    % Solver iterations
    while norm(gradLagr) > myoptions.tolgrad...
            && k < myoptions.nitermax...
            && deltaxk_rel > myoptions.tolx...
            && deltaf_rel > myoptions.tolfun...
            || (max(eq_constr_max,-ineq_constr_min) > myoptions.tolconstr)

        % Compute search direction
        Hk = 2*(gradFxk*gradFxk')+myoptions.GN_sigma*eye(n);
        [pk,~,~,~,LagMult] = quadprog(Hk,gradfxk,-gradhk',hxk,gradgk',-gxk,[],[],[],myoptions.QPoptions);
        lambda_tilde = -LagMult.eqlin;
        mu_tilde     = LagMult.ineqlin;
        delta_lambda = lambda_tilde-lambdak;
        delta_mu     = mu_tilde-muk;

        % Update merit function
        sigmak = max(abs(lambda_tilde),(sigmak+abs(lambda_tilde))/2);
        tauk   = max(abs(mu_tilde),(tauk+abs(mu_tilde))/2);
        T1fun  = @(x)MeritT1(funF,x,A,b,C,d,p,q,sigmak,tauk,'GN');

        % Compute current merit function value
        T1k = T1fun(xk);

        % Compute directional derivative of the merit function
        ind_h_viol  = hxk<=0;
        gradhk_viol = gradhk(:,ind_h_viol);
        tauk_viol   = tauk(ind_h_viol,1);
        DT1k        = gradfxk'*pk-sigmak'*abs(gxk)-tauk_viol'*gradhk_viol'*pk;

        % Line search with merit function and directional derivative
        [xkp1,fxkp1,niter_LS,tk] = linesearch_merit(T1fun,T1k,DT1k,xk,pk,...
            myoptions.ls_tkmax,myoptions.ls_beta,...
            myoptions.ls_c,myoptions.ls_nitermax);

        % Update relative changes of cost and optimization variables
        deltaxk_rel = norm(xkp1-xk)/max(eps,norm(xk));
        deltaf_rel  = abs(fxkp1-fxk)/max(eps,abs(fxk));

        % Update primal and dual variables
        k       = k+1;
        xk      = xkp1;
        lambdak = lambdak+tk*delta_lambda;
        muk     = muk+tk*delta_mu;

        % Compute new cost, constraints, and their gradients
        [Vk,gradVk] = mygradient(funF,xk,myoptions.gradmethod,myoptions.graddx);
        Fxk     = Vk(1:end-(p+q),1);
        gradFxk = gradVk(:,1:end-(p+q));
        fxk     = Fxk'*Fxk;
        gradfxk = 2*gradFxk*Fxk;

        if ~isempty(A)
            gxk    = [A*xk-b;Vk(end-p-q+1:end-q,1)];
            gradgk = [A',gradVk(:,end-p-q+1:end-q,1)];
        else
            gxk    = Vk(end-p-q+1:end-q,1);
            gradgk = gradVk(:,end-p-q+1:end-q,1);
        end
        if ~isempty(C)
            hxk    = [C*xk-d;Vk(end-q+1:end,1)];
            gradhk = [C',gradVk(:,end-q+1:end,1)];
        else
            hxk    = Vk(end-q+1:end,1);
            gradhk = gradVk(:,end-q+1:end,1);
        end

        % Compute gradient of the Lagrangian
        gradLagr = gradfxk-gradgk*lambdak-gradhk*muk;

        % Compute worst-case equality and inequality constraints (for feedback
        % and termination conditions)
        if ~isempty(gxk)
            eq_constr_max = max(abs(gxk));
        end
        if ~isempty(hxk)
            ineq_constr_min = min(hxk);
        end

        % Feedback and output function
        if strcmp(myoptions.display,'Iter')
            fprintf('%9.0f %7.5e %6.5e %6.5e %6.5e %6.5e %6.5e %4.0f\r',...
                k,norm(gradLagr),fxk,eq_constr_max,ineq_constr_min,deltaf_rel,deltaxk_rel,niter_LS)
        end
        if ~isempty(myoptions.outputfcn)
            outputfun(xk);
        end

        % Store sequence of optimization variables at each iteration
        if strcmp(myoptions.xsequence,'on')
            xsequence = [xsequence, xk];
        end
    end

end

%% Termination
xstar  = xk;
fxstar = fxk;
if max(eq_constr_max,-ineq_constr_min) <= myoptions.tolconstr
    if norm(gradLagr) <= myoptions.tolgrad
        exitflag = 1;
        if strcmp(myoptions.display,'Iter')
            fprintf('Local minimum possible, directional derivative smaller than tolerance. Constraints satisfied.\r')
        end
    elseif k >= myoptions.nitermax
        exitflag = -1;
        if strcmp(myoptions.display,'Iter')
            fprintf('Maximum number of iterations reached. Constraints satisfied.\r')
        end
    elseif deltaxk_rel <= myoptions.tolx
        exitflag = 2;
        if strcmp(myoptions.display,'Iter')
            fprintf('Local minimum possible, relative step size smaller than tolerance. Constraints satisfied.\r')
        end
    elseif deltaf_rel <= myoptions.tolfun
        exitflag = 3;
        if strcmp(myoptions.display,'Iter')
            fprintf('Local minimum possible, relative cost decrease smaller than tolerance. Constraints satisfied.\r')
        end
    end
elseif max(eq_constr_max,-ineq_constr_min) > myoptions.tolconstr
    if k >= myoptions.nitermax
        exitflag = -2;
        if strcmp(myoptions.display,'Iter')
            fprintf('Maximum number of iterations reached. Constraints not satisfied.\r')
        end
    end
end
--------------------------------------------------------------------------------
/Mark 23_semidef/mygradient.m:
--------------------------------------------------------------------------------
function [fxk,gradient] = mygradient(fun,xk,method,dx)
% MYGRADIENT Computes the gradient (i.e. the Jacobian transpose) of a
% given function, using one of several possible methods.
%   INPUTS:
%           fun      = function whose gradient shall be evaluated (in
%                      general a vector of N functions)
%           xk       = value of the n-dimensional input argument at which
%                      to evaluate the gradient (dfun(x)/dx)'
%           method   = string indicating the differentiation method: 'FD'
%                      (Forward Finite Differences), 'CD' (Central Finite
%                      Differences), 'UP' (User Provided), 'IM'
%                      (Imaginary-part trick).
%                      If 'UP' is used, function fun(x) shall return as
%                      second output the Jacobian dfun(x)/dx evaluated at
%                      x = xk.
%           dx       = perturbation step used in the finite-difference
%                      approximation
%
%   OUTPUTS:
%           fxk      = value of fun(xk)
%           gradient = n-by-N matrix containing in each column the partial
%                      derivatives of the corresponding element of function
%                      fun(x) with respect to each component of x,
%                      evaluated at xk

if strcmp(method,'UP')
    [fxk,gradient] = fun(xk);
elseif strcmp(method,'FD')
    n = length(xk);
    p = zeros(n,1);
    fxk = fun(xk);
    N = length(fxk);
    gradient = zeros(n,N);
    p(1,1) = dx;
    gradient(1,:) = (fun(xk+p)-fun(xk))'/dx;
    for ind = 2:n
        p(ind-1,1) = 0;
        p(ind,1) = dx;
        gradient(ind,:) = (fun(xk+p)-fun(xk))'/dx;
    end
elseif strcmp(method,'CD')
    n = length(xk);
    p = zeros(n,1);
    fxk = fun(xk);
    N = length(fxk);
    gradient = zeros(n,N);
    p(1,1) = dx;
    gradient(1,:) = (fun(xk+p)-fun(xk-p))'/(2*dx);
    for ind = 2:n
        p(ind-1,1) = 0;
        p(ind,1) = dx;
        gradient(ind,:) = (fun(xk+p)-fun(xk-p))'/(2*dx);
    end
elseif strcmp(method,'IM')
    n = length(xk);
    p = zeros(n,1);
    fxk = fun(xk);
    N = length(fxk);
    gradient = zeros(n,N);
    tol = 1e-100;
    p(1,1) = 1j*tol;
    gradient(1,:) = imag(fun(xk+p))'/tol;
    for ind = 2:n
        p(ind-1,1) = 0;
        p(ind,1) = 1j*tol;
        gradient(ind,:) = imag(fun(xk+p))'/tol;
    end
end
--------------------------------------------------------------------------------
/Mark 23_semidef/myoptimset.m:
--------------------------------------------------------------------------------
  1 | function myoptions = myoptimset
  2 | % MYOPTIMSET Sets default options for the user-developed nonlinear
  3 | % programming routines, both unconstrained and constrained.
  4 | %
  5 | % OUTPUTS:
  6 | % myoptions = structure containing all options required by the NLP solvers
  7 | %
  8 | % %% General options
  9 | % myoptions.display    = 'Iter';  % Display iteration output
 10 | % myoptions.xsequence  = 'off';   % Store sequence of points {xk}
 11 | % myoptions.tolgrad    = 1e-6;    % Termination tolerance on the norm
 12 | %                                 % of the directional derivative
 13 | % myoptions.tolx       = 1e-8;    % Termination tolerance on the relative
 14 | %                                 % change of optimization variables
 15 | % myoptions.tolfun     = 1e-10;   % Termination tolerance on the relative
 16 | %                                 % improvement of the cost function
 17 | % myoptions.tolconstr  = 1e-6;    % Constraint satisfaction tolerance
 18 | % myoptions.nitermax   = 50;      % Maximum number of iterations
 19 | % myoptions.outputfcn  = [];      % Handle for output function
 20 | %
 21 | % %% Differentiation options
 22 | % myoptions.gradmethod = 'FD';    % Method for gradient computation:
 23 | %                                 % FD = Forward Differences
 24 | %                                 % CD = Central Differences
 25 | %                                 % IM = Imaginary-part trick
 26 | %                                 % UP = User Provided
 27 | % myoptions.graddx     = 2^-26;   % Perturbation for gradient computation:
 28 | %                                 % use 2^-26 for FD, 2^-17 for CD
 29 | %
 30 | % %% Line search options
 31 | % myoptions.ls_tkmax    = 1;      % Maximum step size
 32 | % myoptions.ls_beta     = 0.8;    % Beta scaling factor for
 33 | %                                 % back-tracking line search
 34 | % myoptions.ls_c        = 0.1;    % c coefficient for back-tracking
 35 | %                                 % line search
 36 | % myoptions.ls_nitermax = 20;     % Max. number of back-tracking
 37 | %                                 % line search iterations
 38 | %
 39 | % %% Quasi-Newton method options
 40 | % myoptions.Hessmethod = 'BFGS';  % Method for Hessian computation:
 41 | %                                 % Exact = Exact Newton; f(x) shall return
 42 | %                                 %         the gradient of f as second
 43 | %                                 %         output and the Hessian of f as
 44 | %                                 %         third output
 45 | %                                 % BFGS  = BFGS algorithm
 46 | %                                 % GN    = Gauss-Newton algorithm
 47 | %                                 % SD    = Steepest descent
 48 | % myoptions.BFGS_gamma = 1e-1;    % gamma factor for Powell's trick in
 49 | %                                 % the BFGS algorithm
 50 | 
 51 | %% General options
 52 | myoptions.display    = 'Iter';  % Display iteration output
 53 | myoptions.xsequence  = 'off';   % Store sequence of points {xk}
 54 | myoptions.tolgrad    = 1e-6;    % Termination tolerance on the norm
 55 |                                 % of the directional derivative
 56 | myoptions.tolconstr  = 1e-6;    % Constraint satisfaction tolerance
 57 | myoptions.tolx       = 1e-8;    % Termination tolerance on the relative
 58 |                                 % change of optimization variables
 59 | myoptions.tolfun     = 1e-10;   % Termination tolerance on the relative
 60 |                                 % improvement of the cost function
 61 | myoptions.nitermax   = 50;      % Maximum number of iterations
 62 | myoptions.outputfcn  = [];      % Handle for output function
 63 | 
 64 | %% Differentiation options
 65 | myoptions.gradmethod = 'FD';    % Method for gradient computation:
 66 |                                 % FD = Forward Differences
 67 |                                 % CD = Central Differences
 68 |                                 % IM = Imaginary-part trick
 69 |                                 % UP = User Provided
 70 | myoptions.graddx     = 2^-26;   % Perturbation for gradient computation:
 71 |                                 % use 2^-26 for FD, 2^-17 for CD
 72 | 
 73 | %% Line search options
 74 | myoptions.ls_tkmax    = 1;      % Maximum step size
 75 | myoptions.ls_beta     = 0.8;    % Beta scaling factor for
 76 |                                 % back-tracking line search
 77 | myoptions.ls_c        = 0.1;    % c coefficient for back-tracking
 78 |                                 % line search
 79 | myoptions.ls_nitermax = 20;     % Max. number of back-tracking
 80 |                                 % line search iterations
 81 | 
 82 | %% Quasi-Newton method options
 83 | myoptions.Hessmethod = 'BFGS';  % Method for Hessian computation:
 84 |                                 % Exact = Exact Newton; the cost function
 85 |                                 %         shall return the gradient as
 86 |                                 %         second output and the Hessian
 87 |                                 %         as third output
 88 |                                 % BFGS  = BFGS algorithm
 89 |                                 % GN    = Gauss-Newton algorithm
 90 |                                 % SD    = Steepest descent
 91 | myoptions.BFGS_gamma = 1e-1;    % gamma factor for Powell's trick in
 92 |                                 % the BFGS algorithm
 93 | myoptions.GN_funF    = [];      % Function providing the residual vector F
 94 |                                 % in the Gauss-Newton method, assuming
 95 |                                 % f(x) = F(x)'*F(x)
 96 | myoptions.GN_sigma   = 0;       % Coefficient ensuring the Gauss-Newton
 97 |                                 % Hessian approximation is positive
 98 |                                 % definite (H = GradF*GradF'+eye(n)*sigma)
 99 | myoptions.QPoptions  = ...
100 |     optimset('Display','none','Algorithm','interior-point-convex');
101 |                                 % options for the QP solver in SQP methods
102 | end
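
In practice the defaults are meant to be overridden per problem, as already done in Project.m. For instance (option names as defined above; the values are illustrative and residualF is a placeholder for a problem-specific residual function):

    opts            = myoptimset;        % start from the defaults
    opts.Hessmethod = 'GN';              % Gauss-Newton Hessian approximation
    opts.GN_funF    = @(x)residualF(x);  % residual vector F, with f(x)=F'*F
    opts.gradmethod = 'CD';              % central differences...
    opts.graddx     = 2^-17;             % ...with the suggested CD step
    opts.nitermax   = 100;               % allow more SQP iterations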
--------------------------------------------------------------------------------
/Mark 23_semidef/robot_dynamics.m:
--------------------------------------------------------------------------------
 1 | function [q1_dd, q2_dd] = robot_dynamics(tau, state, d)
 2 | % ROBOT_DYNAMICS Joint accelerations of the 2-DOF planar manipulator,
 3 | % given the joint torques tau, the state [q1;q2;q1_d;q2_d] and the
 4 | % disturbance vector d = [F_hx; F_hz; m_load].
 5 | 
 6 | F_hx   = d(1);
 7 | F_hz   = d(2);
 8 | m_load = d(3);
 9 | q1     = state(1);
10 | q2     = state(2);
11 | q1_d   = state(3);
12 | q2_d   = state(4);
13 | 
14 | % Robot parameters
15 | g   = 9.81;   % gravitational acceleration
16 | J1  = 10;     % inertia link 1
17 | J2  = 10;     % inertia link 2
18 | m1  = 50;     % mass link 1
19 | m2  = 50;     % mass link 2
20 | a1  = 1.2;    % length link 1
21 | a2  = 1;      % length link 2
22 | ac1 = a1/2;   % centre of mass link 1
23 | ac2 = a2/2;   % centre of mass link 2
24 | 
25 | % Kinematic functions
26 | c1  = cos(q1);
27 | s1  = sin(q1);
28 | c2  = cos(q2);
29 | s2  = sin(q2);
30 | c12 = cos(q1+q2);
31 | s12 = sin(q1+q2);
32 | 
33 | % End-effector Jacobian
34 | J = [-a1*s1-a2*s12, -a2*s12;
35 |       a1*c1+a2*c12,  a2*c12];
36 | 
37 | % Inertia matrix
38 | M_tot = [m1*ac1^2+m2*(a1^2+ac2^2+2*a1*ac2*c2)+J1+J2, m2*(ac2^2+a1*ac2*c2)+J2;
39 |          m2*(ac2^2+a1*ac2*c2)+J2,                    m2*ac2^2+J2];
40 | 
41 | % Coriolis matrix (the corresponding term - C_tot*[q1_d;q2_d] is not
42 | % included in the simplified equation of motion below):
43 | % C_tot = [-a1*q2_d*s2*ac2*m2 -a1*q1_d*s2*ac2*m2;
44 | %          -a1*q1_d*s2*ac2*m2  0];
45 | 
46 | % Gravity terms
47 | g_tot = [(m1*ac1+m2*a1)*g*c1+m2*g*ac2*c12;
48 |          m2*g*ac2*c12];
49 | 
50 | % Equation of motion of the 2-DOF manipulator: the human force and the
51 | % load weight enter through the Jacobian transpose
52 | q_dd  = M_tot\(tau + J'*[F_hx; F_hz - m_load*g] - g_tot);
53 | q1_dd = q_dd(1);
54 | q2_dd = q_dd(2);
55 | 
56 | end
--------------------------------------------------------------------------------
/Mark 23_semidef/robot_sim_cost.m:
--------------------------------------------------------------------------------
1 | function [cost, q_sim] = robot_sim_cost(x,q_meas,qd_ref,pos_ref,m_load,Q)
2 | 
3 | % Compute the quadratic cost of the error vector obtained from the
4 | % simulation: cost = F(x)'*F(x), with F returned by robot_sim_err
5 | [err_vec, q_sim] = robot_sim_err(x, q_meas, qd_ref, pos_ref, m_load, Q);
6 | cost = err_vec'*err_vec;
7 | 
8 | end
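
A minimal sanity check of the dynamics interface (the numbers are illustrative; the state ordering and the disturbance vector d = [F_hx; F_hz; m_load] follow the function header above):

    tau   = [0; 0];               % no motor torque
    state = [pi/4; pi/4; 0; 0];   % arbitrary configuration, at rest
    d     = [0; -200; 10];        % 200 N downward human force, 10 kg load
    [q1_dd, q2_dd] = robot_dynamics(tau, state, d);
    % With zero torque the arm accelerates under gravity and the load,
    % so [q1_dd; q2_dd] is expected to be nonzero here.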
--------------------------------------------------------------------------------
/Mark 23_semidef/robot_sim_err.m:
--------------------------------------------------------------------------------
 1 | function [err_vec,q_sim] = robot_sim_err(x, q_reference, qd_reference, pos_ref, m_load, Q)
 2 | 
 3 | %% Initialize simulation parameters
 4 | tau_opt   = [x(1,1); x(2,1)];      % candidate joint torques
 5 | n_samples = size(q_reference,2);   % number of samples in the window
 6 | tau       = zeros(5,3);            % 5 = dimension of the sliding window M
 7 | t_sim     = 0:0.02:0.08;           % simulation time grid, Ts = 0.02 s
 8 | 
 9 | % Build the [time, tau1, tau2] matrix expected by the Simulink model
10 | for j = 1:5
11 |     tau(j,2:3) = tau_opt';
12 | end
13 | tau(:,1) = t_sim';
14 | tau_sim  = tau;
15 | assignin('base','tau_sim',tau_sim);   % pass the torque profile to Simulink
16 | 
17 | %% Simulate the model
18 | sim('Model_sim');
19 | q_sim   = [q1_sim q2_sim]';
20 | qd_sim  = [q1d_sim q2d_sim]';
21 | F_human = [Fh_x Fh_z]';
22 | 
23 | %% Simulation of the end-effector trajectories
24 | xi_test = zeros(4,5);
25 | for j = 1:5
26 |     % first 2 rows: positions; last 2 rows: speeds
27 |     [x_pos, z_pos, xd_pos, zd_pos] = direct_kin_test(q_sim(1,j),q_sim(2,j),...
28 |         qd_sim(1,j),qd_sim(2,j));
29 |     xi_test(:,j) = [x_pos, z_pos, xd_pos, zd_pos]';
30 | end
31 | 
32 | %% Computation of the trajectory error to pass to Simulink
33 | error_traj        = zeros(5,3);
34 | xi_traj           = zeros(5,3);
35 | xi_traj(:,2:3)    = xi_test(1:2,:)';
36 | xi_traj(:,1)      = t_sim';
37 | error_traj(:,1)   = t_sim';
38 | error_traj(:,2:3) = pos_ref(:,2:3)-xi_traj(:,2:3);
39 | 
40 | %% Stack the weighted errors in one vector
41 | q_ref   = [q_reference; qd_reference];
42 | q_test  = [q_sim; qd_sim];
43 | err     = Q*(q_ref-q_test);
44 | err_vec = [err(1,:) err(2,:) err(3,:) err(4,:)]'/sqrt(n_samples);
45 | 
46 | %% Assign the results to the base workspace
47 | assignin('base','xi0',q_sim);
48 | assignin('base','xd0',qd_sim);
49 | assignin('base','error',err);
50 | assignin('base','xi_sim',xi_test);
51 | assignin('base','error_xi',error_traj);
52 | assignin('base','F_hu',F_human);
53 | end
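
Note the convention tying these two functions to the solver: robot_sim_err returns the weighted residual vector F(x) and robot_sim_cost its squared norm, which is exactly the f(x) = F(x)'*F(x) form that the GN_funF option in myoptimset assumes. A quick consistency check might look as follows (the window inputs are placeholders, and Model_sim must be on the MATLAB path for the simulation to run):

    [F,~] = robot_sim_err(x0, q_ref_win, qd_ref_win, pos_ref_win, m_load, Q);
    [f,~] = robot_sim_cost(x0, q_ref_win, qd_ref_win, pos_ref_win, m_load, Q);
    assert(abs(f - F'*F) <= 1e-12*max(1,abs(f)))   % f(x) = F(x)'*F(x)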
--------------------------------------------------------------------------------
/Mark 23_semidef/test_1_tolgrad5_tolx8.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 23_semidef/test_1_tolgrad5_tolx8.mat
--------------------------------------------------------------------------------
/Mark 23_semidef/test_2_tolgrad6_tolx10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 23_semidef/test_2_tolgrad6_tolx10.mat
--------------------------------------------------------------------------------
/Mark 23_semidef/test_2_tolgrad6_tolx8.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Mark 23_semidef/test_2_tolgrad6_tolx8.mat
--------------------------------------------------------------------------------
/Presentazione_CNOEC.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roddolo/Collaborative-robot/0a4b180b1b72ff0997ae0f59d4626a9e4681a236/Presentazione_CNOEC.pptx
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Collaborative-robot
2 | Online estimation of the human force acting on the end-effector of a collaborative robot, and online optimal control of the joint torques.
3 | 
4 | - [Mark 21_semidef](https://github.com/Roddolo/Collaborative-robot/tree/master/Mark%2021_semidef): the part of the project concerning the estimation of the human force
5 | - [Mark 23_semidef](https://github.com/Roddolo/Collaborative-robot/tree/master/Mark%2023_semidef): the part of the project concerning the optimal control of the robot's torques
6 | - [Project report](https://github.com/Roddolo/Collaborative-robot/blob/master/CNOEC_report.pdf)
7 | 
8 | ***Students: Rodrigo Senofieni, Andrea Ghezzi, Luca Brolese. Project for the course "Constrained numerical optimization for estimation and control", held at Politecnico di Milano by Prof. Lorenzo Fagiano.***
9 | 
--------------------------------------------------------------------------------