├── Aluminium_Frame_Problem ├── Aluminium_Frame_Analysis.m ├── Aluminium_Frame_Data.mat ├── Aluminium_Frame_ModelUpdating.m ├── Data_experimental.mat ├── Data_simulations.mat ├── FIT_NEURALNETWORK.m ├── README.md ├── TEMCMCsampler.m ├── TMCMCsampler.m ├── loglikelihood1.m ├── loglikelihood2.m ├── loglikelihood3.m └── plotEigenvalues.m ├── EMCMCsampler.m ├── Illustrative_Example.m ├── README.md ├── TEMCMCsampler.m ├── TMCMCsampler.m ├── example_CoupledOscillator.m └── example_himmelblau.m /Aluminium_Frame_Problem/Aluminium_Frame_Analysis.m: -------------------------------------------------------------------------------- 1 | %% load Data: 2 | load('Aluminium_Frame_ModelUpdating.mat'); 3 | 4 | %% Define function handle to compute influence weights: 5 | 6 | L1 = @(theta,measurements) loglikelihood1(theta, measurements, net); 7 | L2 = @(theta,measurements) loglikelihood2(theta, measurements, net); 8 | L3 = @(theta,measurements) loglikelihood3(theta, measurements, net); 9 | 10 | % Compute nominal influence weights from posterior samples: 11 | nom_post_weights = @(theta, measurements) exp([L1(theta, measurements),... 12 | L2(theta, measurements),... 
13 | L3(theta, measurements)]); 14 | 15 | %% Compute array of influence weights for the likelihood functions: 16 | 17 | % Array to store nominal weights of individual likelihood for combined likelihood 1: 18 | nom_weights_like1 = zeros(Nsamples, 3, size(exp_freq,1)); 19 | % Array to store nominal weights of individual likelihood for combined likelihood 2: 20 | nom_weights_like2 = zeros(Nsamples, 3, size(exp_freq,1)); 21 | % Array to store nominal weights of individual likelihood for combined likelihood 3: 22 | nom_weights_like3 = zeros(Nsamples, 3, size(exp_freq,1)); 23 | 24 | % Array to store normalised weights of individual likelihood for combined likelihood 1: 25 | weights_like1 = zeros(Nsamples, 3, size(exp_freq,1)); 26 | % Array to store normalised weights of individual likelihood for combined likelihood 2: 27 | weights_like2 = zeros(Nsamples, 3, size(exp_freq,1)); 28 | % Array to store normalised weights of individual likelihood for combined likelihood 3: 29 | weights_like3 = zeros(Nsamples, 3, size(exp_freq,1)); 30 | 31 | for j = 1:size(exp_freq,1) 32 | 33 | samples_temcmc_1 = TEMCMC{j,1}.samples; 34 | samples_temcmc_2 = TEMCMC{j,2}.samples; 35 | samples_temcmc_3 = TEMCMC{j,3}.samples; 36 | 37 | nom_weights_like1(:,:,j) = nom_post_weights(samples_temcmc_1, exp_freq(j,:)); 38 | nom_weights_like2(:,:,j) = nom_post_weights(samples_temcmc_2, exp_freq(j,:)); 39 | nom_weights_like3(:,:,j) = nom_post_weights(samples_temcmc_3, exp_freq(j,:)); 40 | 41 | for i = 1:Nsamples 42 | 43 | norm_fac1 = sum(nom_weights_like1(i,:,j)); 44 | norm_fac2 = sum(nom_weights_like2(i,:,j)); 45 | norm_fac3 = sum(nom_weights_like3(i,:,j)); 46 | 47 | weights_like1(i,:,j) = nom_weights_like1(i,:,j)./norm_fac1; 48 | weights_like2(i,:,j) = nom_weights_like2(i,:,j)./norm_fac2; 49 | weights_like3(i,:,j) = nom_weights_like3(i,:,j)./norm_fac3; 50 | 51 | end 52 | end 53 | 54 | for j = 1:size(exp_freq,1) 55 | for k = 1:3 56 | 57 | mean_weights1(j,k) = mean(weights_like1(:,k,j)); 58 | 
mean_weights2(j,k) = mean(weights_like2(:,k,j)); 59 | mean_weights3(j,k) = mean(weights_like3(:,k,j)); 60 | 61 | cov_weights1(j,k) = (std(weights_like1(:,k,j))./mean(weights_like1(:,k,j)))*100; 62 | cov_weights2(j,k) = (std(weights_like2(:,k,j))./mean(weights_like2(:,k,j)))*100; 63 | cov_weights3(j,k) = (std(weights_like3(:,k,j))./mean(weights_like3(:,k,j)))*100; 64 | 65 | end 66 | end 67 | 68 | table_C1 = array2table([mean_weights1(:,1),cov_weights1(:,1),mean_weights1(:,2),cov_weights1(:,2),... 69 | mean_weights1(:,3),cov_weights1(:,3)], 'VariableNames',... 70 | {'Mean_L1','COV_L1','Mean_L2','COV_L2','Mean_L3','COV_L3'}); 71 | 72 | table_C2 = array2table([mean_weights2(:,1),cov_weights2(:,1),mean_weights2(:,2),cov_weights2(:,2),... 73 | mean_weights2(:,3),cov_weights2(:,3)], 'VariableNames',... 74 | {'Mean_L1','COV_L1','Mean_L2','COV_L2','Mean_L3','COV_L3'}); 75 | 76 | table_C3 = array2table([mean_weights3(:,1),cov_weights3(:,1),mean_weights3(:,2),cov_weights3(:,2),... 77 | mean_weights3(:,3),cov_weights3(:,3)], 'VariableNames',... 
78 | {'Mean_L1','COV_L1','Mean_L2','COV_L2','Mean_L3','COV_L3'}); 79 | -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/Aluminium_Frame_Data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adolphus8/Transitional_Ensemble_MCMC/3e6cbf62c1fc269e553942dc18d479c9cb25c847/Aluminium_Frame_Problem/Aluminium_Frame_Data.mat -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/Aluminium_Frame_ModelUpdating.m: -------------------------------------------------------------------------------- 1 | %% Bayesian Model Updating of the 2D Aluminium Frame 2 | 3 | % Objective: To update the model parameters given frequency measurements via 4 | % TMCMC and TEMCMC 5 | 6 | %% Load the Database of Simulated Frequencies 7 | % The experimental measurements are the natural frequencies of an aluminium 8 | % frame with movable masses. 9 | % 10 | % A total of 200 synthetic measurements were obtained from the FE model. 11 | % The mass location in the frame is varied. 
12 | % The pairs of vertical distances (p1,p2) were randomly extracted from a 13 | % normal pdf with means [20,20] and cov C(p1,p2) = C(p2,p1) = 9; 14 | 15 | % Load the Synthetic data for Frequency and Mass positions from the FE model: 16 | load('Data_simulations.mat'); 17 | 18 | % Distance between mass 1 and the bottom of the beam: 19 | pm1 = DATA.p1; 20 | 21 | % Distance between mass 2 and the middle beam: 22 | pm2 = DATA.p2; 23 | 24 | % Simulated data of pm = [pm1, pm2]: 25 | pm = [pm1, pm2]; 26 | 27 | % Obtain the 6 natural frequencies considered in this study: 28 | freq = DATA.f(:,[1:4,6,8]); 29 | 30 | % Plot scatterplot of pm1 and pm2: 31 | figure; 32 | hold on; box on; grid on; 33 | scatter(pm1, pm2, 18, 'b', 'filled') 34 | xlabel('$pm_1$ $[cm]$','Interpreter','latex'); ylabel('$pm_2$ $[cm]$','Interpreter','latex'); 35 | set(gca, 'fontsize', 20) 36 | hold off 37 | 38 | % Plot matrix plot of frequency samples: 39 | plotEigenvalues(freq) 40 | 41 | %% Train a Surrogate Model to mimic Input Output FE relation 42 | % 43 | % The surrogate model used in this study is in the form of an Artificial 44 | % Neural Network (ANN) with configuration: [2:15:6]; (NOTE(review): FIT_NEURALNETWORK.m currently builds feedforwardnet([10]) — confirm the intended hidden-layer size) 45 | % 46 | % Topology: 1 input layer with 2 nodes, 1 hidden-layer with 15 nodes, and 47 | % 1 output layer with 6 nodes. 
48 | % 49 | % The ANN is calibrated and trained using the simulated input-output data 50 | % from the Finite Element Model via the basic feed-forward back-propagation 51 | % method. 52 | 53 | % Obtain the network NET (net) and the training record (tr) from the ANN: 54 | tic; 55 | [net,tr] = FIT_NEURALNETWORK(pm,freq); 56 | ANNtime = toc; 57 | 58 | % To obtain plots of the ANN performance statistics: 59 | figure; % To plot training state values 60 | plotperform(tr) 61 | 62 | POutputs = net(pm'); POutputs = POutputs'; 63 | trOut = POutputs(tr.trainInd,:); vOut = POutputs(tr.valInd,:); tsOut = POutputs(tr.testInd,:); 64 | trTarg = freq(tr.trainInd,:); vTarg = freq(tr.valInd,:); tsTarg = freq(tr.testInd,:); 65 | 66 | plotregression(trTarg, trOut, 'Train', vTarg, vOut, 'Validation', tsTarg, tsOut, 'Testing', freq, POutputs, 'All') 67 | 68 | %% Load the real experimental data 69 | 70 | load('Data_experimental.mat') 71 | 72 | % Experimental values of [pm1, pm2]: 73 | exp_pm = Data_experimental.p; 74 | 75 | % Experimental data of the natural frequencies: 76 | exp_freq = Data_experimental.Nat_Freq_Exp; 77 | 78 | % Present experimental data in Table form for reference: 79 | exp_data_table = array2table([exp_pm, exp_freq], 'VariableNames',... 80 | {'exp_pm1','exp_pm2','exp_f1', 'exp_f2', 'exp_f3',... 
81 | 'exp_f4', 'exp_f5', 'exp_f6'}); 82 | 83 | load('Aluminium_Frame_Data.mat') 84 | %% Bayesian Model Updating set-up: 85 | 86 | % Set up the Prior: 87 | lowerBound = [5, 1e-03]; upperBound = [35, 100]; 88 | prior_pm1 = @(x) unifpdf(x, lowerBound(1), upperBound(1)); 89 | prior_pm2 = @(x) unifpdf(x, lowerBound(1), upperBound(1)); 90 | prior_sigma1 = @(x) unifpdf(x, lowerBound(2), upperBound(2)); 91 | prior_sigma2 = @(x) unifpdf(x, lowerBound(2), upperBound(2)); 92 | prior_sigma3 = @(x) unifpdf(x, lowerBound(2), upperBound(2)); 93 | prior_sigma4 = @(x) unifpdf(x, lowerBound(2), upperBound(2)); 94 | prior_sigma5 = @(x) unifpdf(x, lowerBound(2), upperBound(2)); 95 | prior_sigma6 = @(x) unifpdf(x, lowerBound(2), upperBound(2)); 96 | 97 | prior_pdf = @(x) prior_pm1(x(:,1)).*prior_pm2(x(:,2)).*prior_sigma1(x(:,3)).* ... 98 | prior_sigma2(x(:,4)).*prior_sigma3(x(:,5)).*prior_sigma4(x(:,6)).* ... 99 | prior_sigma5(x(:,7)).*prior_sigma6(x(:,8)); 100 | 101 | prior_rnd = @(N) [unifrnd(lowerBound(1), upperBound(1), N, 2),... 102 | unifrnd(lowerBound(2), upperBound(2), N, 6)]; 103 | 104 | % Set up the cell array of Loglikelihood functions: 105 | 106 | L1 = @(theta,measurements) loglikelihood1(theta, measurements, net); 107 | L2 = @(theta,measurements) loglikelihood2(theta, measurements, net); 108 | L3 = @(theta,measurements) loglikelihood3(theta, measurements, net); 109 | 110 | loglike = cell(3,1); 111 | loglike{1,1} = @(theta,measurements) log((1/3).*(exp(L1(theta,measurements)) +... 112 | exp(L2(theta,measurements)) + exp(L3(theta,measurements)))); 113 | loglike{2,1} = @(theta,measurements) L1(theta,measurements) + L2(theta,measurements) +... 114 | L3(theta,measurements); 115 | loglike{3,1} = @(theta,measurements) 0.5 .* log((1/3).*(exp(2 .* L1(theta,measurements)) +... 
116 | exp(2 .* L2(theta,measurements)) + exp(2 .* L3(theta,measurements)))); 117 | 118 | %% Run the TMCMC and TEMCMC simulations: 119 | 120 | Nsamples = 1000; % Number of samples to obtain from the posterior 121 | TMCMC = cell(size(exp_freq,1), length(loglike)); 122 | TEMCMC = cell(size(exp_freq,1), length(loglike)); 123 | timeTMCMC = zeros(size(exp_freq,1), length(loglike)); 124 | timeTEMCMC = zeros(size(exp_freq,1), length(loglike)); 125 | 126 | for i = 1:length(loglike) 127 | parfor j = 1:size(exp_freq,1) 128 | 129 | logl = loglike{i,1}; 130 | logL = @(theta) logl(theta, exp_freq(j,:)); 131 | 132 | tic; 133 | TMCMC{j,i} = TMCMCsampler('nsamples',Nsamples,'loglikelihood',logL,... 134 | 'priorpdf',prior_pdf,'priorrnd',prior_rnd,'burnin',0); 135 | timeTMCMC(j,i) = toc; 136 | 137 | tic; 138 | TEMCMC{j,i} = TEMCMCsampler('nsamples',Nsamples,'loglikelihood',logL,... 139 | 'priorpdf',prior_pdf,'priorrnd',prior_rnd,'burnin',0); 140 | timeTEMCMC(j,i) = toc; 141 | 142 | end 143 | end 144 | 145 | %% Analysis of Results: 146 | 147 | mean_tmcmc = zeros(size(exp_pm,1),8,size(logL,1)); 148 | mean_temcmc = zeros(size(exp_pm,1),8,size(logL,1)); 149 | stdev_tmcmc = zeros(size(exp_pm,1),8,size(logL,1)); 150 | stdev_temcmc = zeros(size(exp_pm,1),8,size(logL,1)); 151 | bounds_pm1_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 152 | bounds_pm1_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 153 | bounds_pm2_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 154 | bounds_pm2_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 155 | bounds_sigma1_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 156 | bounds_sigma1_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 157 | bounds_sigma2_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 158 | bounds_sigma2_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 159 | bounds_sigma3_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 160 | bounds_sigma3_temcmc = 
zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 161 | bounds_sigma4_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 162 | bounds_sigma4_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 163 | bounds_sigma5_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 164 | bounds_sigma5_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 165 | bounds_sigma6_tmcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 166 | bounds_sigma6_temcmc = zeros(size(exp_pm,1),size(exp_pm,2),size(logL,1)); 167 | 168 | for i = 1:size(logL,1) 169 | for j = 1:size(exp_pm,1) 170 | posterior_TMCMC = TMCMC{j,i}.samples; posterior_TEMCMC = TEMCMC{j,i}.samples; 171 | mean_tmcmc(j,:,i) = mean(posterior_TMCMC); mean_temcmc(j,:,i) = mean(posterior_TEMCMC); 172 | stdev_tmcmc(j,:,i) = std(posterior_TMCMC); stdev_temcmc(j,:,i) = std(posterior_TEMCMC); 173 | 174 | bounds_TMCMC = (prctile(posterior_TMCMC,[5 95],1))'; 175 | bounds_TEMCMC = (prctile(posterior_TEMCMC,[5 95],1))'; 176 | 177 | bounds_pm1_tmcmc(j,:,i) = bounds_TMCMC(1,:); bounds_pm2_tmcmc(j,:,i) = bounds_TMCMC(2,:); 178 | bounds_sigma1_tmcmc(j,:,i) = bounds_TMCMC(3,:); bounds_sigma2_tmcmc(j,:,i) = bounds_TMCMC(4,:); 179 | bounds_sigma3_tmcmc(j,:,i) = bounds_TMCMC(5,:); bounds_sigma4_tmcmc(j,:,i) = bounds_TMCMC(6,:); 180 | bounds_sigma5_tmcmc(j,:,i) = bounds_TMCMC(7,:); bounds_sigma6_tmcmc(j,:,i) = bounds_TMCMC(8,:); 181 | 182 | bounds_pm1_temcmc(j,:,i) = bounds_TEMCMC(1,:); bounds_pm2_temcmc(j,:,i) = bounds_TEMCMC(2,:); 183 | bounds_sigma1_temcmc(j,:,i) = bounds_TEMCMC(3,:); bounds_sigma2_temcmc(j,:,i) = bounds_TEMCMC(4,:); 184 | bounds_sigma3_temcmc(j,:,i) = bounds_TEMCMC(5,:); bounds_sigma4_temcmc(j,:,i) = bounds_TEMCMC(6,:); 185 | bounds_sigma5_temcmc(j,:,i) = bounds_TEMCMC(7,:); bounds_sigma6_temcmc(j,:,i) = bounds_TEMCMC(8,:); 186 | end 187 | end 188 | cov_tmcmc = (stdev_tmcmc./mean_tmcmc)*100; cov_temcmc = (stdev_temcmc./mean_temcmc)*100; 189 | 190 | % To compute the model error relative to the 
data 191 | for j = 1:size(exp_pm,1) 192 | error(j,:) = std(exp_freq(j,:)' - net(exp_pm(j,:)')); 193 | end 194 | 195 | %% Summary of results for Likelihod 1: 196 | 197 | table_pm1_L1 = array2table([exp_pm(:,1),mean_tmcmc(:,1,1),cov_tmcmc(:,1,1),bounds_pm1_tmcmc(:,:,1),... 198 | timeTMCMC(:,1),mean_temcmc(:,1,1),cov_temcmc(:,1,1),bounds_pm1_temcmc(:,:,1),... 199 | timeTEMCMC(:,1)], 'VariableNames',... 200 | {'True_pm1','TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 201 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 202 | 203 | table_pm2_L1 = array2table([exp_pm(:,2),mean_tmcmc(:,2,1),cov_tmcmc(:,2,1),bounds_pm2_tmcmc(:,:,1),... 204 | timeTMCMC(:,1),mean_temcmc(:,2,1),cov_temcmc(:,2,1),bounds_pm2_temcmc(:,:,1),... 205 | timeTEMCMC(:,1)], 'VariableNames',... 206 | {'True_pm2','TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 207 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 208 | 209 | table_sigma1_L1 = array2table([mean_tmcmc(:,3,1),cov_tmcmc(:,3,1),bounds_sigma1_tmcmc(:,:,1),... 210 | timeTMCMC(:,1),mean_temcmc(:,3,1),cov_temcmc(:,3,1),bounds_sigma1_temcmc(:,:,1),... 211 | timeTEMCMC(:,1)], 'VariableNames',... 212 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 213 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 214 | 215 | table_sigma2_L1 = array2table([mean_tmcmc(:,4,1),cov_tmcmc(:,4,1),bounds_sigma2_tmcmc(:,:,1),... 216 | timeTMCMC(:,1),mean_temcmc(:,4,1),cov_temcmc(:,4,1),bounds_sigma2_temcmc(:,:,1),... 217 | timeTEMCMC(:,1)], 'VariableNames',... 218 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 219 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 220 | 221 | table_sigma3_L1 = array2table([mean_tmcmc(:,5,1),cov_tmcmc(:,5,1),bounds_sigma3_tmcmc(:,:,1),... 222 | timeTMCMC(:,1),mean_temcmc(:,5,1),cov_temcmc(:,5,1),bounds_sigma3_temcmc(:,:,1),... 223 | timeTEMCMC(:,1)], 'VariableNames',... 
224 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 225 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 226 | 227 | table_sigma4_L1 = array2table([mean_tmcmc(:,6,1),cov_tmcmc(:,6,1),bounds_sigma4_tmcmc(:,:,1),... 228 | timeTMCMC(:,1),mean_temcmc(:,6,1),cov_temcmc(:,6,1),bounds_sigma4_temcmc(:,:,1),... 229 | timeTEMCMC(:,1)], 'VariableNames',... 230 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 231 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 232 | 233 | table_sigma5_L1 = array2table([mean_tmcmc(:,7,1),cov_tmcmc(:,7,1),bounds_sigma5_tmcmc(:,:,1),... 234 | timeTMCMC(:,1),mean_temcmc(:,7,1),cov_temcmc(:,7,1),bounds_sigma5_temcmc(:,:,1),... 235 | timeTEMCMC(:,1)], 'VariableNames',... 236 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 237 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 238 | 239 | table_sigma6_L1 = array2table([mean_tmcmc(:,8,1),cov_tmcmc(:,8,1),bounds_sigma6_tmcmc(:,:,1),... 240 | timeTMCMC(:,1),mean_temcmc(:,8,1),cov_temcmc(:,8,1),bounds_sigma6_temcmc(:,:,1),... 241 | timeTEMCMC(:,1)], 'VariableNames',... 242 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 243 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 244 | 245 | %% Summary of results for Likelihod 2: 246 | 247 | table_pm1_L2 = array2table([exp_pm(:,1),mean_tmcmc(:,1,2),cov_tmcmc(:,1,2),bounds_pm1_tmcmc(:,:,2),... 248 | timeTMCMC(:,2),mean_temcmc(:,1,2),cov_temcmc(:,1,2),bounds_pm1_temcmc(:,:,2),... 249 | timeTEMCMC(:,2)], 'VariableNames',... 250 | {'True_pm1','TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 251 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 252 | 253 | table_pm2_L2 = array2table([exp_pm(:,2),mean_tmcmc(:,2,2),cov_tmcmc(:,2,2),bounds_pm2_tmcmc(:,:,2),... 254 | timeTMCMC(:,2),mean_temcmc(:,2,2),cov_temcmc(:,2,2),bounds_pm2_temcmc(:,:,2),... 
255 | timeTEMCMC(:,2)], 'VariableNames',... 256 | {'True_pm2','TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 257 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 258 | 259 | table_sigma1_L2 = array2table([mean_tmcmc(:,3,2),cov_tmcmc(:,3,2),bounds_sigma1_tmcmc(:,:,2),... 260 | timeTMCMC(:,2),mean_temcmc(:,3,2),cov_temcmc(:,3,2),bounds_sigma1_temcmc(:,:,2),... 261 | timeTEMCMC(:,2)], 'VariableNames',... 262 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 263 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 264 | 265 | table_sigma2_L2 = array2table([mean_tmcmc(:,4,2),cov_tmcmc(:,4,2),bounds_sigma2_tmcmc(:,:,2),... 266 | timeTMCMC(:,2),mean_temcmc(:,4,2),cov_temcmc(:,4,2),bounds_sigma2_temcmc(:,:,2),... 267 | timeTEMCMC(:,2)], 'VariableNames',... 268 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 269 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 270 | 271 | table_sigma3_L2 = array2table([mean_tmcmc(:,5,2),cov_tmcmc(:,5,2),bounds_sigma3_tmcmc(:,:,2),... 272 | timeTMCMC(:,2),mean_temcmc(:,5,2),cov_temcmc(:,5,2),bounds_sigma3_temcmc(:,:,2),... 273 | timeTEMCMC(:,2)], 'VariableNames',... 274 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 275 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 276 | 277 | table_sigma4_L2 = array2table([mean_tmcmc(:,6,2),cov_tmcmc(:,6,2),bounds_sigma4_tmcmc(:,:,2),... 278 | timeTMCMC(:,2),mean_temcmc(:,6,2),cov_temcmc(:,6,2),bounds_sigma4_temcmc(:,:,2),... 279 | timeTEMCMC(:,2)], 'VariableNames',... 280 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 281 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 282 | 283 | table_sigma5_L2 = array2table([mean_tmcmc(:,7,2),cov_tmcmc(:,7,2),bounds_sigma5_tmcmc(:,:,2),... 284 | timeTMCMC(:,2),mean_temcmc(:,7,2),cov_temcmc(:,7,2),bounds_sigma5_temcmc(:,:,2),... 285 | timeTEMCMC(:,2)], 'VariableNames',... 
286 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 287 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 288 | 289 | table_sigma6_L2 = array2table([mean_tmcmc(:,8,2),cov_tmcmc(:,8,2),bounds_sigma6_tmcmc(:,:,2),... 290 | timeTMCMC(:,2),mean_temcmc(:,8,2),cov_temcmc(:,8,2),bounds_sigma6_temcmc(:,:,2),... 291 | timeTEMCMC(:,2)], 'VariableNames',... 292 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 293 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 294 | 295 | %% Summary of results for Likelihod 3: 296 | 297 | table_pm1_L3 = array2table([exp_pm(:,1),mean_tmcmc(:,1,3),cov_tmcmc(:,1,3),bounds_pm1_tmcmc(:,:,3),... 298 | timeTMCMC(:,3),mean_temcmc(:,1,3),cov_temcmc(:,1,3),bounds_pm1_temcmc(:,:,3),... 299 | timeTEMCMC(:,3)], 'VariableNames',... 300 | {'True_pm1','TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 301 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 302 | 303 | table_pm2_L3 = array2table([exp_pm(:,2),mean_tmcmc(:,2,3),cov_tmcmc(:,2,3),bounds_pm2_tmcmc(:,:,3),... 304 | timeTMCMC(:,3),mean_temcmc(:,2,3),cov_temcmc(:,2,3),bounds_pm2_temcmc(:,:,3),... 305 | timeTEMCMC(:,3)], 'VariableNames',... 306 | {'True_pm2','TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 307 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 308 | 309 | table_sigma1_L3 = array2table([mean_tmcmc(:,3,3),cov_tmcmc(:,3,3),bounds_sigma1_tmcmc(:,:,3),... 310 | timeTMCMC(:,3),mean_temcmc(:,3,3),cov_temcmc(:,3,3),bounds_sigma1_temcmc(:,:,3),... 311 | timeTEMCMC(:,3)], 'VariableNames',... 312 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 313 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 314 | 315 | table_sigma2_L3 = array2table([mean_tmcmc(:,4,3),cov_tmcmc(:,4,3),bounds_sigma2_tmcmc(:,:,3),... 316 | timeTMCMC(:,3),mean_temcmc(:,4,3),cov_temcmc(:,4,3),bounds_sigma2_temcmc(:,:,3),... 
317 | timeTEMCMC(:,3)], 'VariableNames',... 318 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 319 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 320 | 321 | table_sigma3_L3 = array2table([mean_tmcmc(:,5,3),cov_tmcmc(:,5,3),bounds_sigma3_tmcmc(:,:,3),... 322 | timeTMCMC(:,3),mean_temcmc(:,5,3),cov_temcmc(:,5,3),bounds_sigma3_temcmc(:,:,3),... 323 | timeTEMCMC(:,3)], 'VariableNames',... 324 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 325 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 326 | 327 | table_sigma4_L3 = array2table([mean_tmcmc(:,6,3),cov_tmcmc(:,6,3),bounds_sigma4_tmcmc(:,:,3),... 328 | timeTMCMC(:,3),mean_temcmc(:,6,3),cov_temcmc(:,6,3),bounds_sigma4_temcmc(:,:,3),... 329 | timeTEMCMC(:,3)], 'VariableNames',... 330 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 331 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 332 | 333 | table_sigma5_L3 = array2table([mean_tmcmc(:,7,3),cov_tmcmc(:,7,3),bounds_sigma5_tmcmc(:,:,3),... 334 | timeTMCMC(:,3),mean_temcmc(:,7,3),cov_temcmc(:,7,3),bounds_sigma5_temcmc(:,:,3),... 335 | timeTEMCMC(:,3)], 'VariableNames',... 336 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 337 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 338 | 339 | table_sigma6_L3 = array2table([mean_tmcmc(:,8,3),cov_tmcmc(:,8,3),bounds_sigma6_tmcmc(:,:,3),... 340 | timeTMCMC(:,3),mean_temcmc(:,8,3),cov_temcmc(:,8,3),bounds_sigma6_temcmc(:,:,3),... 341 | timeTEMCMC(:,3)], 'VariableNames',... 342 | {'TMCMC_mean','TMCMC_cov','TMCMC_lb','TMCMC_ub','TMCMC_time',... 
343 | 'TEMCMC_mean','TEMCMC_cov','TEMCMC_lb','TEMCMC_ub','TEMCMC_time'}); 344 | 345 | %% Construct P-boxes for pm1: 346 | 347 | figure; 348 | for j = 1:6 349 | subplot(2,3,j) 350 | hold on; grid on; box on; 351 | 352 | samps1 = TEMCMC{j,1}.samples; 353 | samps2 = TEMCMC{j,2}.samples; 354 | samps3 = TEMCMC{j,3}.samples; 355 | [m_output1, m_input1] = ecdf(samps1(:,1)); 356 | [m_output2, m_input2] = ecdf(samps2(:,1)); 357 | [m_output3, m_input3] = ecdf(samps3(:,1)); 358 | 359 | plot(m_input1, m_output1, 'r', 'linewidth', 1) 360 | plot(m_input2, m_output2, 'g', 'linewidth', 1) 361 | plot(m_input3, m_output3, 'b', 'linewidth', 1) 362 | xline(exp_pm(j,1), 'k --', 'linewidth', 2) 363 | 364 | title(sprintf('Exp #%1d', j),'Interpreter','latex'); 365 | xlabel('$pm_{1}$', 'Interpreter','latex') 366 | ylabel('ECDF values') 367 | set(gca, 'Fontsize', 20) 368 | end 369 | 370 | figure; 371 | for j = 7:size(exp_freq,1) 372 | subplot(2,3,j-6) 373 | hold on; grid on; box on; 374 | 375 | samps1 = TEMCMC{j,1}.samples; 376 | samps2 = TEMCMC{j,2}.samples; 377 | samps3 = TEMCMC{j,3}.samples; 378 | [m_output1, m_input1] = ecdf(samps1(:,1)); 379 | [m_output2, m_input2] = ecdf(samps2(:,1)); 380 | [m_output3, m_input3] = ecdf(samps3(:,1)); 381 | 382 | plot(m_input1, m_output1, 'r', 'linewidth', 1) 383 | plot(m_input2, m_output2, 'g', 'linewidth', 1) 384 | plot(m_input3, m_output3, 'b', 'linewidth', 1) 385 | xline(exp_pm(j,1), 'k --', 'linewidth', 2) 386 | 387 | title(sprintf('Exp #%1d', j),'Interpreter','latex'); 388 | xlabel('$pm_{1}$', 'Interpreter','latex') 389 | ylabel('ECDF values') 390 | set(gca, 'Fontsize', 20) 391 | end 392 | 393 | %% Construct P-boxes for pm2: 394 | 395 | figure; 396 | for j = 1:6 397 | subplot(2,3,j) 398 | hold on; grid on; box on; 399 | 400 | samps1 = TEMCMC{j,1}.samples; 401 | samps2 = TEMCMC{j,2}.samples; 402 | samps3 = TEMCMC{j,3}.samples; 403 | [m_output1, m_input1] = ecdf(samps1(:,2)); 404 | [m_output2, m_input2] = ecdf(samps2(:,2)); 405 | [m_output3, 
m_input3] = ecdf(samps3(:,2)); 406 | 407 | plot(m_input1, m_output1, 'r', 'linewidth', 1) 408 | plot(m_input2, m_output2, 'g', 'linewidth', 1) 409 | plot(m_input3, m_output3, 'b', 'linewidth', 1) 410 | xline(exp_pm(j,2), 'k --', 'linewidth', 2) 411 | 412 | title(sprintf('Exp #%1d', j),'Interpreter','latex'); 413 | xlabel('$pm_{2}$', 'Interpreter','latex') 414 | ylabel('ECDF values') 415 | set(gca, 'Fontsize', 20) 416 | end 417 | 418 | figure; 419 | for j = 7:size(exp_freq,1) 420 | subplot(2,3,j-6) 421 | hold on; grid on; box on; 422 | 423 | samps1 = TEMCMC{j,1}.samples; 424 | samps2 = TEMCMC{j,2}.samples; 425 | samps3 = TEMCMC{j,3}.samples; 426 | [m_output1, m_input1] = ecdf(samps1(:,2)); 427 | [m_output2, m_input2] = ecdf(samps2(:,2)); 428 | [m_output3, m_input3] = ecdf(samps3(:,2)); 429 | 430 | plot(m_input1, m_output1, 'r', 'linewidth', 1) 431 | plot(m_input2, m_output2, 'g', 'linewidth', 1) 432 | plot(m_input3, m_output3, 'b', 'linewidth', 1) 433 | xline(exp_pm(j,2), 'k --', 'linewidth', 2) 434 | 435 | title(sprintf('Exp #%1d', j),'Interpreter','latex'); 436 | xlabel('$pm_{2}$', 'Interpreter','latex') 437 | ylabel('ECDF values') 438 | set(gca, 'Fontsize', 20) 439 | end 440 | 441 | %% Statistics: 442 | 443 | % Interval statistis of the estimates for pm1 and pm2: 444 | p_val = [50 75, 95]; 445 | 446 | for i = 1:size(exp_freq,1) 447 | samps1 = TEMCMC{i,1}.samples; 448 | samps2 = TEMCMC{i,2}.samples; 449 | samps3 = TEMCMC{i,3}.samples; 450 | 451 | for j = 1:length(p_val) 452 | 453 | pm1_bounds(i,1,j) = min([prctile(samps1(:,1), 0.5*(100-p_val(j))),... 454 | prctile(samps2(:,1), 0.5*(100-p_val(j))),... 455 | prctile(samps3(:,1), 0.5*(100-p_val(j)))]); 456 | 457 | pm1_bounds(i,2,j) = max([prctile(samps1(:,1), 100-(0.5*(100-p_val(j)))),... 458 | prctile(samps2(:,1), 100-(0.5*(100-p_val(j)))),... 459 | prctile(samps3(:,1), 100-(0.5*(100-p_val(j))))]); 460 | 461 | pm2_bounds(i,1,j) = min([prctile(samps1(:,2), 0.5*(100-p_val(j))),... 
462 | prctile(samps2(:,2), 0.5*(100-p_val(j))),... 463 | prctile(samps3(:,2), 0.5*(100-p_val(j)))]); 464 | 465 | pm2_bounds(i,2,j) = max([prctile(samps1(:,2), 100-(0.5*(100-p_val(j)))),... 466 | prctile(samps2(:,2), 100-(0.5*(100-p_val(j)))),... 467 | prctile(samps3(:,2), 100-(0.5*(100-p_val(j))))]); 468 | 469 | end 470 | end 471 | 472 | % TEMCMC Statistics: 473 | for i = 1:size(exp_freq,1) 474 | for j = 1:size(loglike,1) 475 | 476 | iterationsTEMCMC(i,j) = length(TEMCMC{i,j}.beta) - 1; 477 | accept = TEMCMC{i,j}.acceptance; 478 | acceptanceTEMCMC(i,:,j) = [min(accept), max(accept)]; 479 | 480 | end 481 | end 482 | 483 | %% Save the data: 484 | 485 | % Filename fixed ("ModelUpdting" -> "ModelUpdating") so that the load() call in Aluminium_Frame_Analysis.m finds this file: 486 | save('Aluminium_Frame_ModelUpdating.mat') -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/Data_experimental.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adolphus8/Transitional_Ensemble_MCMC/3e6cbf62c1fc269e553942dc18d479c9cb25c847/Aluminium_Frame_Problem/Data_experimental.mat -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/Data_simulations.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adolphus8/Transitional_Ensemble_MCMC/3e6cbf62c1fc269e553942dc18d479c9cb25c847/Aluminium_Frame_Problem/Data_simulations.mat -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/FIT_NEURALNETWORK.m: -------------------------------------------------------------------------------- 1 | function [net,tr] = FIT_NEURALNETWORK(inputs, targets) 2 | %% This is the function used to solve an Input-Output Fitting problem with a Neural Network 3 | % 4 | % Usage: 5 | % [net, tr] = FIT_NEURALNETWORK(inputs, targets) 6 | % 7 | % Inputs: 8 | % inputs: The Ndata x Dim_input matrix of model inputs; 9 | % targets: The Ndata x Dim_output matrix of 
model outputs; 10 | % 11 | % Output: 12 | % net: The trained ANN model function; 13 | % tr: The structure of the ANN training statistics; 14 | % 15 | % Create a Fitting Network 16 | net = feedforwardnet([10]); 17 | net.trainParam.epochs = 1000; 18 | 19 | % Set up Division of Data for Training, Validation, Testing 20 | net.divideParam.trainRatio = 70/100; 21 | net.divideParam.valRatio = 15/100; 22 | net.divideParam.testRatio = 15/100; 23 | 24 | % Train the Network 25 | [net,tr] = train(net,inputs',targets'); 26 | 27 | end -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/README.md: -------------------------------------------------------------------------------- 1 | # Instructions 2 | This repository contains the complete set of codes to the Aluminium Frame Problem. 3 | 4 | To execute this problem, run the file "Aluminium_Frame_ModelUpdating.m" first. After which, run the file "Aluminium_Frame_Analysis.m". 5 | -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/TEMCMCsampler.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adolphus8/Transitional_Ensemble_MCMC/3e6cbf62c1fc269e553942dc18d479c9cb25c847/Aluminium_Frame_Problem/TEMCMCsampler.m -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/TMCMCsampler.m: -------------------------------------------------------------------------------- 1 | function [output] = TMCMCsampler(varargin) 2 | %% Transitional Markov Chain Monte Carlo sampler 3 | % 4 | % This program implements a method described in: 5 | % Ching, J. and Chen, Y. (2007). "Transitional Markov Chain Monte Carlo 6 | % Method for Bayesian Model Updating, Model Class Selection, and Model 7 | % Averaging." J. Eng. Mech., 133(7), 816-832. 
8 | % 9 | % Usage: 10 | % [samples_fT_D, fD] = tmcmc_v1(fD_T, fT, sample_from_fT, N); 11 | % 12 | % where: 13 | % 14 | % inputs: 15 | % log_fD_T = function handle of log(fD_T(t)), Loglikelihood 16 | % 17 | % fT = function handle of fT(t), Prior PDF 18 | % 19 | % sample_from_fT = handle to a function that samples from of fT(t), 20 | % Sampling rule function from Prior PDF 21 | % 22 | % nsamples = number of samples of fT_D, Posterior, to generate 23 | % 24 | % outputs: 25 | % samples_fT_D = samples of fT_D (N x D) = samples from Posterior 26 | % distribution 27 | % 28 | % log_fD = log(evidence) = log(normalization constant) 29 | 30 | % ------------------------------------------------------------------------ 31 | % who when observations 32 | %-------------------------------------------------------------------------- 33 | % Diego Andres Alvarez Jul-24-2013 First algorithm 34 | %-------------------------------------------------------------------------- 35 | % Diego Andres Alvarez - daalvarez@unal.edu.co 36 | % Edoardo Patelli - edoardo.patelli@strath.ac.uk 37 | 38 | % parse the information in the name/value pairs: 39 | pnames = {'nsamples','loglikelihood','priorpdf','priorrnd','burnin','lastburnin','beta'}; 40 | 41 | dflts = {[],[],[],[],0,0,0.2}; % define default values 42 | 43 | [nsamples,loglikelihood,priorpdf,prior_rnd,burnin,lastBurnin,beta] = ... 
44 | internal.stats.parseArgs(pnames, dflts, varargin{:}); 45 | 46 | %% Obtain N samples from the prior pdf f(T) 47 | j = 0; % Initialise loop for the transitional likelihood 48 | thetaj = prior_rnd(nsamples); % theta0 = N x D 49 | pj = 0; % p0 = 0 (initial tempering parameter) 50 | Dimensions = size(thetaj, 2); % size of the vector theta 51 | 52 | count = 1; % Counter 53 | samps(:,:,count) = thetaj; 54 | beta_j(count) = pj; 55 | beta = 2.4./sqrt(Dimensions); 56 | scale(count) = beta; 57 | 58 | %% Initialization of matrices and vectors 59 | thetaj1 = zeros(nsamples, Dimensions); 60 | 61 | %% Main loop 62 | while pj < 1 63 | j = j+1; 64 | 65 | %% Calculate the tempering parameter p(j+1): 66 | for l = 1:nsamples 67 | log_fD_T_thetaj(l) = loglikelihood(thetaj(l,:)); 68 | end 69 | if any(isinf(log_fD_T_thetaj)) 70 | error('The prior distribution is too far from the true region'); 71 | end 72 | pj1 = calculate_pj1(log_fD_T_thetaj, pj); 73 | fprintf('TMCMC: Iteration j = %2d, pj1 = %f\n', j, pj1); 74 | 75 | %% Compute the plausibility weight for each sample wrt f_{j+1} 76 | fprintf('Computing the weights ...\n'); 77 | a = (pj1-pj)*log_fD_T_thetaj; 78 | wj = exp(a); 79 | wj_norm = wj./sum(wj); % normalization of the weights 80 | 81 | %% Compute S(j) = E[w{j}] (eq 15) 82 | S(j) = mean(wj); 83 | 84 | %% Do the resampling step to obtain N samples from f_{j+1}(theta) and 85 | % then perform Metropolis-Hastings on each of these samples using as a 86 | % stationary PDF "fj1" 87 | % fj1 = @(t) fT(t).*log_fD_T(t).^pj1; % stationary PDF (eq 11) f_{j+1}(theta) 88 | log_posterior = @(t) log(priorpdf(t)) + pj1*loglikelihood(t); 89 | 90 | 91 | % and using as proposal PDF a Gaussian centered at thetaj(idx,:) and 92 | % with covariance matrix equal to an scaled version of the covariance 93 | % matrix of fj1: 94 | 95 | % weighted mean 96 | mu = zeros(1, Dimensions); 97 | for l = 1:nsamples 98 | mu = mu + wj_norm(l)*thetaj(l,:); % 1 x N 99 | end 100 | 101 | % scaled covariance matrix of 
fj1 (eq 17) 102 | cov_gauss = zeros(Dimensions); 103 | for k = 1:nsamples 104 | % this formula is slightly different to eq 17 (the transpose) 105 | % because of the size of the vectors)m and because Ching and Chen 106 | % forgot to normalize the weight wj: 107 | tk_mu = thetaj(k,:) - mu; 108 | cov_gauss = cov_gauss + wj_norm(k)*(tk_mu'*tk_mu); 109 | end 110 | 111 | cov_gauss = beta^2 * cov_gauss; 112 | assert(~isinf(cond(cov_gauss)),'Something is wrong with the likelihood.') 113 | 114 | % Define the Proposal distribution: 115 | proppdf = @(x,y) prop_pdf(x, y, cov_gauss, priorpdf); %q(x,y) = q(x|y). 116 | proprnd = @(x) prop_rnd(x, cov_gauss, priorpdf); 117 | 118 | %% During the last iteration we require to do a better burnin in order 119 | % to guarantee the quality of the samples: 120 | if pj1 == 1 121 | burnin = lastBurnin; 122 | end 123 | 124 | %% Start N different Markov chains 125 | fprintf('Markov chains ...\n\n'); 126 | idx = randsample(nsamples, nsamples, true, wj_norm); 127 | 128 | for i = 1:nsamples % For parallel, type: parfor 129 | %% Sample one point with probability wj_norm 130 | 131 | % smpl = mhsample(start, nsamples, 132 | % 'pdf', pdf, 'proppdf', proppdf, 'proprnd', proprnd); 133 | % start = row vector containing the start value of the Markov Chain, 134 | % nsamples = number of samples to be generated 135 | [thetaj1(i,:), acceptance_rate(i)] = mhsample(thetaj(idx(i), :), 1, ... 136 | 'logpdf', log_posterior, ... 137 | 'proppdf', proppdf, ... 138 | 'proprnd', proprnd, ... 139 | 'thin', 3, ... 140 | 'burnin', burnin); 141 | % According to Cheung and Beck (2009) - Bayesian model updating ..., 142 | % the initial samples from reweighting and the resample of samples of 143 | % fj, in general, do not exactly follow fj1, so that the Markov 144 | % chains must "burn-in" before samples follow fj1, requiring a large 145 | % amount of samples to be generated for each level. 
146 | 147 | %% Adjust the acceptance rate (optimal = 23%) 148 | % See: http://www.dms.umontreal.ca/~bedard/Beyond_234.pdf 149 | %{ 150 | if acceptance_rate < 0.3 151 | % Many rejections means an inefficient chain (wasted computation 152 | %time), decrease the variance 153 | beta = 0.99*beta; 154 | elseif acceptance_rate > 0.5 155 | % High acceptance rate: Proposed jumps are very close to current 156 | % location, increase the variance 157 | beta = 1.01*beta; 158 | end 159 | %} 160 | end 161 | fprintf('\n'); 162 | acceptance(count) = mean(acceptance_rate); 163 | 164 | %% Prepare for the next iteration 165 | c_a = (acceptance(count) - ((0.21./Dimensions) + 0.23))./sqrt(j); 166 | beta = beta .* exp(c_a); 167 | 168 | count = count+1; 169 | scale(count) = beta; 170 | samps(:,:,count) = thetaj1; 171 | thetaj = thetaj1; 172 | pj = pj1; 173 | beta_j(count) = pj; 174 | end 175 | 176 | % estimation of f(D) -- this is the normalization constant in Bayes 177 | log_fD = sum(log(S(1:j))); 178 | 179 | %% Description of outputs: 180 | 181 | output.allsamples = samps; % To show samples from all transitional distributions 182 | output.samples = samps(:,:,end); % To only show samples from the final posterior 183 | output.log_evidence = log_fD; % To generate the logarithmic of the evidence 184 | output.acceptance = acceptance; % To show the mean acceptance rates for all iterations 185 | output.beta = beta_j; % To show the values of temepring parameters, beta_j 186 | output.scale = scale(1:end-1); % To show the values of the scaling factor of the covariance matrix across iterations 187 | 188 | return; % End 189 | 190 | 191 | %% Calculate the tempering parameter p(j+1) 192 | function pj1 = calculate_pj1(log_fD_T_thetaj, pj) 193 | % find pj1 such that COV <= threshold, that is 194 | % 195 | % std(wj) 196 | % --------- <= threshold 197 | % mean(wj) 198 | % 199 | % here 200 | % size(thetaj) = N x D, 201 | % wj = fD_T(thetaj).^(pj1 - pj) 202 | % e = pj1 - pj 203 | 204 | threshold = 1; % 
100% = threshold on the COV 205 | 206 | % wj = @(e) fD_T_thetaj^e; % N x 1 207 | % Note the following trick in order to calculate e: 208 | % Take into account that e>=0 209 | wj = @(e) exp(abs(e)*log_fD_T_thetaj); % N x 1 210 | fmin = @(e) std(wj(e)) - threshold*mean(wj(e)) + realmin; 211 | e = abs(fzero(fmin, 0)); % e is >= 0, and fmin is an even function 212 | if isnan(e) 213 | error('There is an error finding e'); 214 | end 215 | 216 | pj1 = min(1, pj + e); 217 | 218 | return; % End 219 | 220 | function proppdf = prop_pdf(x, mu, covmat, box) 221 | % This is the Proposal PDF for the Markov Chain. 222 | 223 | % Box function is the Prior PDF in the feasible region. 224 | % So if a point is out of bounds, this function will 225 | % return 0. 226 | 227 | proppdf = mvnpdf(x, mu, covmat).*box(x); %q(x,y) = q(x|y). 228 | 229 | return; 230 | 231 | 232 | function proprnd = prop_rnd(mu, covmat, box) 233 | % Sampling from the proposal PDF for the Markov Chain. 234 | 235 | while true 236 | proprnd = mvnrnd(mu, covmat, 1); 237 | if box(proprnd) 238 | % The box function is the Prior PDF in the feasible region. 239 | % If a point is out of bounds, this function will return 0 = false. 
240 | break; 241 | end 242 | end 243 | 244 | return 245 | -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/loglikelihood1.m: -------------------------------------------------------------------------------- 1 | function [logL] = loglikelihood1(theta, measurements, net) 2 | %% This is the logarithm of the Gaussian Likelihood function 3 | % 4 | % Usage: 5 | % logL = loglikelihood1(theta, net) 6 | % 7 | % Inputs: 8 | % theta: The N x dim matrix of sample inputs; 9 | % measurements: The 1 x 6 vector of measurements; 10 | % net: The trained ANN model function; 11 | % 12 | % Output: 13 | % logL: The N x 1 vector of loglikelihood values; 14 | % 15 | %% Function description: 16 | model_output = (net(theta(:,1:2)'))'; % N x 6 vector 17 | 18 | logL = zeros(size(theta,1),1); % N x 6 vector 19 | for i = 1:size(theta,1) 20 | logL(i) = - 0.5 .* (1./theta(i,3)).^2 .*(measurements(:,1) - model_output(i,1)).^2 -... 21 | length(measurements).*log(sqrt(2*pi).*theta(i,3)) +... 22 | - 0.5 .* (1./theta(i,4)).^2 .*(measurements(:,2) - model_output(i,2)).^2 -... 23 | length(measurements).*log(sqrt(2*pi).*theta(i,4)) +... 24 | - 0.5 .* (1./theta(i,5)).^2 .*(measurements(:,3) - model_output(i,3)).^2 -... 25 | length(measurements).*log(sqrt(2*pi).*theta(i,5)) +... 26 | - 0.5 .* (1./theta(i,6)).^2 .*(measurements(:,4) - model_output(i,4)).^2 -... 27 | length(measurements).*log(sqrt(2*pi).*theta(i,6)) +... 28 | - 0.5 .* (1./theta(i,7)).^2 .*(measurements(:,5) - model_output(i,5)).^2 -... 29 | length(measurements).*log(sqrt(2*pi).*theta(i,7)) +... 30 | - 0.5 .* (1./theta(i,8)).^2 .*(measurements(:,6) - model_output(i,6)).^2 -... 
31 | length(measurements).*log(sqrt(2*pi).*theta(i,8)); 32 | end 33 | end 34 | 35 | -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/loglikelihood2.m: -------------------------------------------------------------------------------- 1 | function [logL] = loglikelihood2(theta, measurements, net) 2 | %% This is the logarithm of the Inverse-error Likelihood function 3 | % likelihood = 1 - exp[- (sigma/(obs - model_output))]; 4 | % 5 | % Usage: 6 | % logL = loglikelihood2(theta, measurements, net) 7 | % 8 | % Inputs: 9 | % theta: The N x dim matrix of sample inputs; 10 | % measurements: The 1 x 6 vector of measurements; 11 | % net: The trained ANN model function; 12 | % 13 | % Output: 14 | % logL: The N x 1 vector of loglikelihood values; 15 | % 16 | %% Function description: 17 | model_output = (net(theta(:,1:2)'))'; % N x 6 vector 18 | 19 | logL = zeros(size(theta,1),1); % N x 1 vector 20 | for i = 1:size(theta,1) 21 | logL(i) = log(1 - exp(- (1./abs(measurements(:,1) - model_output(i,1))))) + ... 22 | log(1 - exp(- (1./abs(measurements(:,2) - model_output(i,2))))) + ... 23 | log(1 - exp(- (1./abs(measurements(:,3) - model_output(i,3))))) + ... 24 | log(1 - exp(- (1./abs(measurements(:,4) - model_output(i,4))))) + ... 25 | log(1 - exp(- (1./abs(measurements(:,5) - model_output(i,5))))) + ...
26 | log(1 - exp(- (1./abs(measurements(:,6) - model_output(i,6))))); 27 | end 28 | end 29 | 30 | 31 | -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/loglikelihood3.m: -------------------------------------------------------------------------------- 1 | function [logL] = loglikelihood3(theta, measurements, net) 2 | %% This is the logarithm of the Inverse-square error Likelihood function 3 | % likelihood = 1 - exp[- (sigma/(obs - model_output)).^2]; 4 | % 5 | % Usage: 6 | % logL = loglikelihood3(theta, measurements, net) 7 | % 8 | % Inputs: 9 | % theta: The N x dim matrix of sample inputs; 10 | % measurements: The 1 x 6 vector of measurements; 11 | % net: The trained ANN model function; 12 | % 13 | % Output: 14 | % logL: The N x 1 vector of loglikelihood values; 15 | % 16 | %% Function description: 17 | model_output = (net(theta(:,1:2)'))'; % N x 6 vector 18 | 19 | logL = zeros(size(theta,1),1); % N x 1 vector 20 | for i = 1:size(theta,1) 21 | logL(i) = log(1 - exp(- (1./(measurements(:,1) - model_output(i,1))).^2)) + ... 22 | log(1 - exp(- (1./(measurements(:,2) - model_output(i,2))).^2)) + ... 23 | log(1 - exp(- (1./(measurements(:,3) - model_output(i,3))).^2)) + ... 24 | log(1 - exp(- (1./(measurements(:,4) - model_output(i,4))).^2)) + ... 25 | log(1 - exp(- (1./(measurements(:,5) - model_output(i,5))).^2)) + ...
26 | log(1 - exp(- (1./(measurements(:,6) - model_output(i,6))).^2)); 27 | end 28 | end 29 | 30 | -------------------------------------------------------------------------------- /Aluminium_Frame_Problem/plotEigenvalues.m: -------------------------------------------------------------------------------- 1 | function plotEigenvalues(samples) 2 | %% A function that is used to generate the scatterplot matrix of samples 3 | % Detailed explanation is as follows: 4 | % 5 | % Input: 6 | % samples: A N x dim matrix of samples, where dim is the dimension of samples; 7 | % 8 | % Output: 9 | % An image output of the scatterplot matrix of the samples; 10 | % 11 | %% Function description: 12 | 13 | dim = size(samples,2); 14 | 15 | figure; 16 | [~,ax1] = plotmatrix(samples); 17 | for i=1:dim 18 | ax1(i,1).FontSize = 20; 19 | ax1(dim,i).FontSize = 20; 20 | end 21 | ylabel(ax1(2,1),'$\omega_{2}$','Interpreter','latex'); 22 | ylabel(ax1(3,1),'$\omega_{3}$','Interpreter','latex'); 23 | ylabel(ax1(4,1),'$\omega_{4}$','Interpreter','latex'); 24 | ylabel(ax1(5,1),'$\omega_{5}$','Interpreter','latex'); 25 | ylabel(ax1(6,1),'$\omega_{6}$','Interpreter','latex'); 26 | xlabel(ax1(6,1),'$\omega_{1}$','Interpreter','latex'); 27 | xlabel(ax1(6,2),'$\omega_{2}$','Interpreter','latex'); 28 | xlabel(ax1(6,3),'$\omega_{3}$','Interpreter','latex'); 29 | xlabel(ax1(6,4),'$\omega_{4}$','Interpreter','latex'); 30 | xlabel(ax1(6,5),'$\omega_{5}$','Interpreter','latex'); 31 | xlabel(ax1(6,6),'$\omega_{6}$','Interpreter','latex'); 32 | set(gca,'FontSize',20) 33 | 34 | % To delete the unnecessary histogram/replicated plots: 35 | for i = 1:dim 36 | delete(ax1(i,i+1:end)) 37 | delete(ax1(1,1)) 38 | end 39 | 40 | 41 | end 42 | 43 | -------------------------------------------------------------------------------- /EMCMCsampler.m: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Adolphus8/Transitional_Ensemble_MCMC/3e6cbf62c1fc269e553942dc18d479c9cb25c847/EMCMCsampler.m -------------------------------------------------------------------------------- /Illustrative_Example.m: -------------------------------------------------------------------------------- 1 | %% Illustrative Example: AIES vs MH 2 | % To highlight the comparison between the AIES and MH samplers in sampling 3 | % from a skewed distribution (represented by a transitional distribution) 4 | % and the scaled posterior. The objective is to highlight the effectiveness 5 | % of the AIES sampler over the MH sampler in sampling from the former. 6 | 7 | %% The Log-Function: 8 | lb = -5; ub = 5; 9 | logfun = @(x,e) log(unifpdf(x(:,1), lb, ub)) + log(unifpdf(x(:,2), lb, ub)) + ... 10 | e .* ((-(3.*x(:,1) + x(:,2)).^2./0.08) + (-(x(:,1) - x(:,2)).^2./2)); 11 | 12 | % The Anisotropic Log-function: 13 | log_aniso = @(x) logfun(x, 0.2); 14 | 15 | % The Isotropic Log-function: 16 | log_iso = @(x) log(unifpdf(x(:,1), lb, ub)) + log(unifpdf(x(:,2), lb, ub)) + ... 
17 | ((-(x(:,1)).^2./2) + (-(x(:,2)).^2./2)); 18 | 19 | % Plotting the Ansiotropic Gaussian Function: 20 | [X1,X2] = meshgrid(lb:.01:ub, lb:.01:ub); 21 | Z1 = log_aniso([X1(:) X2(:)]); Z1 = reshape(Z1,size(X1)); 22 | Z2 = log_iso([X1(:) X2(:)]); Z2 = reshape(Z2,size(X1)); 23 | 24 | figure; 25 | subplot(1,2,1) 26 | hold on; box on; grid on; 27 | contour(X1,X2,exp(Z1)) 28 | colormap(parula) 29 | xlim([-5 5]); ylim([-5 5]); 30 | xlabel('$\theta^{1}$', 'Interpreter', 'latex'); ylabel('$\theta^{2}$', 'Interpreter', 'latex'); 31 | legend('Poorly-scaled P^{j}', 'linewidth', 2) 32 | set(gca, 'fontsize', 20) 33 | 34 | subplot(1,2,2) 35 | hold on; box on; grid on; 36 | contour(X1,X2,exp(Z2)) 37 | colormap(parula) 38 | xlim([-5 5]); ylim([-5 5]); 39 | xlabel('$\Theta^{1}$', 'Interpreter', 'latex'); ylabel('$\Theta^{2}$', 'Interpreter', 'latex'); 40 | legend('Scaled P^{j}', 'linewidth', 2) 41 | set(gca, 'fontsize', 20) 42 | 43 | %% AIES Section: 44 | 45 | dim = 2; % number of model parameters 46 | Nwalkers = 2*dim; % total number of chains of the ensemble 47 | start_emcmc = unifrnd(lb, ub, Nwalkers, dim); % Starting values of the chain(s). 48 | Nsamples = 250; % Overall sample number across all chains (not per chain) 49 | BurnIn_emcmc = 100; % Burn-in rate per chain 50 | step_size = 8; % To give acceptance rate between 0.15 to 0.5 51 | 52 | % Sample from the skewed transitional distribution: 53 | tic; 54 | EMCMC1 = EMCMCsampler(start_emcmc,log_aniso,Nsamples,'StepSize',step_size,... 55 | 'burnin',BurnIn_emcmc); 56 | timeEMCMC_1 = toc; 57 | fprintf('Time elapsed is for the AIES for Anisotropic case: %f \n',timeEMCMC_1) 58 | fprintf('The acceptance level of the AIES sampler for Anisotropic case is %d. 
\n',EMCMC1.acceptance) 59 | 60 | samps_aniso = EMCMC1.samples; 61 | samps_aniso_AIES = permute(samps_aniso, [2 1 3]); samps_aniso_AIES = samps_aniso_AIES(:,:)'; 62 | 63 | % Sample from the scaled transitional distribution: 64 | tic; 65 | EMCMC2 = EMCMCsampler(start_emcmc,log_iso,Nsamples,'StepSize',step_size,... 66 | 'burnin',BurnIn_emcmc); 67 | timeEMCMC_2 = toc; 68 | fprintf('Time elapsed is for the MCMC-Hammer sampler for Isotropic case: %f \n',timeEMCMC_2) 69 | fprintf('The acceptance level of the AIES sampler for Isotropic case is %d. \n',EMCMC2.acceptance) 70 | 71 | samps_iso = EMCMC2.samples; 72 | samps_iso_AIES = permute(samps_iso, [2 1 3]); samps_iso_AIES = samps_iso_AIES(:,:)'; 73 | 74 | %% MH Section: 75 | 76 | % Defining the variables: 77 | BurnIn_1 = 100; % Burn-in. 78 | NumberOfChains = Nwalkers; % No. of chains of the MCMC sampler. 79 | start_mh = unifrnd(lb, ub, Nwalkers, dim); % Starting values of the chain(s). 80 | 81 | % Defining the 2D covariance matrix (Tuning parameter): 82 | Tuning_mcmc_1 = [0.5, 0; 0, 0.5]; 83 | 84 | % Defining the 2D Proposal distribution function for the MCMC sampler: 85 | proppdf_1 = @(CandidateSample,CurrentSample) mvnpdf(CandidateSample,CurrentSample,Tuning_mcmc_1); 86 | proprnd_1 = @(CurrentSample) mvnrnd(CurrentSample,Tuning_mcmc_1); 87 | 88 | % Sample from the skewed transitional distribution: 89 | tic; 90 | [samples_mh_1,accept_1] = mhsample(start_mh,Nsamples,'logpdf',log_aniso,'proppdf',proppdf_1... 91 | ,'proprnd',proprnd_1,'symmetric',1.... 92 | ,'burnin',BurnIn_1,'nchain',NumberOfChains); 93 | timeMH_1 = toc; 94 | fprintf('The acceptance level of the MH sampler for Anisotropic case is %d. 
\n',accept_1) 95 | fprintf('Time elapsed is for the MH sampler for Ansiotropic case: %f \n',timeMH_1) 96 | 97 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 98 | BurnIn_2 = 100; 99 | 100 | % Defining the 2D covariance matrix (Tuning parameter): 101 | Tuning_mcmc_2 = [5, 0; 0, 5]; 102 | 103 | % Defining the 2D Proposal distribution function for the MCMC sampler: 104 | proppdf_2 = @(CandidateSample,CurrentSample) mvnpdf(CandidateSample,CurrentSample,Tuning_mcmc_2); 105 | proprnd_2 = @(CurrentSample) mvnrnd(CurrentSample,Tuning_mcmc_2); 106 | 107 | % Sample from the scaled transitional distribution: 108 | tic; 109 | [samples_mh_2,accept_2] = mhsample(start_mh,Nsamples,'logpdf',log_iso,'proppdf',proppdf_2... 110 | ,'proprnd',proprnd_2,'symmetric',1.... 111 | ,'burnin',BurnIn_2,'nchain',NumberOfChains); 112 | timeMH_2 = toc; 113 | fprintf('The acceptance level of the MH sampler for Isotropic case is %d. \n',accept_2) 114 | fprintf('Time elapsed is for MH sampler for Isotropic case: %f \n',timeMH_2) 115 | 116 | %% Plotting the marginal Ansiotropic Gaussian Log-Function: 117 | e = 0.01; % This is the scaling factor 118 | fun = @(x1,x2) exp(-((((3.*x1 + x2).^2)./(0.08)) + (((x1 - x2).^2)./(2)))).^(0.2); 119 | 120 | % Marginal distribution of x1: 121 | fun_x1 = @(x1) integral(@(x2) fun(x1,x2),lb,ub); 122 | 123 | % Marginal distribution of x2: 124 | fun_x2 = @(x2) integral(@(x1) fun(x1,x2),lb,ub); 125 | 126 | fun_x1_out = zeros(1000,1); fun_x2_out = zeros(1000,1); 127 | x1 = linspace(lb,ub,1000); x2 = linspace(lb,ub,1000); 128 | for i = 1:1000 129 | fun_x1_out(i) = fun_x1(x1(i)); 130 | fun_x2_out(i) = fun_x2(x2(i)); 131 | end 132 | fun_x1_out = normalize(cumsum(fun_x1_out),'range',[0,1]); 133 | fun_x2_out = normalize(cumsum(fun_x2_out),'range',[0,1]); 134 | 135 | %% Plot the Posterior samples 136 | 137 | % Plot figure for Skewed P^{j}: 138 | figure; 139 | subplot(1,2,1) 140 | hold on; box on; grid on; 141 | contour(X1,X2,exp(Z1)) 142 | 
colormap(parula) 143 | scatter(samples_mh1(:,1), samples_mh1(:,2), 18, 'r', 'filled'); 144 | xlim([-5 5]); ylim([-5 5]); 145 | xlabel('$\theta^{1}$', 'Interpreter', 'latex'); ylabel('$\theta^{2}$', 'Interpreter', 'latex'); 146 | legend('Skewed P^{j}', 'MH samples', 'linewidth', 2) 147 | title('MH Samples') 148 | set(gca, 'fontsize', 20) 149 | 150 | subplot(1,2,2) 151 | hold on; box on; grid on; 152 | contour(X1,X2,exp(Z1)) 153 | colormap(parula) 154 | scatter(samps_aniso_AIES(:,1), samps_aniso_AIES(:,2), 18, 'r', 'filled'); 155 | xlim([-5 5]); ylim([-5 5]); 156 | xlabel('$\theta^{1}$', 'Interpreter', 'latex'); ylabel('$\theta^{2}$', 'Interpreter', 'latex'); 157 | legend('Skewed P^{j}', 'AIES samples', 'linewidth', 2) 158 | title('AIES Samples') 159 | set(gca, 'fontsize', 20) 160 | 161 | % Plot figure for Scaled P^{j}: 162 | figure; 163 | subplot(1,2,1) 164 | hold on; box on; grid on; 165 | contour(X1,X2,exp(Z2)) 166 | colormap(parula) 167 | scatter(samples_mh2(:,1), samples_mh2(:,2), 18, 'r', 'filled'); 168 | xlim([-5 5]); ylim([-5 5]); 169 | xlabel('$\Theta^{1}$', 'Interpreter', 'latex'); ylabel('$\Theta^{2}$', 'Interpreter', 'latex'); 170 | legend('Scaled P^{j}', 'AIES samples', 'linewidth', 2) 171 | title('MH Samples') 172 | set(gca, 'fontsize', 20) 173 | 174 | subplot(1,2,2) 175 | hold on; box on; grid on; 176 | contour(X1,X2,exp(Z2)) 177 | colormap(parula) 178 | scatter(samps_iso_AIES(:,1), samps_iso_AIES(:,2), 18, 'r', 'filled'); 179 | xlim([-5 5]); ylim([-5 5]); 180 | xlabel('$\Theta^{1}$', 'Interpreter', 'latex'); ylabel('$\Theta^{2}$', 'Interpreter', 'latex'); 181 | legend('Scaled P^{j}', 'MH samples', 'linewidth', 2) 182 | title('AIES Samples') 183 | set(gca, 'fontsize', 20) 184 | 185 | %% Comparing the ECDFs 186 | e = 0.2; 187 | samples_mh1 = permute(samples_mh_1, [2 3 1]); samples_mh2 = permute(samples_mh_2, [2 3 1]); 188 | samples_mh1 = samples_mh1(:,:)'; samples_mh2 = samples_mh2(:,:)'; 189 | 190 | func_x1 = @(y) (1./(20.*sqrt(e))).*(y(:,1) + 
5.*y(:,2)); func_x2 = @(y) (1./(20.*sqrt(e))).*(y(:,1) - 15.*y(:,2)); 191 | 192 | rescaled_aies = zeros(size(samps_iso_AIES,1),size(samps_iso_AIES,2)); 193 | rescaled_aies(:,1) = func_x1(samps_iso_AIES); rescaled_aies(:,2) = func_x2(samps_iso_AIES); 194 | 195 | rescaled_mh = zeros(size(samples_mh2,1),size(samples_mh2,2)); 196 | rescaled_mh(:,1) = func_x1(samples_mh2); rescaled_mh(:,2) = func_x2(samples_mh2); 197 | 198 | figure; 199 | f = 17; 200 | % Compare ECDF Plots for AIES samples: 201 | subplot(2,2,1) 202 | hold on; box on; grid on; 203 | plot(x1,fun_x1_out,'k--','linewidth',1.5); 204 | [f1a,x1a]=ecdf(samps_aniso_AIES(:,1)); plot(x1a,f1a,'r','LineWidth',1.5) 205 | [f1b,x1b]=ecdf(rescaled_aies(:,1)); plot(x1b,f1b,'b','LineWidth',1.5) 206 | xlabel('$\theta^{1}$', 'Interpreter', 'latex'); ylabel('$F(\theta^{1})$', 'Interpreter', 'latex') 207 | legend('Analytial CDF', 'AIES Anisotropic', 'AIES Re-scaled', 'linewidth',2, 'location', 'Northwest'); 208 | xlim([lb, ub]) 209 | set(gca, 'Fontsize', f) 210 | 211 | subplot(2,2,2) 212 | hold on; box on; grid on; 213 | plot(x2,fun_x2_out,'k--','linewidth',1.5); 214 | [f2a,x2a]=ecdf(samps_aniso_AIES(:,2)); plot(x2a,f2a,'r','LineWidth',1.5) 215 | [f2b,x2b]=ecdf(rescaled_aies(:,2)); plot(x2b,f2b,'b','LineWidth',1.5) 216 | xlabel('$\theta^{2}$', 'Interpreter', 'latex'); ylabel('$F(\theta^{2})$', 'Interpreter', 'latex') 217 | legend('Analytial CDF', 'AIES Anisotropic', 'AIES Re-scaled', 'linewidth',2, 'location', 'Northwest'); 218 | xlim([lb, ub]) 219 | set(gca, 'Fontsize', f) 220 | 221 | % Compare ECDF Plots for MH samples: 222 | subplot(2,2,3) 223 | hold on; box on; grid on 224 | plot(x1,fun_x1_out,'k--','linewidth',1.5); 225 | [f3a,x3a]=ecdf(samples_mh1(:,1)); plot(x3a,f3a,'m','LineWidth',1.5) 226 | [f3b,x3b]=ecdf(rescaled_mh(:,1)); plot(x3b,f3b,'c','LineWidth',1.5) 227 | xlabel('$\theta^{1}$', 'Interpreter', 'latex'); ylabel('$F(\theta^{1})$', 'Interpreter', 'latex') 228 | legend('Analytical CDF','MH Anisotropic', 'MH 
Re-scaled', 'linewidth',2, 'location', 'Northwest'); 229 | xlim([lb, ub]) 230 | set(gca, 'Fontsize', f) 231 | 232 | subplot(2,2,4) 233 | hold on; box on; grid on 234 | plot(x2,fun_x2_out,'k--','linewidth',1.5); 235 | [f4a,x4a]=ecdf(samples_mh1(:,2)); plot(x4a,f4a,'m','LineWidth',1.5) 236 | [f4b,x4b]=ecdf(rescaled_mh(:,2)); plot(x4b,f4b,'c','LineWidth',1.5) 237 | xlabel('$\theta^{2}$', 'Interpreter', 'latex'); ylabel('$F(\theta^{2})$', 'Interpreter', 'latex') 238 | legend('Analytical CDF','MH Anisotropic', 'MH Re-scaled', 'linewidth',2, 'location', 'Northwest'); 239 | xlim([lb, ub]) 240 | set(gca, 'Fontsize', f) 241 | 242 | %% Compute Area Metric: 243 | 244 | area_AIES = zeros(1,2); area_MH = zeros(1,2); 245 | area_AIES(:,1) = areaMe(samps_aniso_AIES(:,1), rescaled_aies(:,1)); 246 | area_AIES(:,2) = areaMe(samps_aniso_AIES(:,2), rescaled_aies(:,2)); 247 | area_MH(:,1) = areaMe(samples_mh1(:,1), rescaled_mh(:,1)); 248 | area_MH(:,2) = areaMe(samples_mh1(:,2), rescaled_mh(:,2)); 249 | 250 | %% Save Data: 251 | save('Illustrative_Example') -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Transitional Ensemble Markov Chain Monte Carlo 2 | This repository presents a collection of tutorials (written in MATLAB) which seeks to demonstrate the implementation of the Transitional Ensemble Markov Chain Monte Carlo (TEMCMC) based on the literature by [Lye et. al (2022)](https://doi.org/10.1016/j.ymssp.2021.108471). Currently, 3 tutorials are presented here aimed at allowing users who are new such sampler find their footing around its concept and its workings. The details to these tutorials are as follows: 3 | 4 | ## Tutorials: 5 | 6 | ### 1) Illustrative Example: 7 | See: Illustrative_Example.m 8 | 9 | This example seeks to provide an illustration of the Affine-invariant property of this Ensemble sampler. 
10 | 11 | In most instances, when one encounters a highly-anisotropic target distribution, one approach would be to perform an Affine transformation so as to simplify the form of the target distribution and allow for it to be sampled from easily. From there, an inverse Affine transformation is performed on the generated samples so as to obtain the associated samples that is obtained indirectly from the original highly-anisotropic target distribution. 12 | 13 | For an Affine-invariant sampler, such as the Ensemble sampler here, it is able to sample directly from both the highly-anisotropic and the Affine-transformed distributions and views both distributions as equal. What this means is that an Affine-invariant sampler's sampling performance is unaffected regardless of whether that target distribution is scaled or not. This tutorial aims to highlight this property of the Ensemble sampler and as a comparison, the [Metropolis-Hastings](https://doi.org/10.1093/biomet/57.1.97) sampler will also be implemented to demonstrate the latter's absence of the Affine-invariant property. 14 | 15 | ### 2) Example Coupled Oscillator: 16 | See: example_CoupledOscillator.m 17 | 18 | This tutorial seeks to compare the robustness and strength of the TEMCMC sampler against the standard [TMCMC](https://doi.org/10.1061/(ASCE)0733-9399(2007)133:7(816)) sampler in identifying the spring stiffness of the primary and secondary springs of the Coupled Oscillator as well as the associated noise with the measured frequency modes. 19 | 20 | ### 3) Example Himmelblau's Function: 21 | See: example_himmelblau.m 22 | 23 | This tutorial seeks to compare the effectiveness of the TEMCMC sampler against the standard [TMCMC](https://doi.org/10.1061/(ASCE)0733-9399(2007)133:7(816)) sampler in identifying the peaks of the 4-peaked Himmelbau's function. 
24 | 25 | ### 4) Aluminium Frame Problem: 26 | See: Aluminium_Frame_Problem folder 27 | 28 | This problem involves a 2 Degree-of-Freedom Shear Aluminium Frame with 2 moveable masses, whose respective positions represent the location of damage. Here, actual experimental data of the response frequencies of the frame is obtained from a hammer test given a configuration of the positions of the moveable masses. This data is then used to infer the mass positions using an Artificial Neural Network (ANN), used in [R. Rocchetta et. al (2018)](https://doi.org/10.1016/j.ymssp.2017.10.015), that is trained with 103 synthetic data of the output response frequencies and the input mass positions obtained from the frame's Finite Element Model and Monte Carlo sampling. 29 | 30 | For this problem, the TEMCMC and TMCMC samplers are used to perform robust Bayes with different aggregated likelihood functions and obtain Posteriors of the mass positions in the form of P-boxes from which the interval estimates of the mass positions are obtained. The purpose of this work is to verify the estimates by the TEMCMC and validate the algorithm using actual experimental data. 31 | 32 | For more details to the Aluminium Frame set-up, readers can also refer to the work by [H. H. Khodaparast et. al (2011)](https://doi.org/10.1016/j.ymssp.2010.10.009), [P. Liang et. al (2016)](http://past.isma-isaac.be/downloads/isma2016/papers/isma2016_0351.pdf), and [Z. Yuan et. al (2019)](https://doi.org/10.1016/j.ymssp.2018.05.048). 33 | 34 | ## Reference(s): 35 | * A. Lye, A. Cicirello, and E. Patelli (2022). An efficient and robust sampler for Bayesian inference: Transitional Ensemble Markov Chain Monte Carlo. *Mechanical Systems and Signal Processing, 167*, 108471. doi: [10.1016/j.ymssp.2021.108471](https://doi.org/10.1016/j.ymssp.2021.108471) 36 | * A. Lye, A. Cicirello, and E. Patelli (2021). Sampling methods for solving Bayesian model updating problems: A tutorial.
*Mechanical Systems and Signal Processing, 159*, 107760. doi: [10.1016/j.ymssp.2021.107760](https://doi.org/10.1016/j.ymssp.2021.107760) 37 | 38 | * H. H. Khodaparast, J. E. Mottershead, and K. J. Badcock (2011). Interval model updating with irreducible uncertainty using the Kriging predictor. *Mechanical Systems and Signal Processing, 25*(4), 1204-1226. doi: [10.1016/j.ymssp.2010.10.009](https://doi.org/10.1016/j.ymssp.2010.10.009) 39 | * P. Liang, J. E. Mottershead, and F. A. DiazDelaO (2016). Model Updating with the Kriging Predictor: Effect of Code Uncertainty. [*In the Proceedings of ISMA 2016 including USD 2016*](http://past.isma-isaac.be/downloads/isma2016/papers/isma2016_0351.pdf) 40 | * R. Rocchetta, M. Broggi, Q. Huchet, and E. Patelli (2018). On-line Bayesian model updating for structural health monitoring. *Mechanical Systems and Signal Processing, 103*, 174-195. doi: [10.1016/j.ymssp.2017.10.015](https://doi.org/10.1016/j.ymssp.2017.10.015) 41 | * Z. Yuan, P. Liang, T. Silva, K. Yu, and J. E. Mottershead (2019). Parameter selection for model updating with global sensitivity analysis. *Mechanical Systems and Signal Processing, 115*, 483-496. 
doi: [10.1016/j.ymssp.2018.05.048](https://doi.org/10.1016/j.ymssp.2018.05.048) 42 | 43 | ## Author: 44 | * Name: Adolphus Lye 45 | * Contact: adolphus.lye@liverpool.ac.uk 46 | * Affiliation: Institute for Risk and Uncertainty, University of Liverpool 47 | -------------------------------------------------------------------------------- /TEMCMCsampler.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Adolphus8/Transitional_Ensemble_MCMC/3e6cbf62c1fc269e553942dc18d479c9cb25c847/TEMCMCsampler.m -------------------------------------------------------------------------------- /TMCMCsampler.m: -------------------------------------------------------------------------------- 1 | function [output] = TMCMCsampler(varargin) 2 | %% Transitional Markov Chain Monte Carlo sampler 3 | % 4 | % This program implements a method described in: 5 | % Ching, J. and Chen, Y. (2007). "Transitional Markov Chain Monte Carlo 6 | % Method for Bayesian Model Updating, Model Class Selection, and Model 7 | % Averaging." J. Eng. Mech., 133(7), 816-832.
8 | % 9 | % Usage: 10 | % [samples_fT_D, fD] = tmcmc_v1(fD_T, fT, sample_from_fT, N); 11 | % 12 | % where: 13 | % 14 | % inputs: 15 | % log_fD_T = function handle of log(fD_T(t)), Loglikelihood 16 | % 17 | % fT = function handle of fT(t), Prior PDF 18 | % 19 | % sample_from_fT = handle to a function that samples from of fT(t), 20 | % Sampling rule function from Prior PDF 21 | % 22 | % nsamples = number of samples of fT_D, Posterior, to generate 23 | % 24 | % outputs: 25 | % samples_fT_D = samples of fT_D (N x D) = samples from Posterior 26 | % distribution 27 | % 28 | % log_fD = log(evidence) = log(normalization constant) 29 | 30 | % ------------------------------------------------------------------------ 31 | % who when observations 32 | %-------------------------------------------------------------------------- 33 | % Diego Andres Alvarez Jul-24-2013 First algorithm 34 | %-------------------------------------------------------------------------- 35 | % Diego Andres Alvarez - daalvarez@unal.edu.co 36 | % Edoardo Patelli - edoardo.patelli@strath.ac.uk 37 | 38 | % parse the information in the name/value pairs: 39 | pnames = {'nsamples','loglikelihood','priorpdf','priorrnd','burnin','lastburnin','beta'}; 40 | 41 | dflts = {[],[],[],[],0,0,0.2}; % define default values 42 | 43 | [nsamples,loglikelihood,priorpdf,prior_rnd,burnin,lastBurnin,beta] = ... 
44 | internal.stats.parseArgs(pnames, dflts, varargin{:}); 45 | 46 | %% Number of cores 47 | if ~isempty(gcp('nocreate')) 48 | pool = gcp; 49 | Ncores = pool.NumWorkers; 50 | fprintf('TMCMC is running on %d cores.\n', Ncores); 51 | end 52 | 53 | %% Obtain N samples from the prior pdf f(T) 54 | j = 0; % Initialise loop for the transitional likelihood 55 | thetaj = prior_rnd(nsamples); % theta0 = N x D 56 | pj = 0; % p0 = 0 (initial tempering parameter) 57 | Dimensions = size(thetaj, 2); % size of the vector theta 58 | 59 | count = 1; % Counter 60 | samps(:,:,count) = thetaj; 61 | beta_j(count) = pj; 62 | beta = 2.4./sqrt(Dimensions); 63 | scale(count) = beta; 64 | 65 | %% Initialization of matrices and vectors 66 | thetaj1 = zeros(nsamples, Dimensions); 67 | 68 | %% Main loop 69 | while pj < 1 70 | j = j+1; 71 | 72 | %% Calculate the tempering parameter p(j+1): 73 | for l = 1:nsamples 74 | log_fD_T_thetaj(l) = loglikelihood(thetaj(l,:)); 75 | end 76 | if any(isinf(log_fD_T_thetaj)) 77 | error('The prior distribution is too far from the true region'); 78 | end 79 | pj1 = calculate_pj1(log_fD_T_thetaj, pj); 80 | fprintf('TMCMC: Iteration j = %2d, pj1 = %f\n', j, pj1); 81 | 82 | %% Compute the plausibility weight for each sample wrt f_{j+1} 83 | fprintf('Computing the weights ...\n'); 84 | a = (pj1-pj)*log_fD_T_thetaj; 85 | wj = exp(a); 86 | wj_norm = wj./sum(wj); % normalization of the weights 87 | 88 | %% Compute S(j) = E[w{j}] (eq 15) 89 | S(j) = mean(wj); 90 | 91 | %% Do the resampling step to obtain N samples from f_{j+1}(theta) and 92 | % then perform Metropolis-Hastings on each of these samples using as a 93 | % stationary PDF "fj1" 94 | % fj1 = @(t) fT(t).*log_fD_T(t).^pj1; % stationary PDF (eq 11) f_{j+1}(theta) 95 | log_posterior = @(t) log(priorpdf(t)) + pj1*loglikelihood(t); 96 | 97 | 98 | % and using as proposal PDF a Gaussian centered at thetaj(idx,:) and 99 | % with covariance matrix equal to an scaled version of the covariance 100 | % matrix of fj1: 101 
| 102 | % weighted mean 103 | mu = zeros(1, Dimensions); 104 | for l = 1:nsamples 105 | mu = mu + wj_norm(l)*thetaj(l,:); % 1 x N 106 | end 107 | 108 | % scaled covariance matrix of fj1 (eq 17) 109 | cov_gauss = zeros(Dimensions); 110 | for k = 1:nsamples 111 | % this formula is slightly different to eq 17 (the transpose) 112 | % because of the size of the vectors)m and because Ching and Chen 113 | % forgot to normalize the weight wj: 114 | tk_mu = thetaj(k,:) - mu; 115 | cov_gauss = cov_gauss + wj_norm(k)*(tk_mu'*tk_mu); 116 | end 117 | 118 | cov_gauss = beta^2 * cov_gauss; 119 | assert(~isinf(cond(cov_gauss)),'Something is wrong with the likelihood.') 120 | 121 | % Define the Proposal distribution: 122 | proppdf = @(x,y) prop_pdf(x, y, cov_gauss, priorpdf); %q(x,y) = q(x|y). 123 | proprnd = @(x) prop_rnd(x, cov_gauss, priorpdf); 124 | 125 | %% During the last iteration we require to do a better burnin in order 126 | % to guarantee the quality of the samples: 127 | if pj1 == 1 128 | burnin = lastBurnin; 129 | end 130 | 131 | %% Start N different Markov chains 132 | fprintf('Markov chains ...\n\n'); 133 | idx = randsample(nsamples, nsamples, true, wj_norm); 134 | 135 | for i = 1:nsamples % For parallel, type: parfor 136 | %% Sample one point with probability wj_norm 137 | 138 | % smpl = mhsample(start, nsamples, 139 | % 'pdf', pdf, 'proppdf', proppdf, 'proprnd', proprnd); 140 | % start = row vector containing the start value of the Markov Chain, 141 | % nsamples = number of samples to be generated 142 | [thetaj1(i,:), acceptance_rate(i)] = mhsample(thetaj(idx(i), :), 1, ... 143 | 'logpdf', log_posterior, ... 144 | 'proppdf', proppdf, ... 145 | 'proprnd', proprnd, ... 146 | 'thin', 3, ... 
147 | 'burnin', burnin); 148 | % According to Cheung and Beck (2009) - Bayesian model updating ..., 149 | % the initial samples from reweighting and the resample of samples of 150 | % fj, in general, do not exactly follow fj1, so that the Markov 151 | % chains must "burn-in" before samples follow fj1, requiring a large 152 | % amount of samples to be generated for each level. 153 | 154 | %% Adjust the acceptance rate (optimal = 23%) 155 | % See: http://www.dms.umontreal.ca/~bedard/Beyond_234.pdf 156 | %{ 157 | if acceptance_rate < 0.3 158 | % Many rejections means an inefficient chain (wasted computation 159 | %time), decrease the variance 160 | beta = 0.99*beta; 161 | elseif acceptance_rate > 0.5 162 | % High acceptance rate: Proposed jumps are very close to current 163 | % location, increase the variance 164 | beta = 1.01*beta; 165 | end 166 | %} 167 | end 168 | fprintf('\n'); 169 | acceptance(count) = mean(acceptance_rate); 170 | 171 | %% Prepare for the next iteration 172 | c_a = (acceptance(count) - ((0.21./Dimensions) + 0.23))./sqrt(j); 173 | beta = beta .* exp(c_a); 174 | 175 | count = count+1; 176 | scale(count) = beta; 177 | samps(:,:,count) = thetaj1; 178 | thetaj = thetaj1; 179 | pj = pj1; 180 | beta_j(count) = pj; 181 | end 182 | 183 | % estimation of f(D) -- this is the normalization constant in Bayes 184 | log_fD = sum(log(S(1:j))); 185 | 186 | %% Description of outputs: 187 | 188 | output.allsamples = samps; % To show samples from all transitional distributions 189 | output.samples = samps(:,:,end); % To only show samples from the final posterior 190 | output.log_evidence = log_fD; % To generate the logarithmic of the evidence 191 | output.acceptance = acceptance; % To show the mean acceptance rates for all iterations 192 | output.beta = beta_j; % To show the values of temepring parameters, beta_j 193 | output.scale = scale(1:end-1); % To show the values of the scaling factor of the covariance matrix across iterations 194 | 195 | return; % End 196 | 
197 | 198 | %% Calculate the tempering parameter p(j+1) 199 | function pj1 = calculate_pj1(log_fD_T_thetaj, pj) 200 | % find pj1 such that COV <= threshold, that is 201 | % 202 | % std(wj) 203 | % --------- <= threshold 204 | % mean(wj) 205 | % 206 | % here 207 | % size(thetaj) = N x D, 208 | % wj = fD_T(thetaj).^(pj1 - pj) 209 | % e = pj1 - pj 210 | 211 | threshold = 1; % 100% = threshold on the COV 212 | 213 | % wj = @(e) fD_T_thetaj^e; % N x 1 214 | % Note the following trick in order to calculate e: 215 | % Take into account that e>=0 216 | wj = @(e) exp(abs(e)*log_fD_T_thetaj); % N x 1 217 | fmin = @(e) std(wj(e)) - threshold*mean(wj(e)) + realmin; 218 | e = abs(fzero(fmin, 0)); % e is >= 0, and fmin is an even function 219 | if isnan(e) 220 | error('There is an error finding e'); 221 | end 222 | 223 | pj1 = min(1, pj + e); 224 | 225 | return; % End 226 | 227 | function proppdf = prop_pdf(x, mu, covmat, box) 228 | % This is the Proposal PDF for the Markov Chain. 229 | 230 | % Box function is the Prior PDF in the feasible region. 231 | % So if a point is out of bounds, this function will 232 | % return 0. 233 | 234 | proppdf = mvnpdf(x, mu, covmat).*box(x); %q(x,y) = q(x|y). 235 | 236 | return; 237 | 238 | 239 | function proprnd = prop_rnd(mu, covmat, box) 240 | % Sampling from the proposal PDF for the Markov Chain. 241 | 242 | while true 243 | proprnd = mvnrnd(mu, covmat, 1); 244 | if box(proprnd) 245 | % The box function is the Prior PDF in the feasible region. 246 | % If a point is out of bounds, this function will return 0 = false. 247 | break; 248 | end 249 | end 250 | 251 | return 252 | -------------------------------------------------------------------------------- /example_CoupledOscillator.m: -------------------------------------------------------------------------------- 1 | %% The TEMCMC sampler 2 | % 3 | % The TEMCMC sampler is based on the orginal TMCMC sampler proposed by 4 | % Ching and Chen (2007). 
For the TEMCMC sampler, the MH sampler is replaced
% by the Affine-invariant Ensemble sampler in the resampling procedure
% given the latter's strength in sampling from highly anisotropic
% distributions, which is the case of the transitional distributions.
%
%% Coupled spring-mass system
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reference: http://farside.ph.utexas.edu/teaching/336k/Newtonhtml/
% node100.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We have a coupled oscillator configuration: spring > mass > spring > mass >
% spring.
%
% Eigenfrequencies: sqrt(k./m), sqrt((k + 2.*k_12)./m)
% Hence, theoretical eigenfrequencies = 1.0954 Hz, 2.2804 Hz
%
% Input data:
% Primary spring stiffness, k = 0.6 N/m;
% Secondary spring stiffness, k_12 = 1 N/m;
% Mass, m = 0.5 kg
%
%% Define the parameters and random variables:

m = 0.5;  % Mass of the blocks in [kg]
k = 0.6;  % Stiffness of primary spring [N/m]
k_12 = 1; % Stiffness of secondary spring [N/m]

%% Define the model:

% Define model for the first eigenfrequency (input x = [m, k]):
model_1 = @(x) sqrt(x(:,2)./x(:,1));

% Define model for the second eigenfrequency (input x = [m, k, k_12]):
model_2 = @(x) sqrt((x(:,2) + 2.*x(:,3))./x(:,1));

%% Generate noisy measurements of Eigenfrequencies:

% Define the stochastic noise term for eigenfrequency 1 (10% noise level):
noise_1 = 0.1*model_1([m,k])*randn(15,1);

% Define the stochastic noise term for eigenfrequency 2 (10% noise level):
noise_2 = 0.1*model_2([m,k,k_12])*randn(15,1);

% Define the "noisy" measurements:
measurements = [model_1([m,k]), model_2([m,k,k_12])] + [noise_1, noise_2];

% To plot the 2D scatter plot of the model:
figure;
hold on; box on; grid on
scatter(measurements(:,1), measurements(:,2), 60, 'r ^', 'filled');
plot(model_1([m,k]), model_2([m,k,k_12]), 'k+', 'linewidth', 2.5);
xlabel('$\omega_1^{noisy}$ $[Hz]$','Interpreter','latex');
ylabel('$\omega_2^{noisy}$ $[Hz]$','Interpreter','latex');
xlim([0.9 1.4])
legend('Noisy eigenfrequencies', 'True eigenfrequency','LineWidth',2)
set(gca, 'fontsize', 20)

%% Define the Prior:

% Element 1 of each vector bounds the stiffnesses, element 2 the noise sigmas:
lowerBound = [0.01, 1e-05]; upperBound = [4, 1];

% Prior PDF of k:
priorPDF_k = @(x) unifpdf(x, lowerBound(1), upperBound(1));

% Prior PDF of k_12:
priorPDF_k12 = @(x) unifpdf(x, lowerBound(1), upperBound(1));

% Prior PDF of sigma_1 (standard deviation of f1):
priorPDF_sigma1 = @(x) unifpdf(x, lowerBound(2), upperBound(2));

% Prior PDF of sigma_2 (standard deviation of f2):
priorPDF_sigma2 = @(x) unifpdf(x, lowerBound(2), upperBound(2));

% Define the overall prior PDF (product of independent marginals):
prior_pdf = @(x) priorPDF_k(x(:,1)).*priorPDF_k12(x(:,2)).*...
    priorPDF_sigma1(x(:,3)).*priorPDF_sigma2(x(:,4));

prior_rnd = @(N) [unifrnd(lowerBound(1), upperBound(1), N, 1),...
    unifrnd(lowerBound(1), upperBound(1), N, 1),...
    unifrnd(lowerBound(2), upperBound(2), N, 1),...
    unifrnd(lowerBound(2), upperBound(2), N, 1)];

%% Define the Log-likelihood:

% Gaussian likelihood over both eigenfrequencies; x = [k, k_12, sigma_1, sigma_2]:
logL = @(x) - 0.5 .* (1./x(:,3)).^2 .*(measurements(:,1) - model_1([m, x(:,1)]))' *...
    (measurements(:,1) - model_1([m, x(:,1)])) -...
    length(measurements).*log(sqrt(2*pi).*x(:,3)) +...
    - 0.5 .* (1./x(:,4)).^2 .*(measurements(:,2) - model_2([m, x(:,1), x(:,2)]))' *...
    (measurements(:,2) - model_2([m, x(:,1), x(:,2)])) -...
    length(measurements).*log(sqrt(2*pi).*x(:,4));

%% Run TMCMC sampler:

Nsamples = 1000;

tic;
TMCMC = TMCMCsampler('nsamples',Nsamples,'loglikelihood',logL,...
    'priorpdf',prior_pdf,'priorrnd',prior_rnd,'burnin',0);
timeTMCMC = toc;
fprintf('Time elapsed is for the TMCMC sampler: %f \n',timeTMCMC)

samples_tmcmc = TMCMC.samples;

tmcmc_mean = mean(samples_tmcmc);        % To calculate the sample mean
tmcmc_std = std(samples_tmcmc);          % To calculate the sample standard deviation
COV_tmcmc = (tmcmc_std./tmcmc_mean)*100; % To calculate the COV of the estimation

%% Run the TEMCMC sampler:

Nsamples = 1000;

tic;
TEMCMC = TEMCMCsampler('nsamples',Nsamples,'loglikelihood',logL,...
    'priorpdf',prior_pdf,'priorrnd',prior_rnd,'burnin',0);
timeTEMCMC = toc;
fprintf('Time elapsed is for the TEMCMC sampler: %f \n',timeTEMCMC)

samples_temcmc = TEMCMC.samples;

temcmc_mean = mean(samples_temcmc);         % To calculate the sample mean
temcmc_std = std(samples_temcmc);           % To calculate the sample standard deviation
COV_temcmc = (temcmc_std./temcmc_mean)*100; % To calculate the COV of the estimation

%% Plot the combined Scatterplot matrix:

figure();
subplot(1,2,1)
[~,ax1] = plotmatrix(samples_tmcmc);
for i=1:4
    ax1(i,1).FontSize = 18;
    ax1(4,i).FontSize = 18;
end
ylabel(ax1(1,1),'$k$ $[N/m]$','Interpreter','latex');
ylabel(ax1(2,1),'$k_{12}$ $[N/m]$','Interpreter','latex');
ylabel(ax1(3,1),'$\sigma_1$ $[Hz]$','Interpreter','latex');
ylabel(ax1(4,1),'$\sigma_2$ $[Hz]$','Interpreter','latex');
xlabel(ax1(4,1),'$k$ $[N/m]$','Interpreter','latex');
xlabel(ax1(4,2),'$k_{12}$ $[N/m]$','Interpreter','latex');
xlabel(ax1(4,3),'$\sigma_1$ $[Hz]$','Interpreter','latex');
xlabel(ax1(4,4),'$\sigma_2$ $[Hz]$','Interpreter','latex');
title('TMCMC Posterior')
set(gca,'FontSize',20)

subplot(1,2,2)
[~,ax2] = plotmatrix(samples_temcmc);
for i=1:4
    ax2(i,1).FontSize = 18;
    ax2(4,i).FontSize = 18;
end
ylabel(ax2(1,1),'$k$ $[N/m]$','Interpreter','latex');
ylabel(ax2(2,1),'$k_{12}$ $[N/m]$','Interpreter','latex');
ylabel(ax2(3,1),'$\sigma_1$ $[Hz]$','Interpreter','latex');
ylabel(ax2(4,1),'$\sigma_2$ $[Hz]$','Interpreter','latex');
xlabel(ax2(4,1),'$k$ $[N/m]$','Interpreter','latex');
xlabel(ax2(4,2),'$k_{12}$ $[N/m]$','Interpreter','latex');
xlabel(ax2(4,3),'$\sigma_1$ $[Hz]$','Interpreter','latex');
xlabel(ax2(4,4),'$\sigma_2$ $[Hz]$','Interpreter','latex');
title('TEMCMC Posterior')
set(gca,'FontSize',20)

%% Model Update

% Push the posterior stiffness samples back through the eigenfrequency models:
update_model_1 = @(x) sqrt(x./m);
update_model_2 = @(x) sqrt((x(:,1) + 2.*x(:,2))./m);

figure;
subplot(1,2,1) % Plot Model Update results for TMCMC
hold on; box on; grid on
scatter(update_model_1(samples_tmcmc(:,1)), update_model_2([samples_tmcmc(:,1),samples_tmcmc(:,2)]), 18, 'b', 'filled')
scatter(measurements(:,1), measurements(:,2), 18, 'r', 'filled');
plot(model_1([m,k]), model_2([m,k,k_12]), 'k +','LineWidth', 2);
xlabel('$\omega_1^{noisy}$ $[Hz]$','Interpreter','latex');
ylabel('$\omega_2^{noisy}$ $[Hz]$','Interpreter','latex');
legend('TMCMC Model Update','Noisy eigenfrequencies', 'True eigenfrequency','LineWidth',2)
set(gca, 'fontsize', 20)

subplot(1,2,2) % Plot Model Update results for TEMCMC
hold on; box on; grid on
scatter(update_model_1(samples_temcmc(:,1)), update_model_2([samples_temcmc(:,1),samples_temcmc(:,2)]), 18, 'b', 'filled')
scatter(measurements(:,1), measurements(:,2), 18, 'r', 'filled');
plot(model_1([m,k]), model_2([m,k,k_12]), 'k +','LineWidth', 2);
xlabel('$\omega_1^{noisy}$ $[Hz]$','Interpreter','latex');
ylabel('$\omega_2^{noisy}$ $[Hz]$','Interpreter','latex');
legend('TEMCMC Model Update','Noisy eigenfrequencies', 'True eigenfrequency','LineWidth',2)
set(gca, 'fontsize', 20)

%% TMCMC vs TEMCMC Statistics:

dim = size(samples_temcmc, 2);      % dimensionality of the problem
target_accept = 0.23 + (0.21./dim); % target acceptance rate used by the adaptive scaling

% Plot their beta values:
figure;
subplot(1,2,1)
xin = 0:length(TEMCMC.beta)-1;
yin = 0:length(TMCMC.beta)-1;
hold on; box on; grid on;
plot(xin, TEMCMC.beta, '--rs', 'MarkerFaceColor','r','linewidth', 1.5)
plot(yin, TMCMC.beta, '--bs', 'MarkerFaceColor','b','linewidth', 1.5)
legend('TEMCMC \beta_j values', 'TMCMC \beta_j values', 'linewidth', 2)
title('Plot of \beta_j values')
xlabel('$j$','Interpreter','latex'); ylabel('$\beta_j$','Interpreter','latex');
set(gca, 'fontsize', 20)

subplot(1,2,2)
ain = 1:length(TEMCMC.acceptance);
bin = 1:length(TMCMC.acceptance);
hold on; box on; grid on;
plot(ain, TEMCMC.acceptance, '--rs', 'MarkerFaceColor','r','linewidth', 1.5)
plot(bin, TMCMC.acceptance, '--bs', 'MarkerFaceColor','b','linewidth', 1.5)
plot([1 10],[target_accept target_accept] , 'c','linewidth', 1.5)
plot([1 10],[0.15 0.15] , 'k','linewidth', 1.5)
plot([1 10],[0.5 0.5] , 'k','linewidth', 1.5)
legend('TEMCMC acceptance rates', 'TMCMC acceptance rates',...
224 | 'Target acceptance rate', 'Optimum acceptance limits', 'linewidth', 2) 225 | title('Plot of Acceptance rates') 226 | xlabel('$j$','Interpreter','latex'); ylabel('Acceptance rate'); 227 | xlim([1 10]) 228 | set(gca, 'fontsize', 20) 229 | 230 | %% Save the data: 231 | 232 | save('example_CoupledOscillator_m'); 233 | -------------------------------------------------------------------------------- /example_himmelblau.m: -------------------------------------------------------------------------------- 1 | %% The TEMCMC sampler 2 | % 3 | % The TEMCMC sampler is based on the orginal TMCMC sampler proposed by 4 | % Ching and Chen (2007). For the TEMCMC sampler, the MH sampler is replaced 5 | % by the Affine-invariant Ensemble sampler in the resampling procedure 6 | % given the latter's strength in sampling from highly anisotropic 7 | % distributions, which is the case of the transitional distributions. 8 | % 9 | %% The Himmelblau's Function: 10 | % 11 | % In this example, we will evaluate the performance of the TEMCMC sampler 12 | % against the TMCMC sampler in sampling from the Himmelblau's function, a 13 | % 2D distribution function with 4 local minima. 14 | % 15 | % The Himmelblau's function is defined as: 16 | % 17 | % f(x,y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2; 18 | % 19 | % with the local minima located at: f(3.0, 2.0), f(-2.805118, 3.131312), 20 | % f(-3.779310, -3.283186), and f(3.584428, -1848126). 
To convert these 21 | % local minima into local maxima, into a region of high probability, the 22 | % loglikelihood becomes: 23 | % 24 | % log_like(x,y) = -f(x, y); 25 | % 26 | clc; close all; 27 | 28 | % The Negative-Log of the Himmelblau's Function: 29 | logPfun = @(x) - ((x(:,1).^2 + x(:,2) - 11).^2 + (x(:,1) + x(:,2).^2 - 7).^2); 30 | 31 | % Plotting the Himmelblau's Function: 32 | [X1,X2] = meshgrid(-6:.01:6,-6:.01:6); 33 | Z = logPfun([X1(:) X2(:)]); 34 | Z = reshape(Z,size(X1)); 35 | 36 | figure; 37 | hold on; box on; grid on; 38 | contour(X1,X2,exp(Z)) 39 | colormap(parula) 40 | title('Himmelblau Function') 41 | xlim([-5 5]); ylim([-5 5]); 42 | xlabel('$x_1$','Interpreter','latex'); ylabel('$x_2$','Interpreter','latex'); 43 | set(gca,'fontsize',20) 44 | 45 | %% Define the Prior: 46 | 47 | lowerBound = -5; upperBound = 5; 48 | prior_pdf = @(x) unifpdf(x(:,1),lowerBound,upperBound).*unifpdf(x(:,2),lowerBound,upperBound); 49 | prior_rnd = @(N) [unifrnd(lowerBound,upperBound,N,1),unifrnd(lowerBound,upperBound,N,1)]; 50 | 51 | %% Define the Likelihood function: 52 | 53 | logl = @(x) - ((x(:,1).^2 + x(:,2) - 11).^2 + (x(:,1) + x(:,2).^2 - 7).^2); 54 | 55 | %% Run TMCMC sampler: 56 | 57 | Nsamples = 1000; 58 | 59 | tic; 60 | TMCMC = TMCMCsampler('nsamples',Nsamples,'loglikelihood',logl,... 61 | 'priorpdf',prior_pdf,'priorrnd',prior_rnd,'burnin',0); 62 | timeTMCMC = toc; 63 | fprintf('Time elapsed is for the TMCMC sampler: %f \n',timeTMCMC) 64 | 65 | samples_tmcmc = TMCMC.samples; 66 | 67 | %% Run the TEMCMC sampler: 68 | 69 | Nsamples = 1000; 70 | 71 | %parpool(2); 72 | tic; 73 | TEMCMC = TEMCMCsampler('nsamples',Nsamples,'loglikelihood',logl,... 
74 | 'priorpdf',prior_pdf,'priorrnd',prior_rnd,'burnin',0); 75 | timeTEMCMC = toc; 76 | fprintf('Time elapsed is for the TEMCMC sampler: %f \n',timeTEMCMC) 77 | 78 | samples_temcmc = TEMCMC.samples; 79 | 80 | %% Plot the combined scatterplot matrix: 81 | 82 | figure(); 83 | subplot(1,2,1) 84 | [~,ax1] = plotmatrix(samples_tmcmc); 85 | title('TMCMC Scatterplot Matrix', 'Fontsize', 20); 86 | for i=1:2 87 | ax1(i,1).FontSize = 16; 88 | ax1(2,i).FontSize = 16; 89 | ylabel(ax1(i,1),sprintf('x_{%d}', i)); 90 | xlabel(ax1(2,i),sprintf('x_{%d}', i)); 91 | end 92 | set(gca,'FontSize',18) 93 | subplot(1,2,2) 94 | [~,ax2] = plotmatrix(samples_temcmc); 95 | title('TEMCMC Scatterplot Matrix', 'Fontsize', 20); 96 | for i=1:2 97 | ax2(i,1).FontSize = 16; 98 | ax2(2,i).FontSize = 16; 99 | ylabel(ax2(i,1),sprintf('x_{%d}', i)); 100 | xlabel(ax2(2,i),sprintf('x_{%d}', i)); 101 | end 102 | set(gca,'FontSize',18) 103 | 104 | %% Plot the combined scatterplot: 105 | 106 | figure; 107 | subplot(1,2,1) 108 | hold on; box on; grid on; 109 | contour(X1,X2,exp(Z)) 110 | colormap(parula) 111 | scatter(samples_tmcmc(:,1), samples_tmcmc(:,2),18,'r','filled'); 112 | legend('Himmelblau function', 'TMCMC samples', 'linewidth', 2) 113 | xlim([-5 5]); ylim([-5 5]); 114 | xlabel('$x_1$','Interpreter','latex'); ylabel('$x_2$','Interpreter','latex'); 115 | set(gca,'fontsize',20) 116 | 117 | subplot(1,2,2) 118 | hold on; box on; grid on; 119 | contour(X1,X2,exp(Z)) 120 | colormap(parula) 121 | scatter(samples_temcmc(:,1), samples_temcmc(:,2),18,'r','filled'); 122 | legend('Himmelblau function', 'TEMCMC samples', 'linewidth', 2) 123 | xlim([-5 5]); ylim([-5 5]); 124 | xlabel('$x_1$','Interpreter','latex'); ylabel('$x_2$','Interpreter','latex'); 125 | set(gca,'fontsize',20) 126 | 127 | %% Plotting the Transitional distributions and the respective samples: 128 | 129 | beta_tmcmc = TMCMC.beta; 130 | beta_temcmc = TEMCMC.beta; 131 | 132 | % Plotting the Transitional functions for TMCMC: 133 | for k = 
1:length(TMCMC.beta) 134 | log_transition_func = @(x) log(prior_pdf(x)) + (beta_tmcmc(k)).*logl(x); 135 | Z1 = zeros(length(X1(:)),length(beta_tmcmc)); 136 | 137 | Z1(:,k) = log_transition_func([X1(:) X2(:)]); 138 | 139 | Z_tmcmc(:,:,k) = reshape(Z1(:,k), size(X1)); 140 | end 141 | 142 | % Plotting the Transitional functions for TEMCMC: 143 | for k = 1:length(TEMCMC.beta) 144 | log_transition_func = @(x) log(prior_pdf(x)) + (beta_temcmc(k)).*logl(x); 145 | Z2 = zeros(length(X1(:)),length(beta_temcmc)); 146 | 147 | Z2(:,k) = log_transition_func([X1(:) X2(:)]); 148 | 149 | Z_temcmc(:,:,k) = reshape(Z2(:,k), size(X1)); 150 | end 151 | 152 | % Plotting the transitional distributions and the samples for TMCMC: 153 | allsamples_tmcmc = TMCMC.allsamples; 154 | 155 | figure; 156 | for k = 1:length(beta_tmcmc) 157 | subplot(2,3,k) 158 | hold on; box on; grid on 159 | contour(X1,X2,exp(Z_tmcmc(:,:,k))) 160 | colormap(parula) 161 | scatter(allsamples_tmcmc(:,1,k), allsamples_tmcmc(:,2,k), 18, 'r', 'filled'); 162 | xlim([-5 5]); ylim([-5 5]); 163 | xlabel('$x_1$','Interpreter','latex'); ylabel('$x_2$','Interpreter','latex'); 164 | title(sprintf('$j = %d$', (k-1)),'Interpreter','latex') 165 | set(gca,'fontsize',20) 166 | end 167 | 168 | % Plotting the transitional distributions and the samples for TMCMC: 169 | allsamples_temcmc = TEMCMC.allsamples; 170 | 171 | figure; 172 | for k = 1:length(beta_temcmc) 173 | subplot(2,3,k) 174 | hold on; box on; grid on 175 | contour(X1,X2,exp(Z_temcmc(:,:,k))) 176 | colormap(parula) 177 | scatter(allsamples_temcmc(:,1,k), allsamples_temcmc(:,2,k), 18, 'r', 'filled'); 178 | xlim([-5 5]); ylim([-5 5]); 179 | xlabel('$x_1$','Interpreter','latex'); ylabel('$x_2$','Interpreter','latex'); 180 | title(sprintf('$j = %d$', (k-1)),'Interpreter','latex') 181 | set(gca,'fontsize',20) 182 | end 183 | 184 | %% Plotting the marginal ECDFs: 185 | 186 | loglike = @(x,y) - ((x.^2 + y - 11).^2 + (x + y.^2 - 7).^2); 187 | prior = @(x,y) 
unifpdf(x,lowerBound,upperBound).*unifpdf(y,lowerBound,upperBound); 188 | fun = @(x,y) prior(x,y).*exp(loglike(x,y)); 189 | 190 | % Marginal distribution of x1: 191 | fun_x1 = @(x) integral(@(y) fun(x,y),lowerBound,upperBound); 192 | % Marginal distribution of x2: 193 | fun_x2 = @(y) integral(@(x) fun(x,y),lowerBound,upperBound); 194 | 195 | fun_x1_out = zeros(1000,1); fun_x2_out = zeros(1000,1); 196 | x1 = linspace(lowerBound,upperBound,1000); x2 = linspace(lowerBound,upperBound,1000); 197 | for i = 1:1000 198 | fun_x1_out(i) = fun_x1(x1(i)); 199 | fun_x2_out(i) = fun_x2(x2(i)); 200 | end 201 | fun_x1_out = normalize(cumsum(fun_x1_out),'range',[0,1]); 202 | fun_x2_out = normalize(cumsum(fun_x2_out),'range',[0,1]); 203 | 204 | %% Combined ECDF Plots: 205 | 206 | figure(); 207 | subplot(1,2,1) 208 | hold on; box on; grid on; 209 | plot(x1',fun_x1_out,'k','LineWidth',1.5) 210 | [f1a,x1a] = ecdf(samples_temcmc(:,1)); 211 | plot(x1a,f1a,'r','LineWidth',1.5) 212 | [f1b,x1b] = ecdf(samples_tmcmc(:,1)); 213 | plot(x1b,f1b,'b','LineWidth',1.5) 214 | xlabel('x_1') 215 | ylabel('F(x_1)') 216 | xlim([-5 5]) 217 | legend('Posterior Marginal CDF', 'ECDF TEMCMC samples', 'ECDF TMCMC samples', 'linewidth',2); 218 | title('ECDF of samples for x_1') 219 | set(gca, 'Fontsize', 15) 220 | 221 | subplot(1,2,2) 222 | hold on 223 | box on 224 | grid on 225 | plot(x2',fun_x2_out,'k','LineWidth',1.5) 226 | [f2a,x2a] = ecdf(samples_temcmc(:,2)); 227 | plot(x2a,f2a,'r','LineWidth',1.5) 228 | [f2b,x2b] = ecdf(samples_tmcmc(:,2)); 229 | plot(x2b,f2b,'b','LineWidth',1.5) 230 | xlabel('x_2') 231 | ylabel('F(x_2)') 232 | xlim([-5 5]) 233 | legend('Posterior Marginal CDF', 'ECDF TEMCMC samples', 'ECDF TMCMC samples', 'linewidth',2); 234 | title('ECDF of samples for x_2') 235 | set(gca, 'Fontsize', 15) 236 | 237 | %% TMCMC vs TEMCMC Statistics: 238 | 239 | dim = size(samples_temcmc, 2); % dimensionality of the problem 240 | target_accept = 0.23 + (0.21./dim); 241 | 242 | % Plot their beta values: 
243 | figure; 244 | subplot(1,2,1) 245 | xin = 0:length(TEMCMC.beta)-1; 246 | yin = 0:length(TMCMC.beta)-1; 247 | hold on; box on; grid on; 248 | plot(xin, TEMCMC.beta, '--rs', 'MarkerFaceColor','r','linewidth', 1.5) 249 | plot(yin, TMCMC.beta, '--bs', 'MarkerFaceColor','b','linewidth', 1.5) 250 | legend('TEMCMC \beta_j values', 'TMCMC \beta_j values', 'linewidth', 2) 251 | title('Plot of \beta_j values') 252 | xlabel('$j$','Interpreter','latex'); ylabel('$\beta_j$','Interpreter','latex'); 253 | set(gca, 'fontsize', 20) 254 | 255 | subplot(1,2,2) 256 | ain = 1:length(TEMCMC.acceptance); 257 | bin = 1:length(TMCMC.acceptance); 258 | hold on; box on; grid on; 259 | plot(ain, TEMCMC.acceptance, '--rs', 'MarkerFaceColor','r','linewidth', 1.5) 260 | plot(bin, TMCMC.acceptance, '--bs', 'MarkerFaceColor','b','linewidth', 1.5) 261 | plot([1 5],[target_accept target_accept] , 'c','linewidth', 1.5) 262 | plot([1 5],[0.15 0.15] , 'k','linewidth', 1.5) 263 | plot([1 5],[0.5 0.5] , 'k','linewidth', 1.5) 264 | legend('TEMCMC acceptance rates', 'TMCMC acceptance rates',... 265 | 'Target acceptance rate', 'Optimum acceptance limits', 'linewidth', 2) 266 | title('Plot of Acceptance rates') 267 | xlabel('$j$','Interpreter','latex'); ylabel('Acceptance rate'); 268 | xlim([1 5]); ylim([0 0.8]) 269 | set(gca, 'fontsize', 20) 270 | 271 | %% Save the data: 272 | 273 | save('example_himmelblau_m'); 274 | --------------------------------------------------------------------------------