├── LICENSE
├── README.md
├── docs
│   ├── Benchmark_Tests.pdf
│   ├── Eggholder_2d.gif
│   └── SFVCT_1d.gif
├── main.m
└── src
    ├── Autocorrelation_functions
    │   ├── Cubic_spline_matrix.m
    │   ├── Matern32_matrix.m
    │   ├── Matern52_matrix.m
    │   ├── R_ex_matrix.m
    │   ├── R_sq_ex_matrix.m
    │   └── Read_me_autofunctions.txt
    ├── Error_Saver.m
    ├── OK_model.m
    ├── TPLHD
    │   ├── createTPLHD.m
    │   ├── reshapeSeed.m
    │   ├── resizeTPLHD.m
    │   ├── scale.m
    │   ├── scaled_TPLHD.m
    │   └── tplhsdesign.m
    ├── adaptive_sampling_process.m
    ├── adaptive_techniques
    │   ├── ACE_function.m
    │   ├── AME_function.m
    │   ├── CVVor_function.m
    │   ├── EIGF_function.m
    │   ├── EI_function.m
    │   ├── Jin_CV_function.m
    │   ├── LIP_function.m
    │   ├── LOLA_function.m
    │   ├── MASA_function.m
    │   ├── MEPE_function.m
    │   ├── MIPT_function.m
    │   ├── MSD_function.m
    │   ├── SFVCT_function.m
    │   ├── SSA_function.m
    │   ├── TEAD_function.m
    │   ├── WAE_function.m
    │   └── WEI_function.m
    ├── benchmark_functions
    │   ├── Boha_2d.m
    │   ├── Booth_2d.m
    │   ├── Branin_2d.m
    │   ├── Bukin_2d.m
    │   ├── Colville_4d.m
    │   ├── DampedCos_1d.m
    │   ├── Detpep_3d.m
    │   ├── DixonP_4d.m
    │   ├── Drop_wave_2d.m
    │   ├── Eggholder_2d.m
    │   ├── Exploit_1d.m
    │   ├── Franke_2d.m
    │   ├── Gramacy_Lee3_1d.m
    │   ├── Gramacy_Lee_1d.m
    │   ├── Griewank_2d.m
    │   ├── Griewank_3d.m
    │   ├── Hartmann_3d.m
    │   ├── Hartmann_6d.m
    │   ├── Ishigami_3d.m
    │   ├── Langermann_3d.m
    │   ├── Micha_2d.m
    │   ├── Michalewicz_3d.m
    │   ├── Mod_Gramacy_Lee_1d.m
    │   ├── Perm0db_1d.m
    │   ├── Rastrigin_2d.m
    │   ├── Rosenbrock_2d.m
    │   ├── SHCamel_2d.m
    │   ├── Schwefel_5d.m
    │   ├── Shekel_4d.m
    │   ├── Single_hump_1d.m
    │   ├── Sphere_3d.m
    │   ├── Sphere_4d.m
    │   ├── Sphere_5d.m
    │   ├── Styblinski_Tang_5d.m
    │   ├── Two_humps_function_1d.m
    │   └── Zakharov_4d.m
    ├── call_benchmarkFunctions.m
    ├── chooseSamplingMethod.m
    ├── help_functions
    │   ├── Read_me_helpfunctions.py
    │   ├── intersite_proj_th.m
    │   ├── lhs_scaled.m
    │   ├── randomVoronoi.m
    │   ├── scale_rand.m
    │   ├── scale_to_unity.m
    │   ├── scale_vector_from_unity.m
    │   └── scale_vector_to_unity.m
    ├── initial_metamodel.m
    ├── optimizationTools.m
    └── run_input_prompts.m
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Jan N Fuhg
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Matlab Implementation Of State-Of-The-Art Adaptive Techniques for Ordinary Kriging
2 |
3 | This library provides a Matlab implementation of Ordinary Kriging accompanied by the most commonly used adaptive sampling techniques. For a comparative review of these methods, test cases are provided within this framework.
4 |
5 |
6 | ## Getting Started
7 |
8 | We encourage anyone interested in using this library to pick an adaptive method and a benchmark function and try them out:
9 | run [`main.m`] and follow the instructions for user input. 1D and 2D benchmark functions allow for a continuously updated visualization of the adaptive process.
10 |
11 |
12 | ### Prerequisites
13 |
14 | Matlab version R2017a or higher.
15 |
16 |
17 | ## Examples
18 |
19 |
20 | Example of a 1D adaptive sampling process with the SFCVT technique. Upper image: target function (red dashed), metamodel (blue), and sample positions.
21 | Lower image: sample positions, the optimization function for the next sample (blue), and the position of the new sample (red dashed). Red areas are constraints on the optimization space.
22 |
23 |
24 |
25 |
26 | ---
27 | ---
28 | Example of a 2D adaptive sampling process with the MEPE technique. Upper image: current metamodel over the parametric space.
29 | Lower image: sample positions (black dots), newly found sample (red dot), and the local normalized mean absolute error as contour lines over the parametric space.
30 |
31 |
32 |
33 |
34 | #### List of Adaptive Sampling Techniques
35 |
36 | - Smart Sampling Algorithm (SSA)
37 | Garud, Sushant Suhas, Iftekhar A. Karimi, and Markus Kraft. "Smart sampling algorithm for surrogate model development." Computers & Chemical Engineering 96 (2017): 103-114.
38 |
39 |
40 | - Cross-Validation-Voronoi (CVVOR)
41 | Xu, Shengli, et al. "A robust error-pursuing sequential sampling approach for global metamodeling based on Voronoi diagram and cross validation." Journal of Mechanical Design 136.7 (2014): 071009.
42 |
43 |
44 | - ACcumulative Error (ACE)
45 | Li, Genzi, Vikrant Aute, and Shapour Azarm. "An accumulative error based adaptive design of experiments for offline metamodeling." Structural and Multidisciplinary Optimization 40.1-6 (2010): 137.
46 |
47 |
48 | - MC-intersite-proj-th (MIPT)
49 | Crombecq, Karel, Eric Laermans, and Tom Dhaene. "Efficient space-filling and non-collapsing sequential design strategies for simulation-based modeling." European Journal of Operational Research 214.3 (2011): 683-696.
50 |
51 |
52 | - LOLA-Voronoi (LOLA)
53 | Crombecq, Karel, et al. "A novel hybrid sequential design strategy for global surrogate modeling of computer experiments." SIAM Journal on Scientific Computing 33.4 (2011): 1948-1974.
54 |
55 |
56 | - Adaptive Maximum Entropy (AME)
57 | Liu, Haitao, et al. "An adaptive Bayesian sequential sampling approach for global metamodeling." Journal of Mechanical Design 138.1 (2016): 011404.
58 |
59 |
60 | - Maximizing Expected Prediction Error (MEPE)
61 | Liu, Haitao, Jianfei Cai, and Yew-Soon Ong. "An adaptive sampling approach for kriging metamodeling by maximizing expected prediction error." Computers & Chemical Engineering 106 (2017): 171-182.
62 |
63 |
64 | - Mixed Adaptive Sampling Algorithm (MASA)
65 | Eason, John, and Selen Cremaschi. "Adaptive sequential sampling for surrogate model generation with artificial neural networks." Computers & Chemical Engineering 68 (2014): 220-232.
66 |
67 |
68 | - Weighted Accumulative Error (WAE)
69 | Jiang, Ping, et al. "A novel sequential exploration-exploitation sampling strategy for global metamodeling." IFAC-PapersOnLine 48.28 (2015): 532-537.
70 |
71 |
72 | - Sampling with Lipschitz Criterion (LIP)
73 | Lovison, Alberto, and Enrico Rigoni. "Adaptive sampling with a Lipschitz criterion for accurate metamodeling." Communications in Applied and Industrial Mathematics 1.2 (2011): 110-126.
74 |
75 |
76 | - Taylor expansion-based adaptive design (TEAD)
77 | Mo, Shaoxing, et al. "A Taylor expansion-based adaptive design strategy for global surrogate modeling with applications in groundwater modeling." Water Resources Research 53.12 (2017): 10802-10823.
78 |
79 |
80 | - Space-Filling Cross Validation Tradeoff (SFCVT)
81 | Aute, Vikrant, et al. "Cross-validation based single response adaptive design of experiments for Kriging metamodeling of deterministic computer simulations." Structural and Multidisciplinary Optimization 48.3 (2013): 581-605.
82 |
83 |
84 | - Expected improvement (EI)
85 | Jones, Donald R., Matthias Schonlau, and William J. Welch. "Efficient global optimization of expensive black-box functions." Journal of Global Optimization 13.4 (1998): 455-492.
86 |
87 |
88 | - Expected improvement for global fit (EIGF)
89 | Lam, Chen Quin. "Sequential adaptive designs in computer experiments for response surface model fit." Diss. The Ohio State University, 2008.
90 |
91 |
92 |
93 | ### References
94 |
95 | If you use part of this work, consider citing:
96 | Fuhg, Jan N., Amélie Fau, and Udo Nackenhorst. "State-of-the-Art and Comparative Review of Adaptive Sampling Methods for Kriging." Archives of Computational Methods in Engineering (2020): 1-59.
97 |
--------------------------------------------------------------------------------
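For a quick start from the MATLAB prompt, a minimal sketch of what running the study amounts to (it mirrors `main.m`; the prompts ask for the adaptive method, benchmark function, number of samples, repetitions, and visualization flag):

```matlab
% From the repository root:
addpath(genpath('src'));  % make all source folders visible
main                      % launches the input prompts and the sampling loop
% Results are saved to OutputData.mat in the working directory.
```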
/docs/Benchmark_Tests.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FuhgJan/StateOfTheArtAdaptiveSampling/f3d9b779ba3ac97f2cdd85f6cc93ef2a1e6a3626/docs/Benchmark_Tests.pdf
--------------------------------------------------------------------------------
/docs/Eggholder_2d.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FuhgJan/StateOfTheArtAdaptiveSampling/f3d9b779ba3ac97f2cdd85f6cc93ef2a1e6a3626/docs/Eggholder_2d.gif
--------------------------------------------------------------------------------
/docs/SFVCT_1d.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FuhgJan/StateOfTheArtAdaptiveSampling/f3d9b779ba3ac97f2cdd85f6cc93ef2a1e6a3626/docs/SFVCT_1d.gif
--------------------------------------------------------------------------------
/main.m:
--------------------------------------------------------------------------------
1 | % State-of-the-art and Comparative Review on Adaptive
2 | % Sampling Methods for Ordinary Kriging
3 | clear all;
4 | close all;
5 | %% Essential imports
6 | addpath(genpath('src'))
7 | warning('off', 'all')
8 | global plotFlag
9 |
10 | %% Start input prompt
11 | [ methodID, benchmarkId, numberSamples, numberRepetitions, Vis ]= run_input_prompts();
12 |
13 | if strcmp(Vis,'y')
14 |     plotFlag = 1;
15 | else
16 |     plotFlag = 0; % default to no visualization for any other input
17 | end
18 |
19 | %% Call benchmark function
20 | [ y, lb, ub ,x, M ] = call_benchmarkFunctions( benchmarkId );
21 |
22 | %%
23 | A_sampling =[lb';ub'];
24 | % Scale parameter samples to unity
25 | x = scale_vector_to_unity(lb, ub, x);
26 |
27 | % Number of adaptive samples
28 | Number_of_benchmark_iterations = numberSamples;
29 |
30 | % Optimization strategy for sampling:
31 | % 'AN': Simulated annealing
32 | % 'GA': Genetic algorithm
33 | % 'fmincon': MATLAB fmincon
34 | % 'patternsearch': MATLAB patternsearch algorithm
35 | % 'MS': Multistart algorithm
36 | Sampling_optimization_strategy = 'MS';
37 |
38 | % Number of repetitive iterations
39 | max_iteration = numberRepetitions;
40 |
41 |
42 |
43 |
44 | %% Start process
45 | [output] = chooseSamplingMethod(M,x, y, A_sampling, Number_of_benchmark_iterations,max_iteration,Sampling_optimization_strategy,methodID);
46 |
47 | save('OutputData.mat','output')
48 |
--------------------------------------------------------------------------------
/src/Autocorrelation_functions/Cubic_spline_matrix.m:
--------------------------------------------------------------------------------
1 | function [ R ] = Cubic_spline_matrix( x1,x2,theta )
2 | % Details: Create autocorrelation matrix with cubic spline kernel
3 | %
4 | % inputs:
5 | % x1 - First input vector
6 | % x2 - Second input vector
7 | % theta - Hyperparameter vector
8 | %
9 | % outputs:
10 | % R - Autocorrelation matrix
11 |
12 | n = size(x1,2);
13 | m1 = size(x1,1);
14 | m2 = size(x2,1);
15 |
16 | R = -inf*(ones(m1,m2));
17 | for i=1: m1
18 |
19 | for j=1:m2
20 | R_val = 1;
21 | for p=1:n
22 | xi = theta(p) * abs(x1(i,p)-x2(j,p));
23 | if (0.0 <= xi) && (xi <= 0.2)
24 | val = 1 - 15*xi^2 + 30* xi^3;
25 | elseif (0.2 < xi) && (xi < 1.0)
26 | val = 1.25 * (1 - xi)^3;
27 | elseif xi >= 1.0
28 | val = 0.0;
29 | end
30 | R_val = R_val* val;
31 | end
32 | R(i,j) = R_val;
33 | end
34 | end
35 |
36 |
37 | end
--------------------------------------------------------------------------------
/src/Autocorrelation_functions/Matern32_matrix.m:
--------------------------------------------------------------------------------
1 | function [ R ] = Matern32_matrix( x1,x2,theta )
2 | % Details: Create autocorrelation matrix with Matern32 kernel
3 | %
4 | % inputs:
5 | % x1 - First input vector
6 | % x2 - Second input vector
7 | % theta - Hyperparameter vector
8 | %
9 | % outputs:
10 | % R - Autocorrelation matrix
11 |
12 | n = size(x1,2);
13 | m1 = size(x1,1);
14 | m2 = size(x2,1);
15 |
16 | R = -inf*(ones(m1,m2));
17 | for i=1: m1
18 |
19 | for j=1:m2
20 | R_val = 1;
21 | for p=1:n
22 | r = x1(i,p)-x2(j,p);
23 |             % theta acts as an inverse length scale in this implementation
24 |             kval = (sqrt(3)*abs(r))*theta(p);
25 | val = (1+ kval)*exp(-kval);
26 | R_val = R_val* val;
27 | end
28 | R(i,j) = R_val;
29 | end
30 | end
31 |
32 |
33 |
34 | end
35 |
--------------------------------------------------------------------------------
/src/Autocorrelation_functions/Matern52_matrix.m:
--------------------------------------------------------------------------------
1 | function [ R ] = Matern52_matrix( x1,x2,theta )
2 | % Details: Create autocorrelation matrix with Matern52 kernel
3 | %
4 | % inputs:
5 | % x1 - First input vector
6 | % x2 - Second input vector
7 | % theta - Hyperparameter vector
8 | %
9 | % outputs:
10 | % R - Autocorrelation matrix
11 |
12 |
13 |
14 | n = size(x1,2);
15 | m1 = size(x1,1);
16 | m2 = size(x2,1);
17 |
18 | R = -inf*(ones(m1,m2));
19 | for i=1: m1
20 |
21 | for j=1:m2
22 | R_val = 1;
23 | for p=1:n
24 | r = x1(i,p)-x2(j,p);
25 | kval = (sqrt(5)*abs(r))/theta(p);
26 | val = (1+ kval + (5*(r^2))/(3*(theta(p)^2)))*exp(-kval);
27 | R_val = R_val* val;
28 | end
29 | R(i,j) = R_val;
30 | end
31 | end
32 |
33 |
34 |
35 | end
36 |
37 |
--------------------------------------------------------------------------------
/src/Autocorrelation_functions/R_ex_matrix.m:
--------------------------------------------------------------------------------
1 | function [ R ] = R_ex_matrix( x1,x2,theta )
2 | % Details: Create autocorrelation matrix with exponential kernel
3 | %
4 | % inputs:
5 | % x1 - First input vector
6 | % x2 - Second input vector
7 | % theta - Hyperparameter vector
8 | %
9 | % outputs:
10 | % R - Autocorrelation matrix
11 |
12 | n = size(x1,2);
13 | m1 = size(x1,1);
14 | m2 = size(x2,1);
15 |
16 | R = -inf*(ones(m1,m2));
17 | for i=1: m1
18 |
19 | for j=1:m2
20 | val = 0.0;
21 | for p=1:n
22 | r = x1(i,p)-x2(j,p);
23 | val = val + abs(r)/(sqrt(theta(p)));
24 | end
25 | R(i,j) = exp(- val);
26 | end
27 | end
28 |
29 |
30 | end
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/Autocorrelation_functions/R_sq_ex_matrix.m:
--------------------------------------------------------------------------------
1 | function [ R ] = R_sq_ex_matrix( x1,x2,theta )
2 | % Details: Create autocorrelation matrix with squared exponential kernel
3 | %
4 | % inputs:
5 | % x1 - First input vector
6 | % x2 - Second input vector
7 | % theta - Hyperparameter vector
8 | %
9 | % outputs:
10 | % R - Autocorrelation matrix
11 | n = size(x1,2);
12 | m1 = size(x1,1);
13 | m2 = size(x2,1);
14 |
15 | R = -inf*(ones(m1,m2));
16 | for i=1: m1
17 |
18 | for j=1:m2
19 | val = 0.0;
20 | for p=1:n
21 | r = abs(x1(i,p)-x2(j,p));
22 | val = val + ((r)/theta(p))^(2);
23 | end
24 | R(i,j) = exp(- val);
25 | end
26 | end
27 |
28 |
29 | end
--------------------------------------------------------------------------------
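All kernel functions above share the signature `R = kernel(x1, x2, theta)`, with sample points in rows and one hyperparameter per input dimension. A small usage sketch (values are arbitrary, for illustration only):

```matlab
X     = rand(5,2);                     % five 2D sample points (rows)
X0    = [0.3 0.7; 0.9 0.1];            % two query points
theta = [1.2 0.8];                     % one hyperparameter per input dimension
R     = Matern52_matrix(X, X0, theta); % 5-by-2 matrix of correlations
```

Note that `Matern32_matrix` multiplies distances by `theta` (theta as an inverse length scale) while `Matern52_matrix` divides by it, so hyperparameter values are not directly comparable across these kernels.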
/src/Autocorrelation_functions/Read_me_autofunctions.txt:
--------------------------------------------------------------------------------
1 | This folder contains the functions used for creating the autocorrelation matrices.
2 |
--------------------------------------------------------------------------------
/src/Error_Saver.m:
--------------------------------------------------------------------------------
1 | classdef Error_Saver
2 | % Details: Utility class that defines and saves the error values
3 | %
4 |
5 | properties
6 | error_data;
7 | end
8 |
9 | methods
10 | function obj = Error_Saver()
11 | obj.error_data = table;
12 |
13 | end
14 |
15 | function obj = update(obj,it,X,Y)
16 | nMAE_val = MeanAE(X,Y);
17 | nRMAE_val = RMAE(X,Y);
18 | nRMSE_val = RMSE(X,Y);
19 | R_sq_val = R_sq(X,Y);
20 | i= size(obj.error_data,1);
21 | obj.error_data(i+1,:)= {it ,nMAE_val , nRMAE_val, nRMSE_val, R_sq_val };
22 | if i==0
23 | obj.error_data.Properties.VariableNames = {'Iterator','MAE','RMAE','RMSE', 'R_sq'};
24 | end
25 | end
26 |
27 |
28 | function plot_data(obj)
29 | data = obj.error_data.Variables;
30 | Iterator = data(:,1);
31 | MeanAE = data(:,2);
32 | RMAE = data(:,3);
33 | RMSE = data(:,4);
34 | R_sq = data(:,5);
35 |
36 | figure
37 | plot(Iterator, MeanAE, 'LineWidth', 2.0); hold on;
38 | plot(Iterator, RMAE, 'LineWidth', 2.0); hold on;
39 | plot(Iterator, RMSE, 'LineWidth', 2.0); hold on;
40 | plot(Iterator, R_sq, 'LineWidth', 2.0); hold off;
41 |             legend('MeanAE','RMAE','RMSE', 'R^2');
42 | xlabel('Iterations');
43 |
44 | end
45 |
46 | end
47 |
48 |
49 | end
50 |
51 | function error_val = standard(X)
52 | % standard deviation of the responses
53 | m = numel(X);
54 | mean_response = (1/m)*(sum(X));
55 | error_val = sqrt((1/(m))*(sum((X-mean_response).^2)));
56 | end
57 |
58 | function error_val = RMAE(X,Y)
59 | % normalized maximum absolute error
60 | error_val = max(abs(X-Y)); %/standard(X);
61 |
62 | error_val = error_val/(max(Y)-min(Y));
63 | end
64 |
65 |
66 | function error_val = MeanAE(X,Y)
67 | % normalized mean absolute error
68 | error_val = mean(abs(X-Y));
69 |
70 | error_val = error_val/(max(Y)-min(Y));
71 | end
72 |
73 |
74 | function error_val = RMSE(X,Y)
75 | % normalized root mean squared error
76 | error_val = sqrt(mean(((X-Y).^2)));
77 |
78 | error_val = error_val/(max(Y)-min(Y));
79 | end
80 |
81 |
82 | function error_val = R_sq(X,Y)
83 | % R_sq score (coefficient of determination)
84 | mean_response = mean(X);
85 | SSE_val = (sum((X-Y).^2)); % residual sum of squares
86 |
87 | divisor = sum((X-mean_response).^2); % total sum of squares
88 |
89 | error_val = 1-(SSE_val/divisor);
90 | end
--------------------------------------------------------------------------------
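For reference, the metrics implemented above in formula form, with x the reference responses and y the metamodel predictions as passed in by `adaptive_sampling_process` (note that the range normalization in the code uses the second argument, i.e. the predictions):

```latex
\mathrm{NMAE} = \frac{\tfrac{1}{n}\sum_{i=1}^{n}\lvert x_i - y_i\rvert}{\max_i y_i - \min_i y_i},
\qquad
\mathrm{NMaxAE} = \frac{\max_i \lvert x_i - y_i\rvert}{\max_i y_i - \min_i y_i},
\qquad
\mathrm{NRMSE} = \frac{\sqrt{\tfrac{1}{n}\sum_{i=1}^{n}(x_i - y_i)^2}}{\max_i y_i - \min_i y_i},
\qquad
R^2 = 1 - \frac{\sum_i (x_i - y_i)^2}{\sum_i (x_i - \bar{x})^2}.
```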
/src/OK_model.m:
--------------------------------------------------------------------------------
1 | classdef OK_model
2 | % Details: Class that defines an ordinary kriging surrogate model
3 | properties
4 | % Autocorrelation function
5 | auto_correlation_function;
6 | % Input samples
7 | X;
8 | % Output corresponding to inputs values
9 | Y;
10 |         % Optimization technique to find hyperparameters
11 | theta_opti_technique;
12 | % Number of input samples
13 | m;
14 | % One-vector definition
15 | F;
16 | % Hyperparameter
17 | theta;
18 | % Autocorrelation matrix
19 | R;
20 | end
21 |
22 |
23 | methods
24 | %Constructor
25 | function obj=OK_model(af,x,y,opti, givenTheta)
26 | % Details: Constructor for ordinary Kriging class
27 | %
28 | % inputs:
29 | % af - Function handle for autocorrelation function
30 | % x - Input sample values
31 | % y - Output sample values
32 |             % opti - optimization strategy for hyperparameters
33 | % givenTheta - (optional) Define ordinary Kriging model with specific
34 | % hyperparameter values
35 | %
36 | % outputs:
37 | % obj - Ordinary kriging class
38 |
39 | obj.auto_correlation_function = af;
40 | obj.X = x; % x = [x1 y1 z1; x2 y2 z2]
41 | obj.Y = y;
42 | obj.theta_opti_technique = opti;
43 |
44 | obj.m = size(x,1);
45 |             obj.F = ones(obj.m,1);
46 |
47 | if nargin<5
48 | obj.theta = optimize_theta(obj);
49 | else
50 | obj.theta = givenTheta;
51 | end
52 |
53 |
54 |
55 | obj.R = compute_R(obj, obj.theta);
56 | end
57 |
58 |
59 |
60 |
61 |
62 | function R = compute_R(obj, theta)
63 | % Details: Obtain autocorrelation matrix
64 | %
65 | % inputs:
66 | % obj - Ordinary kriging class object
67 | % theta - Hyperparameters
68 | %
69 | % outputs:
70 | % R - Autocorrelation matrix
71 |
72 | R = obj.auto_correlation_function(obj.X,obj.X, theta);
73 |
74 |
75 | if sum(isnan(R(:)))
76 | disp('NAN values')
77 | end
78 |
79 | end
80 |
81 |
82 |
83 | function beta_hat = compute_beta_hat(obj,R)
84 | % Details: Obtain a priori mean
85 | %
86 | % inputs:
87 | % obj - Ordinary kriging class object
88 | % R - Autocorrelation matrix
89 | %
90 | % outputs:
91 |             % beta_hat - A priori mean
92 | beta_hat = ((obj.F'*(R\obj.F))\obj.F') * (R\obj.Y);
93 | end
94 |
95 | function sigma_sq_hat = compute_sigma_sq_hat(obj,R,beta_hat)
96 | % Details: Obtain a priori variance
97 | %
98 | % inputs:
99 | % obj - Ordinary kriging class object
100 | % R - Autocorrelation matrix
101 | % beta_hat - A priori mean
102 | %
103 | % outputs:
104 | % sigma_sq_hat - A priori variance
105 | sigma_sq_hat = (1/obj.m) *(obj.Y- obj.F*beta_hat)' * (R\(obj.Y- obj.F*beta_hat));
106 | end
107 |
108 |
109 | function r0 = compute_r0(obj,theta,x0)
110 | % Details: Obtain r0 autocorrelation vector
111 | %
112 | % inputs:
113 | % obj - Ordinary kriging class object
114 | % theta - Hyperparameters
115 | % x0 - Input value
116 | %
117 | % outputs:
118 | % r0 - r0 autocorrelation vector
119 | r0= obj.auto_correlation_function(obj.X,x0,theta);
120 |
121 | end
122 |
123 | function mu_hat = compute_mu_hat(obj,R,beta_hat,x0,theta)
124 | % Details: Obtain the prediction mean
125 | %
126 | % inputs:
127 | % obj - Ordinary kriging class object
128 | % R - Autocorrelation matrix
129 | % beta_hat - A priori mean
130 | % x0 - Input value to obtain the mean for
131 | % theta - Hyperparameters
132 | %
133 | % outputs:
134 | % mu_hat - A posteriori mean prediction
135 |
136 | r0 = compute_r0(obj,theta,x0);
137 |
138 | mu_hat = beta_hat + r0' * (R\(obj.Y - obj.F*beta_hat));
139 |
140 | end
141 |
142 | function sigma_Y_sq_hat = compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x0,theta,R)
143 | % Details: Obtain the prediction variance
144 | %
145 | % inputs:
146 | % obj - Ordinary kriging class object
147 | % sigma_sq_hat - A priori variance
148 | % x0 - Input value to obtain variance for
149 | % theta - Hyperparameter value
150 | % R - Autocorrelation matrix
151 | %
152 | % outputs:
153 | % sigma_Y_sq_hat - A posteriori variance prediction
154 |
155 | r0 = compute_r0(obj,theta,x0);
156 | u0 = obj.F' * (R\r0) - 1;
157 | sigma_Y_sq_hat = sigma_sq_hat * (1 - r0' * (R\r0) + u0 * ((obj.F' * (R\obj.F))\u0));
158 |
159 |
160 | end
161 |
162 | function theta = optimize_theta(obj)
163 | % Details: Define the optimization process for the
164 | % hyperparameters
165 | %
166 | % inputs:
167 | % obj - Ordinary kriging class object
168 | %
169 | % outputs:
170 |             % theta - Optimized hyperparameter vector
171 |
172 | AA = [];
173 | b = [];
174 | Aeq = [];
175 | beq = [];
176 |             n = size(obj.X,2); % number of input dimensions
177 |
178 |
179 |
180 |
181 |
182 | for k=1:n
183 | iter =1;
184 | clear distance
185 | for i=1:obj.m
186 | for j=1:obj.m
187 | if ~(i == j)
188 | distance(iter) = abs(obj.X(i,k) - obj.X(j,k));
189 | iter = iter +1;
190 | end
191 | end
192 | end
193 | max_distance = max(distance);
194 | min_distance = min(distance);
195 |
196 | lb(k) = 0.0005*min_distance;
197 | if lb(k) == 0.0
198 | lb(k) = 10^(-5);
199 | end
200 | ub(k) = 10*max_distance;
201 | end
202 |
203 | fun = @obj.computeMLE;
204 | theta = optimizationTools(fun,obj.theta_opti_technique,AA,b,Aeq,beq,lb,ub,[]);
205 |
206 |
207 | end
208 |
209 |
210 | function Psi = computeMLE(obj,theta)
211 | % Details: Define the Maximum Likelihood estimation for the
212 | % hyperparameters
213 | %
214 | % inputs:
215 | % obj - Ordinary kriging class object
216 | % theta - Hyperparameter input value
217 | %
218 | % outputs:
219 | % Psi - Value to be optimized
220 |
221 | R_matrix = compute_R(obj, theta);
222 | beta_hat = compute_beta_hat(obj,R_matrix);
223 |
224 | sigma_sq_hat = compute_sigma_sq_hat(obj,R_matrix,beta_hat);
225 |
226 | if cond(R_matrix)> 10^7
227 | Psi = 100000;
228 | else
229 | Psi = 0.5 * (obj.m*log(sigma_sq_hat) + log(det(R_matrix)));
230 | end
231 | end
232 |
233 |
234 |
235 |
236 | function [mu_hat,sigma_Y_sq_hat] = predict(obj,x0)
237 | % Details: Prediction of the surrogate model for input x0
238 | %
239 | % inputs:
240 | % obj - Ordinary kriging class object
241 | % x0 - Input value
242 | %
243 | % outputs:
244 |             % mu_hat - Mean prediction output
245 | % sigma_Y_sq_hat - Variance prediction output
246 |
247 |
248 | beta_hat = compute_beta_hat(obj,obj.R);
249 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
250 | for i=1:numel(x0(:,1))
251 | mu_hat(i) = compute_mu_hat(obj,obj.R,beta_hat,x0(i,:),obj.theta);
252 | sigma_Y_sq_hat(i) = compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x0(i,:),obj.theta,obj.R);
253 |
254 |                 alpha2 = 0.05; % 95% prediction bounds (computed but not returned)
255 | z_0p95_u(i) = mu_hat(i) + norminv(1- (alpha2/2)) *sqrt(sigma_Y_sq_hat(i));
256 | z_0p95_l(i) = mu_hat(i) - norminv(1- (alpha2/2)) *sqrt(sigma_Y_sq_hat(i));
257 |
258 | end
259 |
260 |
261 | end
262 |
263 |
264 | function x_new = adaptive_sampling(obj,method,A,strategy)
265 | % Details: Choosing the right function to create new sample based on
266 | % user input
267 | %
268 | % inputs:
269 | % obj - Ordinary kriging class object
270 | % method - String defining the adaptive technique
271 | % A - Definition of parametric space
272 | % strategy - Optimization technique to be used
273 | %
274 | % outputs:
275 | % x_new - New found sample point
276 |
277 | addpath('adaptive_techniques')
278 | if strcmp(method,'SSA')
279 | x_new = SSA_function(obj,A,strategy);
280 | elseif strcmp(method,'CVVor')
281 | x_new = CVVor_function(obj,A);
282 | elseif strcmp(method,'ACE')
283 | x_new = ACE_function(obj,A);
284 | elseif strcmp(method,'MIPT')
285 | x_new = MIPT_function(obj,A);
286 | elseif strcmp(method,'LOLA')
287 | x_new = LOLA_function(obj,A);
288 | elseif strcmp(method,'AME')
289 | x_new = AME_function(obj,A,strategy);
290 | elseif strcmp(method,'MEPE')
291 | x_new = MEPE_function(obj,A,strategy);
292 | elseif strcmp(method,'MASA')
293 | x_new = MASA_function(obj,A);
294 | elseif strcmp(method,'SFVCT')
295 | x_new = SFVCT_function(obj,A,strategy);
296 | elseif strcmp(method,'WAE')
297 | x_new = WAE_function(obj,A,strategy);
298 | elseif strcmp(method,'TEAD')
299 | x_new =TEAD_function(obj,A);
300 | elseif strcmp(method,'LIP')
301 | x_new = LIP_function(obj,A,strategy);
302 | elseif strcmp(method,'EI')
303 | x_new = EI_function(obj,A,strategy);
304 | elseif strcmp(method,'EIGF')
305 | x_new = EIGF_function(obj,A,strategy);
306 |
307 | elseif strcmp(method,'WEI')
308 | x_new = WEI_function(obj,A);
309 | elseif strcmp(method,'MSD')
310 | x_new = MSD_function(obj,A);
311 | elseif strcmp(method,'Jin_CV')
312 | x_new = Jin_CV_function(obj,A,strategy);
313 | elseif strcmp(method,'QBC_Jackknifing')
314 | x_new = QBC_Jackknifing_function(obj,A);
315 | end
316 |
317 | end
318 |
319 |
320 | end
321 |
322 |
323 |
324 |
325 | end
326 |
327 |
328 |
329 |
330 |
331 |
332 |
333 |
334 |
335 |
--------------------------------------------------------------------------------
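A minimal usage sketch for the `OK_model` class above (not part of the repository; it assumes `src` is on the path and uses the `'fmincon'` option of `optimizationTools`, as listed in `main.m`):

```matlab
x = linspace(0, 1, 8)';                             % training inputs, scaled to [0,1]
y = sin(2*pi*x) + x;                                % training responses
mdl = OK_model(@Matern52_matrix, x, y, 'fmincon');  % hyperparameters fitted by MLE
[mu_hat, s2_hat] = mdl.predict([0.25; 0.75]);       % mean and variance at two new points
x_next = mdl.adaptive_sampling('EI', [0; 1], 'fmincon');  % one adaptive step on [0,1]
```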
/src/TPLHD/createTPLHD.m:
--------------------------------------------------------------------------------
1 | function X = createTPLHD(seed, ns, npStar, ndStar, nv)
2 | % Details: Create TPLHD design
3 | %
4 | % inputs:
5 | % seed - initial seed design (points within 1 and ns)
6 | % ns - number of points in the seed design
7 | % npStar - number of points of the Latin hypercube (LH)
8 | %         ndStar - number of divisions of the LH
9 | % nv - number of variables in the LH
10 | %
11 | % outputs:
12 | % X - Latin hypercube design created by the translational
13 | % propagation algorithm
14 | % we warn that this function has to be properly translated to other
15 | % programming languages to avoid problems with memory allocation
16 | X = seed;
17 | d = ones(1, nv); % just for memory allocation
18 | for c1 = 1 : nv % shifting one direction at a time
19 | seed = X; % update seed with the latest points added
20 | d(1 : (c1 - 1)) = ndStar^(c1 - 2);
21 | d(c1) = npStar/ndStar;
22 | d((c1 + 1) : end) = ndStar^(c1 - 1);
23 | for c2 = 2 : ndStar % fill each of the divisions
24 | ns = length(seed(:,1)); % update seed size
25 | for c3 = 1 : ns
26 | seed(c3,:) = seed(c3,:) + d;
27 | end
28 | X = vertcat(X, seed);
29 | end
30 | end
31 | return
32 |
--------------------------------------------------------------------------------
/src/TPLHD/reshapeSeed.m:
--------------------------------------------------------------------------------
1 | function seed = reshapeSeed(seed , ns, npStar, ndStar, nv)
2 | % inputs: seed - initial seed design (points within 1 and ns)
3 | % ns - number of points in the seed design
4 | % npStar - number of points of the Latin hypercube (LH)
5 | %         ndStar - number of divisions of the LH
6 | % nv - number of variables in the LH
7 | % outputs: seed - seed design properly scaled
8 | if ns == 1
9 | seed = ones(1, nv); % arbitrarily put at the origin
10 | else
11 | uf = ns*ones(1, nv);
12 | ut = ( (npStar / ndStar) - ndStar*(nv - 1) + 1 )*ones(1, nv);
13 | rf = uf - 1;
14 | rt = ut - 1;
15 | a = rt./rf;
16 | b = ut - a.*uf;
17 | for c1 = 1 : ns
18 | seed(c1,:) = a.*seed(c1,:) + b;
19 | end
20 | seed = round(seed); % to make sure that the numbers are integer
21 | end
22 | return
23 |
24 |
--------------------------------------------------------------------------------
/src/TPLHD/resizeTPLHD.m:
--------------------------------------------------------------------------------
1 | function X = resizeTPLHD(X, npStar, np, nv)
2 | % inputs: X - initial Latin hypercube design
3 | % npStar - number of points in the initial X
4 | % np - number of points in the final X
5 | % nv - number of variables
6 | % outputs: X - final X, properly shrunk
7 | center = npStar*ones(1,nv)/2; % center of the design space
8 | % distance between each point of X and the center of the design space
9 | distance = zeros(npStar, 1);
10 | for c1 = 1 : npStar
11 | distance(c1) = norm( ( X(c1,:) - center) );
12 | end
13 | [~, idx] = sort(distance);
14 | X = X( idx(1:np), : ); % resize X to np points
15 | % re-establish the LH conditions
16 | Xmin = min(X);
17 | for c1 = 1 : nv
18 | % place X in the origin
19 | X = sortrows(X, c1);
20 | X(:,c1) = X(:,c1) - Xmin(c1) + 1;
21 | % eliminate empty coordinates
22 | flag = 0;
23 | while flag == 0
24 | mask = (X(:,c1) ~= ([1:np]'));
25 | flag = isequal(mask,zeros(np,1));
26 | X(:,c1) = X(:,c1) - (X(:,c1) ~= ([1:np]'));
27 | end
28 | end
29 | return
30 |
--------------------------------------------------------------------------------
/src/TPLHD/scale.m:
--------------------------------------------------------------------------------
1 | function scaled_value = scale(c,d,x)
2 | % c lower, d upper
3 | scaled_value= c + (d-c) * x;
4 | end
5 |
6 |
--------------------------------------------------------------------------------
/src/TPLHD/scaled_TPLHD.m:
--------------------------------------------------------------------------------
1 | function S = scaled_TPLHD(n,lb,ub)
2 |
3 | S = tplhsdesign(n, numel(lb), 1, 1)./n;
4 | % S = [zeros(size(S(1,:))) ;S];
5 | for i=1:numel(lb)
6 | c = lb(i);
7 | d = ub(i);
8 |
9 | S(:,i) = scale(c,d,S(:,i));
10 | end
11 |
12 | end
13 |
14 |
--------------------------------------------------------------------------------
/src/TPLHD/tplhsdesign.m:
--------------------------------------------------------------------------------
1 | function X = tplhsdesign(np, nv, seed, ns)
2 | % inputs: np - number of points of the desired Latin hypercube (LH)
3 | % nv - number of variables in the LH
4 | % seed - initial seed design (points within 1 and ns)
5 | % ns - number of points in the seed design
6 | % outputs: X - Latin hypercube created using the translational
7 | % propagation algorithm
8 | % define the size of the TPLHD to be created first
9 | nd = ( np/ns)^( 1/nv ); % number of divisions, nd
10 | ndStar = ceil( nd );
11 | if (ndStar > nd)
12 | nb = ndStar^nv; % it is necessary to create a bigger TPLHD
13 | else
14 | nb = np/ns; % it is NOT necessary to create a bigger TPLHD
15 | end
16 | npStar = nb*ns; % size of the TPLHD to be created first
17 | % reshape seed to properly create the first design
18 | seed = reshapeSeed(seed , ns, npStar, ndStar, nv);
19 | % create TPLHD with npStar points
20 | X = createTPLHD(seed, ns, npStar, ndStar, nv);
21 | % resize the TPLHD if necessary
22 | % (shrink only when the constructed design has more points than requested)
23 | if (npStar > np)
24 | X = resizeTPLHD(X, npStar, np, nv);
25 | end
26 | return
27 |
28 |
--------------------------------------------------------------------------------
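A short usage sketch for the two entry points above (values are arbitrary):

```matlab
% 9-point translational propagation Latin hypercube scaled to [0,5] x [-1,1]
S = scaled_TPLHD(9, [0 -1], [5 1]);

% or the raw design on the integer grid: 9 points, 2 variables, single seed point
X = tplhsdesign(9, 2, 1, 1);
```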
/src/adaptive_sampling_process.m:
--------------------------------------------------------------------------------
1 | function [stored_metamodels,single_errors] = adaptive_sampling_process(metamodel_ini,M,adaptive_method,A_sampling,number_of_adaptive_iterations, opti_strategy, iteration_number)
2 | % Details: Process of adding new samples until threshold reached
3 | %
4 | % inputs:
5 | % metamodel_ini - initial metamodel
6 | % M - response function
7 | % adaptive_method - adaptive sampling technique
8 | % A_sampling - parameter space
9 | % number_of_adaptive_iterations - maximum number of adaptive samples
10 | % opti_strategy - optimization strategy; iteration_number - index of the current repetition
11 | % outputs:
12 | % stored_metamodels - stored metamodels
13 | % single_errors - errors of each adaptive step
14 |
15 | addpath('help_functions')
16 | global plotFlag
17 | iter = 1;
18 | stored_metamodels{1} = metamodel_ini;
19 | metamodel = metamodel_ini;
20 |
21 |
22 | lb = A_sampling(1,:);
23 | ub = A_sampling(2,:);
24 |
25 | lb_unity = zeros(size(lb));
26 | ub_unity = ones(size(ub));
27 | A_sampling_unity =[lb_unity;ub_unity];
28 |
29 |
30 | Y = metamodel_ini.Y;
31 | X = metamodel_ini.X;
32 |
33 | n_of_Variables = size(metamodel_ini.X,2);
34 | no_test_points = 5000 * n_of_Variables;
35 | test_points_unscaled = lhs_scaled(no_test_points,A_sampling(1,:),A_sampling(2,:));
36 | if n_of_Variables == 1
37 | test_points_unscaled = sort(test_points_unscaled);
38 | end
39 | test_points = scale_vector_to_unity(lb, ub, test_points_unscaled);
40 | if plotFlag == 1
41 |
42 |
43 | for i=1:no_test_points
44 | test_points_response(i) = M(test_points_unscaled(i,:));
45 | end
46 |
47 | minTestPoints = min(test_points_response);
48 | maxTestPoints = max(test_points_response);
49 |
50 | maxTestPointsLimit = maxTestPoints + abs(maxTestPoints)/10;
51 | minTestPointsLimit = minTestPoints - abs(minTestPoints)/10;
52 | minTestPointsLimitUpper = minTestPoints - abs(minTestPoints)/20;
53 |
54 |
55 |
56 |
57 | for i=1:no_test_points
58 | [metamodel_response(i),~] = metamodel_ini.predict(test_points(i,:));
59 | end
60 |
61 | if n_of_Variables == 1
62 | figure(1)
63 | plot(test_points, test_points_response,'--','LineWidth',2.0); hold on;
64 | plot(test_points,metamodel_response ,'LineWidth',2.0); hold on;
65 | scatter(X, minTestPointsLimitUpper*ones(numel(X),1),30,'k', 'filled'); hold on;
66 | hold off;
67 | % scatter(x_new, -10,60,'k', 'filled'); hold on;
68 |
69 | xlabel('$x$', 'Interpreter', 'Latex','FontSize',18);
70 | ylabel('$y$', 'Interpreter', 'Latex','FontSize',18);
71 | ST1 = ['$M$'];
72 | ST2 = ['$\hat{M}$'];
73 | ldg = legend({ ST1, ST2},'Interpreter', 'Latex','FontSize',12,'Location','north');
74 | ylim([minTestPointsLimit, maxTestPointsLimit])
75 | set(gca,'FontSize',18)
76 | drawnow()
77 | elseif n_of_Variables == 2
78 | xd = (max(test_points(:,1))-min(test_points(:,1)))/200;
79 | yd = (max(test_points(:,2))-min(test_points(:,2)))/200;
80 | [xq,yq] = meshgrid(min(test_points(:,1)):xd:max(test_points(:,1)), min(test_points(:,2)):yd:max(test_points(:,2)));
81 |
82 |
83 | h = figure(1) ;
84 | set(gcf, 'Renderer', 'painters', 'Position',[0 200 700 500]);
85 | subplot(2,1,1);
86 | [xq,yq] = meshgrid(min(test_points(:,1)):xd:max(test_points(:,1)), min(test_points(:,2)):yd:max(test_points(:,2)));
87 | vq = griddata(test_points(:,1),test_points(:,2),metamodel_response,xq,yq);
88 |
89 | s = surf(xq,yq,vq); hold on;
90 | hold off;
91 | xlabel('$x$', 'Interpreter', 'Latex','FontSize',18);
92 | ylabel('$y$', 'Interpreter', 'Latex','FontSize',18);
93 | zlabel('$\hat{M}$', 'Interpreter', 'Latex','FontSize',18);
94 | s.EdgeColor = 'none';
95 |
96 | ylim([0, 1])
97 | zlim([minTestPointsLimit, maxTestPointsLimit])
98 | xlim([0,1])
99 | set(gca,'FontSize',18)
100 |
101 | error = abs(metamodel_response- test_points_response)/(abs(max(test_points_response)-min(test_points_response)));
102 | min_error = 0;
103 | max_error = max(error);
104 |
105 | hold off;
106 |
107 | subplot(2,1,2);
108 |
109 | vq = griddata(test_points(:,1),test_points(:,2),error,xq,yq);
110 | s = surf(xq,yq,vq); hold on;
111 | scatter3(X(:,1),X(:,2), 1000*ones(size(X(:,1))),80,'k', 'filled'); hold on;
112 |
113 | xlabel('$x$', 'Interpreter', 'Latex','FontSize',18);
114 | ylabel('$y$', 'Interpreter', 'Latex','FontSize',18);
115 | % zlabel('$M_{Bu,2d}$', 'Interpreter', 'Latex','FontSize',18);
116 | s.EdgeColor = 'none';
117 | s.FaceAlpha = 0.9;
118 | ylim([0, 1])
119 | xlim([0,1])
120 | colormap(jet)
121 | colorbar
122 | caxis([min_error ,max_error ])
123 | % h( 'Position', [10 10 900 600])
124 | % h.Position = [10 10 700 600];
125 | set(gcf, 'Renderer', 'painters', 'Position',[10 10 700 600]);
126 | view(2)
127 | set(gca,'FontSize',18)
128 |
129 | hold off;
130 | drawnow()
131 | end
132 |
133 | end
134 |
135 | data_errors = Error_Saver();
136 |
137 |
138 | if strcmp(adaptive_method,'AME')
139 | global gamma_index
140 | global AMEpattern
141 | AMEpattern = [0,0.5,1.0,100];
142 | gamma_index = 1;
143 | elseif strcmp(adaptive_method,'MEPE')
144 | global MEPE_q
145 | MEPE_q = 1;
146 | end
147 |
148 | while (iter <= number_of_adaptive_iterations)
149 | clear y_new x_new
150 | x_new = metamodel.adaptive_sampling(adaptive_method,A_sampling_unity, opti_strategy);
151 |
152 | ST = [adaptive_method,' Iteration_no: ', num2str(iteration_number), ' m = ', num2str(size(X,1)+1),' New found point: ', num2str(x_new)];
153 | disp(ST);
154 |
155 | x_new_scaled = scale_vector_from_unity(lb,ub,x_new);
156 |
157 |
158 | for ss=1:size(x_new_scaled,1)
159 | y_new(ss,1) = M(x_new_scaled(ss,:));
160 | end
161 |
162 | Y = [Y; y_new];
163 | X = [X; x_new];
164 |
165 | metamodel = OK_model(metamodel.auto_correlation_function,X,Y,metamodel.theta_opti_technique);
166 |
167 |
168 | iter = iter+1;
169 |
170 | %% Errors
171 |
172 | for i=1:no_test_points
173 | test_points_response(i) = M(test_points_unscaled(i,:));
174 | end
175 |
176 |
177 |
178 |
179 | for i=1:no_test_points
180 | [metamodel_response(i),~] = metamodel.predict(test_points(i,:));
181 | end
182 |
183 |
184 | if plotFlag == 1
185 | if n_of_Variables == 1
186 | figure(1)
187 | plot(test_points, test_points_response,'--','LineWidth',2.0); hold on;
188 | plot(test_points,metamodel_response ,'LineWidth',2.0); hold on;
189 | scatter(X, minTestPointsLimitUpper*ones(numel(X),1),30,'k', 'filled'); hold on;
190 | scatter(x_new, minTestPointsLimitUpper,60,'r', 'filled'); hold off;
191 | xlabel('$x$', 'Interpreter', 'Latex','FontSize',18);
192 | ylabel('$y$', 'Interpreter', 'Latex','FontSize',18);
193 | ST1 = ['$M$'];
194 | ST2 = ['$\hat{M}$'];
195 | ldg = legend({ ST1, ST2},'Interpreter', 'Latex','FontSize',12,'Location','north');
196 | ylim([minTestPointsLimit, maxTestPointsLimit])
197 | set(gca,'FontSize',18)
198 | drawnow()
199 | elseif n_of_Variables == 2
200 | vq1 = griddata(test_points(:,1),test_points(:,2),metamodel_response',xq,yq);
201 | h = figure(1) ;
202 |
203 | subplot(2,1,1);
204 | % cla(h1);
205 |
206 | s = surf(xq,yq,vq1); hold on;
207 | s.EdgeColor = 'none';
208 | xlabel('$x$', 'Interpreter', 'Latex','FontSize',18);
209 | ylabel('$y$', 'Interpreter', 'Latex','FontSize',18);
210 | zlabel('$\hat{M}$', 'Interpreter', 'Latex','FontSize',18);
211 | ylim([0, 1])
212 | zlim([minTestPointsLimit, maxTestPointsLimit])
213 | xlim([0,1])
214 | set(gca,'FontSize',18)
215 | hold off;
216 |
217 |
218 | error = abs(metamodel_response- test_points_response)/(abs(max(test_points_response)-min(test_points_response))) ;
219 |
220 | vq2 = griddata(test_points(:,1),test_points(:,2),error',xq,yq);
221 | subplot(2,1,2);
222 |
223 |
224 | s = surf(xq,yq,vq2); hold on;
225 | scatter3(X(:,1),X(:,2), 1000*ones(size(X(:,1))),80,'k', 'filled'); hold on;
226 | scatter3(x_new(:,1),x_new(:,2), 1200,150,'r', 'filled'); hold on;
227 | xlabel('$x$', 'Interpreter', 'Latex','FontSize',18);
228 | ylabel('$y$', 'Interpreter', 'Latex','FontSize',18);
229 | % zlabel('$M_{Bu,2d}$', 'Interpreter', 'Latex','FontSize',18);
230 | s.EdgeColor = 'none';
231 | s.FaceAlpha = 0.9;
232 | ylim([0, 1])
233 | xlim([0,1])
234 | %colormap(jet)
235 | colorbar
236 | caxis([min_error ,max_error ])
237 | % h( 'Position', [10 10 900 600])
238 | % h.Position = [10 10 700 600];
239 | set(gcf, 'Renderer', 'painters', 'Position',[10 10 700 600]);
240 | view(2)
241 | set(gca,'FontSize',18)
242 | hold off;
243 |
244 | drawnow()
245 | end
246 | end
247 |
248 |
249 | data_errors=data_errors.update(metamodel.m,test_points_response , metamodel_response );
250 |
251 |
252 |
253 | ST = [adaptive_method,' Iteration_no: ', num2str(iteration_number), ' m = ', num2str(size(X,1)),' NMAE: ', num2str(data_errors.error_data.MAE(end))];
254 | disp(ST);
255 | ST =[adaptive_method,' Iteration_no: ', num2str(iteration_number), ' m = ', num2str(size(X,1)),' NMaxAE: ', num2str(data_errors.error_data.RMAE(end))];
256 | disp(ST);
257 | ST =[adaptive_method,' Iteration_no: ', num2str(iteration_number), ' m = ', num2str(size(X,1)),' NRMSE: ', num2str(data_errors.error_data.RMSE(end))];
258 | disp(ST);
259 | ST =[adaptive_method,' Iteration_no: ', num2str(iteration_number), ' m = ', num2str(size(X,1)),' R^2: ', num2str(data_errors.error_data.R_sq(end))];
260 | disp(ST);
261 | fprintf('\n');
262 |
263 | stored_metamodels{iter} = metamodel;
264 | single_errors = {adaptive_method,data_errors};
265 |
266 | end
267 |
268 |
269 | end
270 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/ACE_function.m:
--------------------------------------------------------------------------------
1 | %% ACcumulative Error (ACE)
2 | % Li, Genzi, Vikrant Aute, and Shapour Azarm. "An accumulative error based adaptive design of experiments for offline metamodeling."
3 | % Structural and Multidisciplinary Optimization 40.1-6 (2010): 137.
4 | function x_new = ACE_function(obj,A)
5 | %%
6 | % Details: Obtain new sample point via ACE algorithm
7 | %
8 | % inputs:
9 | % obj - Ordinary kriging class object
10 | % A - Definition of parametric space
11 | %
12 | % outputs:
13 | % x_new - New sample point
14 |
15 | AA = [];
16 | b = [];
17 | Aeq = [];
18 | beq = [];
19 | lb = A(1,:);
20 | ub = A(2,:);
21 | eLOO = eL00_function(obj);
22 |
23 | [alpha,d] = ACE_alpha(obj);
24 |
25 | fun = @(x)ACE_optimize(obj,alpha,eLOO,x);
26 | nonlcon = @(x)ACE_con(obj,d,x);
27 |
28 |
29 | strategy = 'MS';
30 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,nonlcon);
31 |
32 |
33 | end
34 |
35 | function eLOO_perPoint = eL00_function(obj)
36 | %%
37 | % Details: Define leave-one-out error for each sample point
38 | %
39 | % inputs:
40 | % obj - Ordinary kriging class object
41 | %
42 | % outputs:
43 | % eLOO_perPoint - Array of leave-one-out error values
44 |
45 |
46 | eLOO_perPoint = zeros(obj.m,1);
47 | for i=1:obj.m
48 | Xp = obj.X;
49 | Xp(i,:) = [];
50 | Yp = obj.Y;
51 | Yp(i) = [];
52 | M_hatp = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
53 |
54 | y = obj.Y(i);
55 | [mu_hatp,~] = predict(M_hatp,obj.X(i,:));
56 |
57 | eLOO_perPoint(i) = norm(y - mu_hatp);
58 | end
59 | end
60 |
61 | function E_min = ACE_optimize(obj,alpha,eLOO, x)
62 | %%
63 | % Details: Define the optimization problem for ACE
64 | %
65 | % inputs:
66 | % obj - Ordinary kriging class object
67 | % alpha - Parameter value
68 | % eLOO - Array of leave-one-out error values
69 | % x - input for optimization
70 | %
71 | % outputs:
72 | % E_min - Value to be minimized
73 |
74 | E = 0;
75 | for i=1:obj.m
76 | E = E+ eLOO(i) * exp( - alpha * norm(obj.X(i,:) - x));
77 | end
78 |
79 | E_min = - E;
80 | end
81 |
82 |
83 | function [alpha, d] = ACE_alpha(obj)
84 | %%
85 | % Details: Obtain the alpha-value and the cluster threshold
86 | %
87 | % inputs:
88 | % obj - Ordinary kriging class object
89 | %
90 | % outputs:
91 | % alpha - alpha value needed for optimization problem
92 | % d - distance criterion
93 |
94 | min_distances= zeros(obj.m,1);
95 | for i=1:obj.m
96 | clear distance_min
97 | iter = 1;
98 | distance_min = zeros(obj.m-1,1);
99 | for j=1:obj.m
100 |
101 | if ~(i==j)
102 | distance_min(iter) = norm(obj.X(i,:) - obj.X(j,:));
103 | iter = iter +1;
104 | end
105 | end
106 | min_distances(i) = min(distance_min);
107 | end
108 |
109 | max_distance = max(min_distances);
110 |
111 | DOI_distance = 0.5 * max_distance;
112 | worst_case_DOI = 10^(-5);
113 | alpha = -log(worst_case_DOI) / DOI_distance;
114 |
115 | d= 0.5 *mean(min_distances);
116 |
117 | end
118 |
119 |
120 | function [c, ceq] = ACE_con(obj,d,x)
121 | %%
122 | % Details: Define the nonlinear constraint
123 | %
124 | % inputs:
125 | % obj - Ordinary kriging class object
126 | % d - distance constraint value
127 | % x - input value
128 | %
129 | % outputs:
130 | % ceq - Array of equality constraints
131 | % c - Array of inequality constraints
132 |
133 | ceq = [];
134 |
135 | for i=1:obj.m
136 | c(i) = d - norm(obj.X(i,:) - x);
137 | end
138 | end
139 |
140 |
--------------------------------------------------------------------------------
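Restating what `ACE_optimize`, `ACE_con`, and `ACE_alpha` above implement (a sketch in the code's notation, not the paper's):

```latex
x_{\mathrm{new}} = \arg\max_{x} \sum_{i=1}^{m} e_{\mathrm{LOO}}(x_i)\,
  \exp\!\big(-\alpha\,\lVert x_i - x\rVert\big)
\quad \text{s.t.} \quad \lVert x - x_i\rVert \ge d \;\;\forall i, \\[4pt]
\alpha = \frac{-\ln 10^{-5}}{0.5\,\max_i d_i}, \qquad
d = 0.5\,\operatorname{mean}_i d_i, \qquad
d_i = \min_{j \ne i}\, \lVert x_i - x_j \rVert .
```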
/src/adaptive_techniques/AME_function.m:
--------------------------------------------------------------------------------
1 | %% Adaptive Maximum Entropy (AME)
2 | % Liu, Haitao, et al. "An adaptive Bayesian sequential sampling approach for global metamodeling."
3 | % Journal of Mechanical Design 138.1 (2016): 011404.
4 | function x_new = AME_function(obj,A,strategy)
5 | % Details: Starting function for AME sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | global gamma_index
16 | global AMEpattern
17 |
18 |
19 | eloos = AME_eL00_function(obj);
20 | nu = ( (1/max(eloos)) .* eloos ).^(AMEpattern(gamma_index));
21 |
22 |
23 | R_adj = zeros(obj.m,obj.m);
24 |
25 | %theta2 = optimize_theta2(obj, nu);
26 |
27 | for i=1:obj.m
28 | for j=1:obj.m
29 | R_adj(i,j) = bayesian_autocorrelation_Matern(obj.X(i,:),obj.X(j,:),nu(i),nu(j),obj.theta );
30 | end
31 | end
32 |
33 |     % use the Bayesian-adjusted correlation matrix R_adj computed above
34 |
35 | beta_hat = compute_beta_hat(obj,obj.R);
36 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
37 |
38 | fun =@(x) adj_r0(obj,x,R_adj,nu, sigma_sq_hat);
39 | AA = [];
40 | b = [];
41 | Aeq = [];
42 | beq = [];
43 | lb = A(1,:);
44 | ub = A(2,:);
45 |
46 | strategy = 'MS';
47 |
48 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,[]);
49 |
50 | gamma_index = gamma_index+1;
51 | if gamma_index > numel(AMEpattern)
52 | gamma_index = 1;
53 | end
54 | %disp(AMEpattern(gamma_index))
55 |
56 |
57 | end
58 |
59 |
60 | function eLOO_perPoint = AME_eL00_function(obj)
61 | % Details: Obtains leave-one-out error value per sample point
62 | %
63 | % inputs:
64 | % obj - Ordinary kriging class object
65 | %
66 | % outputs:
67 | % eLOO_perPoint - Array of leave-one-out error values
68 |
69 | eLOO_perPoint = zeros(obj.m,1);
70 |
71 | for i=1:obj.m
72 |
73 | Xp = obj.X;
74 |
75 | Xp(i,:) = [];
76 |
77 | Yp = obj.Y;
78 |
79 | Yp(i) = [];
80 |
81 | M_hatp = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique, obj.theta);
82 |
83 |
84 |
85 | y = obj.Y(i);
86 |
87 | [mu_hatp,~] = predict(M_hatp,obj.X(i,:));
88 |
89 |
90 |
91 | eLOO_perPoint(i) = norm(y - mu_hatp);
92 |
93 | end
94 |
95 | end
96 |
97 |
98 | function AME_function_opti = adj_r0(obj,x,R,nu, sigma_sq)
99 | % Details: Function that needs to be optimized for AME.
100 | %
101 | % inputs:
102 | % obj - Ordinary kriging class object
103 | % x - Point in sample space
104 | % R - Given autocorrelation matrix
105 | % nu - Adjustment factors per sample; sigma_sq - Variance
106 | %
107 | % outputs:
108 | % AME_function_opti - Function output to be optimized
109 |
110 | %r0 = zeros(obj.m,1);
111 |
112 | Idx = knnsearch(obj.X,x);
113 |
114 | r02 = zeros(obj.m,1);
115 | for i=1:obj.m
116 | r02(i,1) =bayesian_autocorrelation_Matern(x,obj.X(i,:),nu(Idx),nu(i),obj.theta);
117 | end
118 | OptiMatrix = [R r02;
119 | r02' 1];
120 |
121 | AME_function_opti = -det(OptiMatrix);
122 |
123 |
124 |
125 | end
126 |
127 | function R = bayesian_autocorrelation_Matern(x1,x2,nu1,nu2,theta )
128 | %%
129 | % Details: Define altered Matern 3/2 autocorrelation function
130 | %
131 | % inputs:
132 | % x1 - First input
133 | % x2 - Second input
134 | % nu1 - First alteration value
135 | % nu2 - Second alteration value
136 | % theta - Hyperparameter vector
137 | %
138 | % outputs:
139 | % R - Autocorrelation value
140 |
141 | n = numel(x1);
142 |
143 |
144 | R_val = 1;
145 | for i=1:n
146 | r = x1(i)-x2(i);
147 |     % theta(i)*nu1*nu2 acts as an inverse length scale
148 | kval = (sqrt(3)*abs(r))*(theta(i)*nu1*nu2);
149 | val = (1+ kval)*exp(-kval);
150 | R_val = R_val* val;
151 | end
152 | R = R_val;
153 |
154 |
155 |
156 | end
157 |
158 |
159 |
--------------------------------------------------------------------------------
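The criterion maximized by `adj_r0` above is a maximum-entropy step with a locally sharpened kernel (a restatement of the code):

```latex
x_{\mathrm{new}} = \arg\max_{x}\,
\det\!\begin{pmatrix} \tilde{R} & \tilde{r}_0(x) \\ \tilde{r}_0(x)^{\mathsf T} & 1 \end{pmatrix},
\qquad
\nu_i = \left(\frac{e_{\mathrm{LOO}}(x_i)}{\max_j e_{\mathrm{LOO}}(x_j)}\right)^{\gamma},
```

where the adjusted Matern 3/2 kernel scales `theta` by the factors nu_i nu_j, and gamma cycles through the pattern {0, 0.5, 1, 100} (the global `AMEpattern`), alternating between exploration and exploitation.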
/src/adaptive_techniques/CVVor_function.m:
--------------------------------------------------------------------------------
1 | %% Cross-Validation-Voronoi (CVVOR)
2 | % Xu, Shengli, et al. "A robust error-pursuing sequential sampling approach for global metamodeling based on voronoi diagram and cross validation."
3 | % Journal of Mechanical Design 136.7 (2014): 071009.
4 | function x_new = CVVor_function(obj,A)
5 | % Details: Starting function for CVVor sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | %
11 | % outputs:
12 | % x_new - New found sample point
13 |
14 | addpath('help_functions')
15 | lb = A(1,:);
16 | ub = A(2,:);
17 | C = randomVoronoi(obj.X,lb,ub);
18 |
19 |
20 | eLOO_perPoint = eL00_function(obj);
21 |
22 | [~,k] = max(eLOO_perPoint);
23 |
24 | P_sensitive = C{k,1};
25 | maxDistance = -inf;
26 | for i=2:size(C{k,2},1)
27 | distance = norm(C{k,2}(i,:)-P_sensitive);
28 | if distance > maxDistance
29 | x_new = C{k,2}(i,:);
30 | maxDistance = distance;
31 | end
32 | end
33 |
34 |
35 | end
36 |
37 | function eLOO_perPoint = eL00_function(obj)
38 | % Details: Obtains leave-one-out error value per sample point
39 | %
40 | % inputs:
41 | % obj - Ordinary kriging class object
42 | %
43 | % outputs:
44 | % eLOO_perPoint - Array of leave-one-out error values
45 |
46 | eLOO_perPoint = zeros(obj.m,1);
47 | for i=1:obj.m
48 | Xp = obj.X;
49 | Xp(i,:) = [];
50 | Yp = obj.Y;
51 | Yp(i) = [];
52 | M_hatp = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
53 |
54 | y = obj.Y(i);
55 | [mu_hatp,~] = predict(M_hatp,obj.X(i,:));
56 |
57 | eLOO_perPoint(i) = norm(y - mu_hatp);
58 | end
59 | end
60 |
61 |
62 |
--------------------------------------------------------------------------------
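In short, the step implemented above: estimate each sample's leave-one-out error, pick the Voronoi cell of the worst sample, and return the random candidate in that cell farthest from it:

```latex
k = \arg\max_i\, e_{\mathrm{LOO}}(x_i), \qquad
x_{\mathrm{new}} = \arg\max_{x \in C_k} \lVert x - x_k \rVert,
```

where C_k is the set of random candidate points assigned to the Voronoi cell of x_k by `randomVoronoi`.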
/src/adaptive_techniques/EIGF_function.m:
--------------------------------------------------------------------------------
1 | %% Expected improvement for global fit (EIGF)
2 | % Lam, Chen Quin. "Sequential adaptive designs in computer experiments for response surface model fit."
3 | % Diss. The Ohio State University, 2008.
4 | function x_new = EIGF_function(obj,A, strategy)
5 | % Details: Starting function for EIGF sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | fun = @(x) adaptive_EIGF(obj,x);
16 | AA = [];
17 | b = [];
18 | Aeq = [];
19 | beq = [];
20 | n = numel(A(1,:));
21 | lb = A(1,:);
22 | ub = A(2,:);
23 |
24 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,[]);
25 | end
26 |
27 | function EIGF_min = adaptive_EIGF(obj, x)
28 | % Details: EIGF optimization function.
29 | %
30 | % inputs:
31 | % obj - Ordinary kriging class object
32 | % x - Input value
33 | %
34 | % outputs:
35 | % EIGF_min - Value to be optimized
36 |
37 | beta_hat = compute_beta_hat(obj,obj.R);
38 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
39 |
40 | mu_hat = compute_mu_hat(obj,obj.R,beta_hat,x,obj.theta);
41 | sigma_Y_sq_hat = compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x,obj.theta,obj.R);
42 |
43 |
44 | k = dsearchn(obj.X,x);
45 | EIGF = (mu_hat - obj.Y(k))^(2) + sigma_Y_sq_hat;
46 |
47 | EIGF_min=-EIGF;
48 | end
49 |
--------------------------------------------------------------------------------
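The quantity maximized by `adaptive_EIGF` above, with x_{i^*} the training point nearest to x (found via `dsearchn`):

```latex
\mathrm{EIGF}(x) = \big(\hat{y}(x) - y(x_{i^*})\big)^{2} + \hat{\sigma}_Y^{2}(x).
```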
/src/adaptive_techniques/EI_function.m:
--------------------------------------------------------------------------------
1 | %% Expected improvement (EI)
2 | % Jones, Donald R., Matthias Schonlau, and William J. Welch. "Efficient global optimization of expensive black-box functions."
3 | % Journal of Global optimization 13.4 (1998): 455-492.
4 | function x_new = EI_function(obj,A, strategy)
5 | % Details: Starting function for EI sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | fun = @(x) adaptive_EI(obj,x);
16 | AA = [];
17 | b = [];
18 | Aeq = [];
19 | beq = [];
20 | n = numel(A(1,:));
21 | lb = A(1,:);
22 | ub = A(2,:);
23 |
24 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,[]);
25 | end
26 |
27 | function EI_min = adaptive_EI(obj,x)
28 | % Details: EI optimization function.
29 | %
30 | % inputs:
31 | % obj - Ordinary kriging class object
32 | % x - Input value
33 | %
34 | % outputs:
35 | % EI_min - Value to be optimized
36 |
37 | beta_hat = compute_beta_hat(obj,obj.R);
38 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
39 |
40 | mu_hat = compute_mu_hat(obj,obj.R,beta_hat,x,obj.theta);
41 | sigma_Y_sq_hat = compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x,obj.theta,obj.R);
42 |
43 |
44 | ymin = min(obj.Y);
45 | val = (ymin-mu_hat)/sqrt(sigma_Y_sq_hat);
46 | EI = (ymin - mu_hat) * normcdf(val) + sqrt(sigma_Y_sq_hat) * normpdf(val);
47 |
48 | EI_min=-EI;
49 |
50 |
51 | end
52 |
--------------------------------------------------------------------------------
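`adaptive_EI` implements the classical expected-improvement criterion of Jones et al.; the optimizer then minimizes its negative:

```latex
u = \frac{y_{\min} - \hat{y}(x)}{\hat{\sigma}_Y(x)}, \qquad
\mathrm{EI}(x) = \big(y_{\min} - \hat{y}(x)\big)\,\Phi(u) + \hat{\sigma}_Y(x)\,\varphi(u),
```

with Phi and varphi the standard normal CDF and PDF.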
/src/adaptive_techniques/Jin_CV_function.m:
--------------------------------------------------------------------------------
1 | %% Jin's Cross validation approach (CVA)
2 | % Jin, Ruichen, Wei Chen, and Agus Sudjianto. "On sequential sampling for global metamodeling in engineering design."
3 | % ASME 2002 International Design Engineering Technical Conferences and Computers and Information in Engineering Conference. American Society of Mechanical Engineers, 2002.
4 | function x_new = Jin_CV_function(obj,A,strategy)
5 | % Details: Starting function for CVA sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 |
16 | for i=1:obj.m
17 | Xp = obj.X;
18 | Xp(i,:) = [];
19 | Yp = obj.Y;
20 | Yp(i) = [];
21 | M_hatps{i} = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
22 | end
23 |
24 | fun = @(x) adaptive_Jin_CV(obj,M_hatps,x);
25 | AA = [];
26 | b = [];
27 | Aeq = [];
28 | beq = [];
29 | n = numel(A(1,:));
30 | lb = A(1,:);
31 | ub = A(2,:);
32 |
33 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,[]);
34 | end
35 |
36 |
37 |
38 | function Jin_CV_min = adaptive_Jin_CV(obj,M_hatps,x)
39 | % Details: Optimization function definition for CVA sampling.
40 | %
41 | % inputs:
42 | % obj - Ordinary kriging class object
43 | % M_hatps - Leave-one-out surrogate class
44 | % x - Input value
45 | %
46 | % outputs:
47 | % Jin_CV_min - Value to be optimized
48 |
49 | val=0;
50 | for i=1:obj.m
51 |
52 | [mu_hat,~] = predict(obj,x);
53 | [mu_hatp,~] = predict(M_hatps{i},x);
54 |
55 | val = val + (mu_hatp - mu_hat)^2;
56 | end
57 |
58 | e = sqrt((1/obj.m) * val);
59 |
60 | Jin_CV_min = - e * d(obj,x);
61 | end
62 |
63 | function min_distance = d(obj,x)
64 | % Details: Distance to the closest existing sample, used to weight the CV error
65 | %
66 | % inputs:
67 | % obj - Ordinary kriging class object
68 | % x - Input value
69 | %
70 | % outputs:
71 | % min_distance - Distance to the nearest existing sample point
72 |
73 | for i=1:obj.m
74 |     dists(i) = norm(obj.X(i,:)-x);
75 | end
76 | min_distance = min(dists);
77 | end
78 |
--------------------------------------------------------------------------------
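The criterion maximized above combines a leave-one-out prediction variability term with the distance to the nearest sample (a restatement of the code):

```latex
e_{\mathrm{CV}}(x) = \sqrt{\tfrac{1}{m}\sum_{i=1}^{m}\big(\hat{y}_{-i}(x) - \hat{y}(x)\big)^{2}},
\qquad
x_{\mathrm{new}} = \arg\max_{x}\; e_{\mathrm{CV}}(x)\,\min_i \lVert x - x_i \rVert,
```

where \hat{y}_{-i} is the kriging predictor rebuilt without sample i.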
/src/adaptive_techniques/LIP_function.m:
--------------------------------------------------------------------------------
1 | %% Sampling with Lipschitz Criterion (LIP)
2 | % Lovison, Alberto, and Enrico Rigoni. "Adaptive sampling with a Lipschitz criterion for accurate metamodeling."
3 | % Communications in Applied and Industrial Mathematics 1.2 (2011): 110-126.
4 | function x_new = LIP_function(obj,A, strategy)
5 | % Details: Starting function for LIP sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | d = size(obj.X,2);
16 | lb = A(1,:);
17 | ub = A(2,:);
18 |
19 | % Delaunay triangulation of current sample points
20 | T = delaunayn(obj.X);
21 |
22 | % Find the maximum Lipschitz constant for each candidate point
23 | max_Lips = zeros(obj.m,1);
24 | for i=1:obj.m
25 | max_Lips(i)= find_max_Lip_value(obj,i,T);
26 | end
27 |
28 |
29 | % Generate candidate points with a scaled Latin hypercube design
30 | addpath('help_functions')
31 | m_candidate=5000*d;
32 | x_cand=lhs_scaled(2*m_candidate,lb,ub); % 2*m_candidate = 10000*d candidates in total
32 |
33 |
34 | % Obtain the merit function for each candidate point and find
35 | % the candidate with the highest merit
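% Merit of a candidate x: I(x) = L(x_nn) * ||x - x_nn||, where x_nn is the
% nearest existing sample and L(x_nn) its local Lipschitz estimate.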
36 | merit = -inf; % ensures x_temp is assigned on the first candidate
37 |
38 | for i=1:size(x_cand,1)
39 | [Idx,radius] = knnsearch(obj.X, x_cand(i,:));
40 | merit_temp = max_Lips(Idx)*radius;
41 | if merit_temp > merit
42 | merit = merit_temp;
43 | x_temp = x_cand(i,:);
44 | end
45 | end
46 |
47 | % New sample point is candidate point with maximum merit
48 | x_new = x_temp;
49 |
50 | end
51 | function adj_nodes = find_adj_Delaunay_nodes(obj, index, T)
52 | % Details: Obtain adjacent nodes in Delaunay triangulation
53 | %
54 | % inputs:
55 | % obj - Ordinary kriging class object
56 | % index - Sample point index
57 | % T - Delaunay triangulation definition
58 | %
59 | % outputs:
60 | % adj_nodes - Adjacent nodes
61 |
62 | [neighbor_index,~] = find(T==index);
63 |
64 | neighbors = T(neighbor_index,:);
65 | adj_nodes = setdiff(unique(neighbors), index);
66 |
67 | end
68 |
69 | function max_Lip = find_max_Lip_value(obj,index,T)
70 | % Details: Find the maximum Lipschitz constant of the sample point given
71 | % by the index
72 | %
73 | % inputs:
74 | % obj - Ordinary kriging class object
75 | % index - Sample point index
76 | % T - Delaunay triangulation definition
77 | %
78 | % outputs:
79 | % max_Lip - Maximum Lipschitz constant
80 |
81 | % Obtain adjacent nodes in Delaunay triangulation
82 | adj_nodes = find_adj_Delaunay_nodes(obj, index, T);
83 | for i=1:numel(adj_nodes)
84 | L(i) = norm(obj.Y(index)-obj.Y(adj_nodes(i)))/ norm(obj.X(index,:) - obj.X(adj_nodes(i),:) );
85 | end
86 |
87 | max_Lip = max(L);
88 |
89 | end
90 |
91 |
92 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/LOLA_function.m:
--------------------------------------------------------------------------------
1 | %% LOLA-Voronoi (LOLA)
2 | % Crombecq, Karel, et al. "A novel hybrid sequential design strategy for global surrogate modeling of computer experiments."
3 | % SIAM Journal on Scientific Computing 33.4 (2011): 1948-1974.
4 | function x_new = LOLA_function(obj,A)
5 | %%
6 | % Details: Obtain new sample point via LOLA sampling algorithm
7 | %
8 | % inputs:
9 | % obj - Ordinary kriging class object
10 | % A - definition of parametric space
11 | %
12 | % outputs:
13 | % x_new - New sample point
14 |
15 |
16 | addpath('help_functions')
17 | lb = A(1,:);
18 | ub = A(2,:);
19 |
20 | % Obtain volume estimates
21 | C = randomVoronoi(obj.X,lb,ub);
22 | Vol = zeros(obj.m,1);
23 | for I_sample_P=1:obj.m
24 | Vol(I_sample_P) = size(C{I_sample_P,2},1);
25 | end
26 | abs_Vol= sum(Vol);
27 | Vol = (1/abs_Vol)*Vol;
28 |
29 |
30 | d = numel(lb);
31 | no_neighbors = 2 * d;
32 |
33 | for I_sample_P=1:obj.m
34 |
35 | % Find neighborhood
36 | p_ref = obj.X(I_sample_P,:);
37 | y_ref = obj.Y(I_sample_P);
38 | [p_neighbors, best_indexs] = find_best_p_neighbors(obj, p_ref, no_neighbors);
39 | data_neighbors{I_sample_P} =best_indexs;
40 | y_neighbors = obj.Y([best_indexs],:);
41 |
42 | % Gradient estimation
43 | Pr = zeros(no_neighbors, d);
44 | for i=1:d
45 | for j=1:no_neighbors
46 | Pr(j,i) = p_neighbors(j,i)-p_ref(i);
47 | end
48 | end
49 | g = linsolve(Pr,y_neighbors - y_ref); % least-squares fit of y ~ y_ref + g'*(p - p_ref)
50 |
51 | % Obtain nonlinearity measure
52 | E(I_sample_P) =0.0;
53 | for j=1:no_neighbors
54 | E(I_sample_P) = E(I_sample_P)+ abs( y_neighbors(j,:) - (y_ref + g' *(p_neighbors(j,:) - p_ref)'));
55 | end
56 |
57 |
58 | end
59 |
60 | % Obtain H
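% Hybrid score: exploration (relative Voronoi volume) plus exploitation
% (relative nonlinearity): H_i = V_i + E_i / sum_j E_j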
61 | Esum = sum(E) ;
62 | H = zeros(obj.m,1);
63 | for I_sample_P=1:obj.m
64 | H(I_sample_P) = Vol(I_sample_P) + (E(I_sample_P) / Esum);
65 | end
66 |
67 | [~,I_H] = max(H);
68 |
69 | % Obtain new point with distance constraint
70 | maxdistance = - inf;
71 |
72 | p_ref_and_neighbors = [obj.X(I_H,:); obj.X([data_neighbors{I_H}],:)];
73 | for i=2:size(C{I_H,2},1)
74 | cumDistance = 0;
75 | for j=1:size(p_ref_and_neighbors,1)
76 | cumDistance = cumDistance + norm(C{I_H,2}(i,:) - p_ref_and_neighbors(j,:));
77 | end
78 | if cumDistance > maxdistance
79 | maxdistance = cumDistance; x_new = C{I_H,2}(i,:); % remember the best candidate so far
80 | end
81 | end
82 |
83 |
84 |
85 | end
86 |
87 |
88 |
89 | function [p_neighbors, best_indexs] =find_best_p_neighbors(obj, p_ref, no_neighbors)
90 | %%
91 | % Details: Find the best neighborhood points around sample p_ref
92 | %
93 | % inputs:
94 | % obj - Ordinary kriging class object
95 | % p_ref - Reference point to find best neighbors for
96 | % no_neighbors - number of neighbors to find
97 | %
98 | % outputs:
99 | % p_neighbors - Found neighborhood points
100 | % best_indexs - Indices of the p_neighbors
101 |
102 |
103 | d = size(p_ref,2);
104 |
105 | k = find(ismember(obj.X ,p_ref,'rows'));
106 |
107 |
108 |
110 |
111 | for i=1:size(obj.X,1)
112 | X_P(i,1) = norm(p_ref - obj.X(i,:));
113 | if X_P(i,1) == 0
114 | X_P(i,1) =100;
115 | end
116 | end
117 |
118 | index_close = [];
119 | a = 0.15;
120 | while numel(index_close) < no_neighbors
121 | index_close = find(X_P < a*d);
122 | a = a+0.05;
123 |
124 | end
125 |
126 |
127 | C = nchoosek(index_close,no_neighbors);
128 |
129 | C_wo_k = C;
130 |
131 |
132 |
133 | Smax = -inf;
134 | best_indexs = 0;
135 | for i=1:size(C_wo_k,1)
136 | C = 0;
137 | for j=1:size(C_wo_k,2)
138 | C = C + norm(obj.X(C_wo_k(i,j),:) - p_ref); % cohesion: average distance to the reference point
139 | end
140 | C = (1/no_neighbors) * C;
141 |
142 | if d ==1
143 | pr1 = obj.X(C_wo_k(i,1),:) - p_ref; % offsets relative to the reference point
144 | pr2 = obj.X(C_wo_k(i,2),:) - p_ref;
145 | R_val = 1 - (abs(pr1+pr2)/(abs(pr1)+abs(pr2)+abs(pr1 - pr2)));
146 | else
147 | A = 0;
148 | for ii=1:size(C_wo_k,2)
149 | min_distance = inf;
150 | for jj=1:size(C_wo_k,2)
151 | if ~ (ii==jj)
152 | distance = norm(obj.X(C_wo_k(i,ii),:) - obj.X(C_wo_k(i,jj),:));
153 | if distance < min_distance
154 | min_distance = distance;
155 | end
156 | end
157 | end
158 | A = A + min_distance;
159 |
160 |
161 | end
162 | A = (1/no_neighbors) * A;
163 | R_val = A/(sqrt(2) * C);
164 | end
165 |
166 | S = R_val/C;
167 | if S > Smax
168 | Smax = S;
169 | best_indexs = C_wo_k(i,:);
170 | end
171 | end
172 | p_neighbors = obj.X([best_indexs],:);
173 |
174 | end
--------------------------------------------------------------------------------
/src/adaptive_techniques/MASA_function.m:
--------------------------------------------------------------------------------
1 | %% Mixed Adaptive Sampling Algorithm (MASA)
2 | % Eason, John, and Selen Cremaschi. "Adaptive sequential sampling for surrogate model generation with artificial neural networks."
3 | % Computers & Chemical Engineering 68 (2014): 220-232.
4 | function x_new = MASA_function(obj,A)
5 | % Details: Starting function for MASA sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | %
11 | % outputs:
12 | % x_new - New found sample point
13 |
14 |
15 | % Create committee by LOOCV or different autocorrelation functions
16 | committee_type = 'AutoCorrelation';
17 | % committee_type = 'LOOCV';
18 |
19 | Committee = MASA_comittee(obj, committee_type);
20 | N_of_Committe = numel(Committee);
21 |
22 | % Create candidate points, user chosen 5000*dimensions
23 | addpath('help_functions')
24 | d = size(obj.X,2);
25 | C_candidatePoints = 5000 * d;
26 | lb = A(1,:);
27 | ub = A(2,:);
28 | CandidatePoints = scale_rand(C_candidatePoints,lb,ub);
29 |
30 | for j=1:C_candidatePoints
31 | normal_model_value = predict(obj,CandidatePoints(j,:));
32 | for i=1:N_of_Committe
33 | committee_model_value = predict(Committee{i},CandidatePoints(j,:));
34 | y_tilde(j,i) = N_of_Committe * normal_model_value - (N_of_Committe - 1) * committee_model_value;
35 | end
36 | k = dsearchn(obj.X,CandidatePoints(j,:));
37 | nndp(j) = norm(obj.X(k,:) - CandidatePoints(j,:));
38 | end
39 |
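% Jackknife variance of the committee predictions:
%   s^2(x) = 1/(N(N-1)) * sum_i (y_tilde_i(x) - y_tilde_avg(x))^2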
40 | for j=1:C_candidatePoints
41 | y_tilde_avg = mean(y_tilde(j,:));
42 |
43 | val = 0;
44 | for i=1:N_of_Committe
45 | val = val + (y_tilde(j,i) - y_tilde_avg)^2;
46 | end
47 | s_sq(j) = ( 1/(N_of_Committe * (N_of_Committe - 1))) * val;
48 | end
49 |
50 |
51 | for j=1:C_candidatePoints
52 | np(j) = (nndp(j)/max(nndp)) + (s_sq(j)/(max(s_sq)));
53 | end
54 |
55 | [~,I_c] = max(np);
56 | x_new = CandidatePoints(I_c,:);
57 |
58 | end
59 |
60 | function Committee = MASA_comittee(obj, committee_type)
61 | % Details: Create committee of kriging models by LOOCV or autocorrelation
62 | % function
63 | %
64 | % inputs:
65 | % obj - Ordinary kriging class object
66 | % committee_type - Definition of how to create committee
67 | %
68 | % outputs:
69 | % Committee - Cell array of committee members
70 |
71 | % Create committee of kriging models by LOOCV
72 | if strcmp(committee_type,'LOOCV')
73 | N=obj.m;
74 | index = randperm( size(obj.X,1),N);
75 | for i=1:numel(index)
76 | Xp = obj.X;
77 | Xp(index(i),:) = [];
78 | Yp = obj.Y;
79 | Yp(index(i)) = [];
80 |
81 | M_hatp = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
82 | Committee{i} = M_hatp;
83 | end
84 | % Create committee of kriging models by AutoCorrelation
85 | elseif strcmp(committee_type,'AutoCorrelation')
86 | M_hat_Cubic = OK_model(@Cubic_spline_matrix,obj.X,obj.Y,obj.theta_opti_technique);
87 | M_hat_Matern32 = OK_model(@Matern32_matrix,obj.X,obj.Y,obj.theta_opti_technique);
88 | M_hat_Matern52 = OK_model(@Matern52_matrix,obj.X,obj.Y,obj.theta_opti_technique);
89 | M_hat_ex_sq = OK_model(@R_sq_ex_matrix,obj.X,obj.Y,obj.theta_opti_technique);
90 | M_hat_ex = OK_model(@R_ex_matrix,obj.X,obj.Y,obj.theta_opti_technique);
91 |
92 |
93 | Committee = {M_hat_Cubic; M_hat_Matern32; M_hat_Matern52; M_hat_ex_sq; M_hat_ex };
94 | end
95 | end
96 |
97 |
98 |
99 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/MEPE_function.m:
--------------------------------------------------------------------------------
1 | %% Maximizing Expected Prediction Error (MEPE)
2 | %Liu, Haitao, Jianfei Cai, and Yew-Soon Ong. "An adaptive sampling approach for kriging metamodeling by maximizing expected prediction error."
3 | %Computers & Chemical Engineering 106 (2017): 171-182.
4 | function x_new = MEPE_function(obj,A,strategy)
5 | % Details: Starting function for MEPE sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | global MEPE_q
16 | global old_Model
17 | if MEPE_q == 1
18 | old_Model = obj;
19 | e_true = inf;
20 | elseif MEPE_q>1
21 | e_true = abs(old_Model.predict(obj.X(end,:))-obj.Y(end));
22 | old_Model = obj;
23 | end
24 |
25 | beta_hat = compute_beta_hat(obj,obj.R);
26 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
27 |
28 | sigma_Y_sq_hat_fun = @(x) compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x,obj.theta,obj.R);
29 |
30 |
31 | e_CV = e_CV_fast_function(obj);
32 | alpha = MEPE_alpha_function(obj,e_true, e_CV(end));
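% Expected prediction error balances local LOO error (exploitation) and
% Kriging variance (exploration):
%   EPE(x) = alpha * e_CV(x_nearest) + (1 - alpha) * sigma_Y^2(x)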
33 | fun =@(x) -( alpha * e_CV_nearestPoint_function(obj,e_CV, x) + (1-alpha) * sigma_Y_sq_hat_fun(x));
34 |
35 | AA = [];
36 | b = [];
37 | Aeq = [];
38 | beq = [];
39 | lb = A(1,:);
40 | ub = A(2,:);
41 |
42 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,[]);
43 | end
44 |
45 | function e_CV_fast = e_CV_fast_function(obj)
46 | % Details: Efficient way to obtain leave-one-out cross validation error as
47 | % defined in paper
48 | %
49 | % inputs:
50 | % obj - Ordinary kriging class object
51 | %
52 | % outputs:
53 | % e_CV_fast - Array of leave-one-out error values
54 |
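% Closed-form LOO residuals computed from R^{-1} and the hat matrix
% H = F (F'F)^{-1} F', avoiding m refits of the model (Liu et al., 2017).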
55 | beta_hat = compute_beta_hat(obj,obj.R);
56 | H = obj.F *((obj.F' * obj.F)\obj.F');
57 | d = obj.Y - obj.F * beta_hat;
58 | inv_R = inv(obj.R);
59 | e_CV_fast = zeros(obj.m,1);
60 | for i=1:obj.m
61 |
62 | e_CV_fast(i) = ((inv_R(i,:) * ( d + H(:,i) * (d(i)/(1-H(i,i)))) ) / (inv_R(i,i)) )^2;
63 | end
64 | end
65 |
66 |
67 | function e_CV_nearestPoint = e_CV_nearestPoint_function(obj, e_CV, x)
68 | % Details: Utility function to find the leave-one-out error of the nearest
69 | % point
70 | %
71 | % inputs:
72 | % obj - Ordinary kriging class object
73 | % e_CV - Array of leave-one-out errors
74 | % x - Input value
75 | %
76 | % outputs:
77 | % e_CV_nearestPoint - Leave-one-out error at position x
78 |
79 | k = dsearchn(obj.X,x);
80 | e_CV_nearestPoint = e_CV(k);
81 | end
82 |
83 |
84 |
85 |
86 | function alpha = MEPE_alpha_function(obj,e_true, e_CV)
87 | % Details: Utility function to get the current alpha value
88 | %
89 | % inputs:
90 | % obj - Ordinary kriging class object
91 | % e_true - "True" leave-one-out errors
92 | % e_CV - Leave-one-out errors
93 | %
94 | % outputs:
95 | % alpha - alpha value
96 |
97 | global MEPE_q
98 |
99 | if MEPE_q == 1
100 | alpha = 0.5;
101 | elseif MEPE_q>1
102 | val = 0.5 * ((e_true^2) / e_CV);
103 | alpha = 0.99 * min(val,1);
104 | end
105 |
106 | MEPE_q = MEPE_q + 1;
107 | end
108 |
109 |
110 |
111 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/MIPT_function.m:
--------------------------------------------------------------------------------
1 | %% MC-intersite-proj-th (MIPT)
2 | % Crombecq, Karel, Eric Laermans, and Tom Dhaene. "Efficient space-filling and non-collapsing sequential design strategies for simulation-based modeling."
3 | % European Journal of Operational Research 214.3 (2011): 683-696.
4 | function x_new = MIPT_function(obj,A)
5 | % Details: Starting function for MIPT sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | %
11 | % outputs:
12 | % x_new - New found sample point
13 |
14 | addpath('TPLHD')
15 | n = numel(A(1,:));
16 | lb = A(1,:);
17 | ub = A(2,:);
18 | p = scaled_TPLHD(100 * obj.m,lb,ub);
19 |
20 | alpha = 0.5;
21 | dmin = (2*alpha)/ obj.m;
22 | MIPT_val = -inf;
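% MC-intersite-proj-th: among Monte Carlo candidates, keep those whose minimum
% projected distance to the design exceeds dmin = 2*alpha/m, and among these
% maximize the intersite distance (both handled inside intersite_proj_th).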
23 |
24 | addpath('help_functions')
25 | for i=1:size(p,1)
26 | val = intersite_proj_th(dmin, obj.X,p(i,:));
27 | if val > MIPT_val
28 | MIPT_val = val;
29 | x_new = p(i,:);
30 | end
31 |
32 | end
33 |
34 | end
35 |
36 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/MSD_function.m:
--------------------------------------------------------------------------------
1 | %% Maximin Distance design (MSD)
2 | % Johnson, Mark E., Leslie M. Moore, and Donald Ylvisaker. "Minimax and maximin distance designs."
3 | % Journal of statistical planning and inference 26.2 (1990): 131-148.
4 | function x_new = MSD_function(obj,A)
5 | % Details: Starting function for MSD sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | %
11 | % outputs:
12 | % x_new - New found sample point
13 |
14 | k = numel(obj.theta);
15 | alpha = obj.theta;
16 | lb = A(1,:);
17 | ub = A(2,:);
18 | addpath('TPLHD')
19 | n = obj.m*100*k;
20 | Xc = scaled_TPLHD(n,lb,ub); % lhsdesign_modified is not part of this repository; use the TPLHD design instead
21 |
22 | XA = [Xc; obj.X];
23 |
24 | for i=1:size(Xc,1)
25 | iter =1;
26 | clear min_distance
27 | for j=1:(obj.m + size(Xc,1))
28 | if ~(isequal(Xc(i,:),XA(j,:)))
29 | min_distance(iter) = MSD_d_function(Xc(i,:),XA(j,:),obj.theta);
30 | iter = iter+1;
31 | end
32 | end
33 | distance(i) = min(min_distance);
34 | end
35 |
36 | [~,index] = max(distance);
37 |
38 | x_new = Xc(index,:);
39 | end
40 |
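% MSD_d_function is referenced above but not defined anywhere in this file or
% the repository tree; a minimal sketch follows, assuming the theta-weighted
% squared distance commonly paired with Kriging correlation functions.
function dist = MSD_d_function(x1,x2,theta)
% Weighted distance d(x1,x2) = sum_k theta_k * (x1_k - x2_k)^2
dist = sum(theta(:)' .* (x1 - x2).^2);
end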
41 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/SFVCT_function.m:
--------------------------------------------------------------------------------
1 | %% Space-Filling Cross Validation Tradeoff (SFCVT)
2 | % Aute, Vikrant, et al. "Cross-validation based single response adaptive design of experiments for Kriging metamodeling of deterministic computer simulations."
3 | % Structural and Multidisciplinary Optimization 48.3 (2013): 581-605.
4 | function x_new = SFVCT_function(obj,A,strategy)
5 | % Details: Starting function for SFCVT sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | rel_elo = calculate_rel_elo(obj);
16 |
17 | e_model = OK_model(obj.auto_correlation_function,obj.X,rel_elo,obj.theta_opti_technique); % fit the error surrogate with the model's theta optimizer
18 |
19 |
20 | AA = [];
21 | b = [];
22 | Aeq = [];
23 | beq = [];
24 |
25 | lb = A(1,:);
26 | ub = A(2,:);
27 |
28 | S = SFVCT_S(obj);
29 |
30 | fun = @(x)SFVCT_optimize(e_model,x);
31 | nonlcon = @(x)SFVCT_con(obj,S,x);
32 |
33 |
34 | strategy = 'MS'; % note: the input strategy is overridden; multi-start is used for this constrained subproblem
35 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,nonlcon);
36 |
37 |
38 |
39 | end
40 |
41 | function elo_min = SFVCT_optimize(e_model,x)
42 | % Details: Function to be optimized.
43 | %
44 | % inputs:
45 | % e_model - Ordinary kriging class object
46 | % x - Input value
47 | %
48 | % outputs:
49 | % elo_min - Value to be optimized
50 |
51 | elo_min = - e_model.predict(x);
52 | end
53 |
54 | function rel_elo = calculate_rel_elo(obj)
55 | % Details: Obtain the leave-one-out error at each sample point
56 | %
57 | % inputs:
58 | % obj - Ordinary kriging class object
59 | %
60 | % outputs:
61 | % rel_elo - Array of leave-one-out errors
62 |
63 | rel_elo = zeros(obj.m,1);
64 | for i=1:obj.m
65 | Xp = obj.X;
66 | Xp(i,:) = [];
67 | Yp = obj.Y;
68 | Yp(i) = [];
69 | M_hatp = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
70 |
71 | y = obj.Y(i,:);
72 | [mu_hatp,~] = predict(M_hatp,obj.X(i,:));
73 |
74 | rel_elo(i,1) = norm(y - mu_hatp); % absolute LOO error; division by y (relative form) is disabled
75 | end
76 | end
77 |
78 | function [S] = SFVCT_S(obj)
79 | % Details: Get the SFVCT distance for the nonlinear constraint
80 | %
81 | % inputs:
82 | % obj - Ordinary kriging class object
83 | %
84 | % outputs:
85 | % S - Distance constraint value
86 |
87 |
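% S is half of the largest nearest-neighbor distance in the current design:
%   S = 0.5 * max_i min_{j~=i} ||x_i - x_j||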
88 | % This is faster than pdist2
89 | min_distances= zeros(obj.m,1);
90 | for i=1:obj.m
91 | clear distance_min
92 | iter = 1;
93 | distance_min = zeros(obj.m-1,1);
94 | for j=1:obj.m
95 |
96 | if ~(i==j)
97 | distance_min(iter) = norm(obj.X(i,:) - obj.X(j,:));
98 | iter = iter +1;
99 | end
100 | end
101 | min_distances(i) = min(distance_min);
102 | end
103 |
104 | % Check pdist2
105 | %min_distances = max(pdist2(obj.X,obj.X,'euclidean','Smallest',2));
106 |
107 |
108 | max_distance = max(min_distances);
109 |
110 | S= 0.5 *max_distance;
111 |
112 | end
113 |
114 | function [c, ceq] = SFVCT_con(obj,d,x)
115 | % Details: Define arrays needed for the constraint definition
116 | %
117 | % inputs:
118 | % obj - Ordinary kriging class object
119 | % d - Distance constraint value
120 | % x - Input value
121 | %
122 | % outputs:
123 | % c - Inequality constraint array
124 | % ceq - Equality constraint array
125 |
126 | ceq = [];
127 |
128 | for i=1:obj.m
129 | c(i) = d - norm(obj.X(i,:) - x);
130 | end
131 | end
132 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/SSA_function.m:
--------------------------------------------------------------------------------
1 | %% Smart Sampling Algorithm (SSA)
2 | % Garud, Sushant Suhas, Iftekhar A. Karimi, and Markus Kraft.
3 | % "Smart sampling algorithm for surrogate model development." Computers & Chemical Engineering 96 (2017): 103-114
4 | function x_new = SSA_function(obj,A,strategy)
5 | % Details: Starting function for SSA sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | % obtain crowding distance metric
16 | CMD = zeros(obj.m,1);
17 | for i=1:obj.m
18 | val = 0.0;
19 | for j=1:obj.m
20 | val = val + norm(obj.X(i,:)-obj.X(j,:))^2;
21 | end
22 | CMD(i) = val;
23 | end
24 |
25 | [~,p] = sort(CMD,'descend');
26 |
27 | AA = [];
28 | b = [];
29 | Aeq = [];
30 | beq = [];
31 | lb = A(1,:);
32 | ub = A(2,:);
33 |
34 | % Set epsilon = 0.01; the value is not given by the authors.
35 | epsilon = 0.01;
36 |
37 | T = 0;
38 | iter = 1;
39 | while ~all(T)
40 |
41 | clear diff Xp Yp
42 | Xp = obj.X;
43 | Xp(p(iter),:) = [];
44 |
45 | Yp = obj.Y;
46 | Yp(p(iter),:) = [];
47 | M_hatp = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
48 | M_hat = obj;
49 | fun = @(x) SSA_optimize(x,M_hatp,M_hat);
50 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,[]);
51 |
52 | diff = zeros(obj.m,1);
53 | for ii=1:obj.m
54 | diff(ii) = norm(x_new - obj.X(ii,:));
55 | end
56 |
57 | T = diff > epsilon;
58 |
59 | iter = iter+1;
60 |
61 | if (iter > obj.m)
62 | iter =1;
63 | epsilon = epsilon/2;
64 | end
65 | end
66 |
67 | end
68 |
69 |
70 | function NLP = SSA_optimize(x,OKp,obj)
71 | % Details: Function to be optimized
72 | %
73 | % inputs:
74 | % x - Input value
75 | % OKp - Leave-one-out ordinary Kriging models
76 | % obj - Ordinary kriging class object
77 | %
78 | % outputs:
79 | % NLP - Value to be optimized
80 |
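% Departure-times-crowding objective (maximized by minimizing its negative):
%   NLP(x) = -(mu(x) - mu_{-p}(x))^2 * sum_i ||x - x_i||^2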
81 | val = 0.0;
82 | for i=1:obj.m
83 | val = val + norm(x-obj.X(i,:))^2;
84 | end
85 | CMD = val;
86 | [mu_hat,~] = predict(obj,x);
87 | [mu_hatp,~] = predict(OKp,x);
88 | Delta = (mu_hat - mu_hatp)^2;
89 |
90 | NLP = -Delta * CMD;
91 | end
92 |
93 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/TEAD_function.m:
--------------------------------------------------------------------------------
1 | %% Taylor expansion-based adaptive design (TEAD)
2 | % Mo, Shaoxing, et al. "A Taylor expansion‐based adaptive design strategy for global surrogate modeling with applications in groundwater modeling."
3 | % Water Resources Research 53.12 (2017): 10802-10823.
4 | function x_new = TEAD_function(obj,A, strategy)
5 | % Details: Starting function for TEAD sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | n = size(obj.X,2);
16 | lb = A(1,:);
17 | ub = A(2,:);
18 | % Obtain gradient information
19 | delta_s = central_difference_gradient(obj);
20 |
21 | % Generate candidate points
22 | number_candidate_points = 20000*n;
23 | x_cand = lhs_scaled(number_candidate_points,lb,ub);
24 |
25 | % Obtain nearest neighbor
26 | [Idx,D] = knnsearch(obj.X, x_cand);
27 |
28 | % Obtain weight
29 | Lmax = max(max(pdist2(obj.X, obj.X)));
30 | w = ones(number_candidate_points,1) - D./Lmax;
31 |
32 | % First-order Taylor expansion about the nearest sample point, and its residual
33 | t = zeros(number_candidate_points,1);
34 | s = zeros(number_candidate_points,1);
35 | Res = zeros(number_candidate_points,1);
36 | for i=1:number_candidate_points
37 | g = x_cand(i,:) - obj.X(Idx(i,1),:); % offset from the nearest sample to the candidate
38 | t(i,:) = obj.Y(Idx(i,1)) + delta_s(Idx(i,1),:)*g';
39 | s(i,:) = obj.predict(x_cand(i,:));
40 | Res(i,1) = norm(s(i,:) - t(i,:));
41 | end
42 |
44 |
45 | % Compute J
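% Hybrid score: J(x) = D(x)/max(D) + w(x) * Res(x)/max(Res)
% (global exploration term plus weighted Taylor-residual exploitation term)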
46 | J = D/max(D) + w.* (Res / max(Res));
47 |
48 | % Find max J
49 | [~,idx_max] = max(J);
50 |
51 | % New point
52 | x_new = x_cand(idx_max,:);
53 | end
54 |
55 |
56 |
57 | function delta_s = central_difference_gradient(obj)
58 | % Details: Approximate gradients at the sample points with a central difference scheme
59 | %
60 | % inputs:
61 | % obj - Ordinary kriging class object
62 | %
63 | % outputs:
64 | % delta_s - Array of approx. gradient values
65 |
66 |
67 | h = 1e-4;
68 | n = size(obj.X,2);
69 | delta_s = zeros(obj.m,n);
70 | for i=1:obj.m
71 | for j=1:n
72 | % Perturb one coordinate at a time; shifting every coordinate at once
73 | % yields a directional derivative, not the gradient
74 | X_ip1 = obj.X(i,:); X_ip1(j) = X_ip1(j) + h;
75 | X_im1 = obj.X(i,:); X_im1(j) = X_im1(j) - h;
76 | Y_ip1 = obj.predict(X_ip1);
77 | Y_im1 = obj.predict(X_im1);
78 | delta_s(i,j) = (Y_ip1 - Y_im1)/(2*h);
79 | end
80 | end
81 | end
82 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/WAE_function.m:
--------------------------------------------------------------------------------
1 | %% Weighted Accumulative Error (WAE)
2 | % Jiang, Ping, et al. "A novel sequential exploration-exploitation sampling strategy for global metamodeling."
3 | % IFAC-PapersOnLine 48.28 (2015): 532-537.
4 | function x_new = WAE_function(obj,A, strategy)
5 | % Details: Starting function for WAE sampling.
6 | %
7 | % inputs:
8 | % obj - Ordinary kriging class object
9 | % A - Definition of parametric space
10 | % strategy - Optimization strategy to be used
11 | %
12 | % outputs:
13 | % x_new - New found sample point
14 |
15 | for i=1:obj.m
16 | Xp = obj.X;
17 | Xp(i,:) = [];
18 | Yp = obj.Y;
19 | Yp(i) = [];
20 |
21 | M_hatp{i} = OK_model(obj.auto_correlation_function,Xp,Yp,obj.theta_opti_technique);
22 | end
23 |
24 | d_WAE = WAE_threshold(obj);
25 | AA = [];
26 | b = [];
27 | Aeq = [];
28 | beq = [];
29 |
30 | lb = A(1,:);
31 | ub = A(2,:);
32 | fun = @(x) WAE_optimize(obj,M_hatp,x);
33 | nonlcon = @(x)WAE_con(obj,d_WAE,x);
34 |
35 | strategy = obj.theta_opti_technique; % note: the input strategy is overridden by the model's optimizer setting
36 | x_new = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,nonlcon);
37 |
38 |
39 | end
40 |
41 | function d_WAE = WAE_threshold(obj)
42 | % Details: Define distance threshold needed for optimization
43 | %
44 | % inputs:
45 | % obj - Ordinary kriging class object
46 | %
47 | % outputs:
48 | % d_WAE - Distance threshold
49 |
50 | alpha = 0.3;
51 | WAE_min_distances = zeros(obj.m,1);
52 |
53 | for i=1:obj.m
54 | Xp = obj.X;
55 | Xp(i,:) = [];
56 | [~,WAE_min_distances(i)] = knnsearch(Xp, obj.X(i,:));
57 | end
58 |
59 | d_WAE = mean(WAE_min_distances)*alpha;
60 | end
61 |
62 |
63 | function e_WAE = WAE_optimize(obj, M_hatp, x)
64 | % Details: Function to be optimized to define new sample
65 | %
66 | % inputs:
67 | % obj - Ordinary kriging class object
68 | % M_hatp - Cell array of leave-one-out ordinary kriging class objects
69 | % x - Input value
70 | % outputs:
71 | % e_WAE - Value to be optimized
72 |
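% Accumulative error: e_WAE(x) = sqrt( sum_i w_i(x) * (mu_{-i}(x) - mu(x))^2 ),
% with normalized distance-based weights w_i(x) proportional to exp(-||x - x_i||).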
73 | w_vec = zeros(obj.m,1);
74 |
75 | mu_hat_m1 = zeros(obj.m,1);
76 | for i=1:obj.m
77 | w_vec(i) = exp(-norm(x- obj.X(i,:)));
78 | end
79 |
80 | wi = w_vec/sum(w_vec);
81 |
82 | [mu_hat,~] = predict(obj,x);
83 | for i=1:obj.m
84 | [mu_hat_m1(i),~] = predict(M_hatp{i},x);
85 | end
86 |
87 | e_WAE = 0;
88 | for i=1:obj.m
89 | e_WAE = e_WAE + wi(i)*(mu_hat_m1(i)-mu_hat)^2;
90 | end
91 |
92 | e_WAE = sqrt(e_WAE);
93 |
94 | end
95 |
96 | function [c, ceq] = WAE_con(obj,d,x)
97 | % Details: Define arrays needed for constraint optimization
98 | %
99 | % inputs:
100 | % obj - Ordinary kriging class object
101 | % d - distance constraint
102 | % x - Input value
103 | %
104 | % outputs:
105 | % c - Inequality array
106 | % ceq - Equality array
107 |
108 | ceq = [];
109 |
110 | for i=1:obj.m
111 | c(i) = d - norm(obj.X(i,:) - x);
112 | end
113 | end
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
--------------------------------------------------------------------------------
/src/adaptive_techniques/WEI_function.m:
--------------------------------------------------------------------------------
1 | %% Weighted expected improvement (WEI)
2 | % Jones, Donald R., Matthias Schonlau, and William J. Welch. "Efficient global optimization of expensive black-box functions."
3 | % Journal of Global optimization 13.4 (1998): 455-492.
4 | function WEI_min = WEI_function(obj,x)
5 | % Details: WEI objective; returns the negative weighted expected improvement
6 | % at x, to be minimized by the chosen optimizer.
7 | %
8 | % inputs:
9 | % obj - Ordinary kriging class object
10 | % x - Input value
11 | %
12 | % outputs:
13 | % WEI_min - Value to be optimized
14 | beta_hat = compute_beta_hat(obj,obj.R);
15 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
16 |
17 | mu_hat = compute_mu_hat(obj,obj.R,beta_hat,x(:),obj.theta);
18 | sigma_Y_sq_hat = compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x(:),obj.theta,obj.R);
19 |
20 |
21 | ymin = min(obj.Y);
22 | val = (ymin-mu_hat)/sqrt(sigma_Y_sq_hat);
23 | w = 0.2;
24 |
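% Weighted EI: WEI(x) = w*(ymin - mu)*Phi(u) + (1-w)*s*phi(u);
% w = 0.2 (< 0.5) emphasizes the exploration term.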
25 | WEI = w * (ymin - mu_hat) * normcdf(val) + (1-w) * sqrt(sigma_Y_sq_hat) * normpdf(val);
26 |
27 | WEI_min=-WEI;
28 | end
29 |
30 | %% WEI
31 | function WEI_min = adaptive_WEI(obj,x)
32 | % Details: Function to be optimized to find new sample
33 | %
34 | % inputs:
35 | % obj - Ordinary kriging class object
36 | % x - Input value
37 | %
38 | % outputs:
39 | % WEI_min - Value to be optimized
40 |
41 | beta_hat = compute_beta_hat(obj,obj.R);
42 | sigma_sq_hat = compute_sigma_sq_hat(obj,obj.R,beta_hat);
43 |
44 | mu_hat = compute_mu_hat(obj,obj.R,beta_hat,x(:),obj.theta);
45 | sigma_Y_sq_hat = compute_sigma_Y_sq_hat(obj,sigma_sq_hat,x(:),obj.theta,obj.R);
46 |
47 |
48 | ymin = min(obj.Y);
49 | val = (ymin-mu_hat)/sqrt(sigma_Y_sq_hat);
50 | w = 0.2;
51 | WEI = w * (ymin - mu_hat) * normcdf(val) + (1-w) * sqrt(sigma_Y_sq_hat) * normpdf(val);
52 |
53 | WEI_min=-WEI;
54 | end
55 |
56 |
57 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Boha_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Boha_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = [-100;-100]; % lower bound
7 | ub = [100;100];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(6,lb,ub);
12 |
13 | M = @(xx) Boha_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Boha_function(xx)
26 |
27 | x1 = xx(1);
28 | x2 = xx(2);
29 |
30 | term1 = x1^2;
31 | term2 = 2*x2^2;
32 | term3 = -0.3 * cos(3*pi*x1);
33 | term4 = -0.4 * cos(4*pi*x2);
34 |
35 | y = term1 + term2 + term3 + term4 + 0.7;
36 | end
37 |
38 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Booth_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Booth_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = [-10;-10]; % lower bound
7 | ub = [10;10];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(6,lb,ub);
12 |
13 | M = @(xx) Booth_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Booth_function(xx)
26 |
27 | x1 = xx(1);
28 | x2 = xx(2);
29 |
30 | term1 = (x1 + 2*x2 - 7)^2;
31 | term2 = (2*x1 + x2 - 5)^2;
32 |
33 | y = term1 + term2;
34 | end
35 |
36 |
37 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Branin_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Branin_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = [-5;0]; % lower bound
7 | ub = [10;15];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10,lb,ub);
12 |
13 | M = @(xx) Branin_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 | function [y] = Branin_function(xx)
25 |
26 |
27 | x1 = xx(1);
28 | x2 = xx(2);
29 |
30 |
31 | a = 1;
32 | b = 5.1 / (4*pi^2);
33 | c = 5 / pi;
34 | r = 6;
35 | s = 10;
36 | t = 1 / (8*pi);
37 | term1 = a * (x2 - b*x1^2 + c*x1 - r)^2;
38 | term2 = s*(1-t)*cos(x1);
39 |
40 | y = term1 + term2 + s;
41 | end
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Bukin_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Bukin_2d() % name matched to the file name
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = [-20;-5]; % lower bound
7 | ub = [15;5];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Bukin_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Bukin_function(xx)
26 | x1 = xx(1);
27 | x2 = xx(2);
28 |
29 | term1 = 20 * sqrt(abs(x2 * 0.01*x1^2));
30 | term5 = 20 * abs(cos(x2 - 0.01*x1^2));
31 | term3 = 5 * sqrt(abs(x2^2 * x1));
32 |
33 | % (term2 and term4 of an earlier formulation are intentionally unused and omitted)
35 |
36 | y = term1 + term3 + term5;
37 | end
38 |
39 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Colville_4d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Colville_4d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 4;
6 | lb = -2*ones(n,1); % lower bound
7 | ub = 2*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10*n,lb,ub);
12 |
13 | M = @(xx) Colville_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Colville_function(xx)
26 | x1 = xx(1);
27 | x2 = xx(2);
28 | x3 = xx(3);
29 | x4 = xx(4);
30 |
31 | if x1 < 0 || x2 < 0
32 | term1 = -100 * ((x1-x2)^3)*sin(x3);
33 | term2 = (x1-4)^2;
34 | else
35 | term1 = 100 * (x1^2-x2)^2;
36 | term2 = (x1-1)^2;
37 | end
38 | term3 = (x3-1)^2;
39 | term4 = 90 * (x3^2-x4)^2;
40 | term5 = 10.1 * ((x2-1)^2 + (x4-1)^2);
41 | term6 = 19.8*(x2-1)*(x4-1);
42 |
43 | y = term1 + term2 + term3 + term4 + term5 + term6;
44 | end
45 |
46 |
--------------------------------------------------------------------------------
/src/benchmark_functions/DampedCos_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = DampedCos_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = 0*ones(n,1); % lower bound
7 | ub = 1*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(5,lb,ub);
12 | % x = [-0.5; 0.5; 1.5];
13 |
14 | M = @(xx) damped_cos_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function [y] = damped_cos_function(x)
27 |
28 |
29 | fact1 = exp(-1.4*x);
30 | fact2 = cos(3.5*pi*x);
31 |
32 | y = fact1 * fact2;
33 |
34 | end
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Detpep_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Detpep_3d( )
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 3;
6 | lb = 0*ones(n,1); % lower bound
7 | ub = 0.5*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) detpep10curv(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 | end
20 |
21 |
22 | function [y] = detpep10curv(xx)
23 |
24 | x1 = xx(1);
25 | x2 = xx(2);
26 | x3 = xx(3);
27 |
28 | term1 = 4 * (x1 - 2 + 8*x2 - 8*x2^2);
29 | term2 = (4*x2)^2;
30 | term3 = 16 * sqrt(x3+1) * (2*x1-1);
31 |
32 | y = 0.6*term2 + sin(term3+ term1);
33 |
34 | end
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/src/benchmark_functions/DixonP_4d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = DixonP_4d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 4; % four-dimensional problem
6 | lb = [-10;-10;-10;-10]; % lower bound
7 | ub = [10;10;10;10];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10,lb,ub);
12 |
13 | M = @(xx) dixonp_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = dixonp_function(xx)
26 | x1 = xx(1);
27 | d = length(xx);
28 | term1 = (x1-1)^2;
29 |
30 | sum = 0;
31 | for ii = 2:d
32 | xi = xx(ii);
33 | xold = xx(ii-1);
34 | new = ii * (2*xi^2 - xold)^2;
35 | sum = sum + new;
36 | end
37 |
38 | y = term1 + sum;
39 | end
40 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Drop_wave_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Drop_wave_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = -0.6*ones(n,1); % lower bound
7 | ub = 0.9*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Drop_wave_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Drop_wave_function(xx)
26 | x1 = xx(1);
27 | x2 = xx(2);
28 |
29 |
30 | frac1 = 1 + cos(12*sqrt(x1^2+x2^2));
31 | frac2 = 0.5*(x1^2+x2^2) + 2;
32 |
33 | y = -frac1/frac2;
34 | end
35 |
36 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Eggholder_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Eggholder_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = -216*ones(n,1); % lower bound
7 | ub = 216*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Eggholder_function(xx); % match the local function name below
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Eggholder_function(xx)
26 | x1 = xx(1);
27 | x2 = xx(2);
28 |
29 | term1 = -(x2+47) * sin(sqrt(abs(x2+x1/2+47)));
30 | term2 = -x1 * sin(sqrt(abs(x1-(x2+47))));
31 |
32 | y = term1 + term2;
33 | end
--------------------------------------------------------------------------------
/src/benchmark_functions/Exploit_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Exploit_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -12*ones(n,1); % lower bound
7 | ub = 12*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(5,lb,ub);
12 |
13 | M = @(xx) Exploit_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Exploit_function(xx)
26 | if xx<0
27 | % term1 = 0.25*xx^4;
28 | term1 = -sinh(0.3*xx) ;
29 | elseif xx<5
30 | term1 = -5*xx;
31 | elseif xx < 8
32 | term1 = (25/3)*xx - 200/3;
33 | else
34 | term1 = sinh(0.9*(xx-8)) ;
35 | end
36 | y = term1 ;
37 | end
38 |
39 |
40 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Franke_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Franke_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = -0.5*ones(n,1); % lower bound
7 | ub = 1*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10,lb,ub);
12 |
13 | M = @(xx) franke_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function [y] = franke_function(xx)
26 |
27 |
28 | x1 = xx(1);
29 | x2 = xx(2);
30 |
31 | term1 = 0.75 * exp(-(9*x1-2)^2/4 - (9*x2-2)^2/4);
32 | term2 = 0.75 * exp(-(9*x1+1)^2/49 - (9*x2+1)/10);
33 | term3 = 0.5 * exp(-(9*x1-7)^2/4 - (9*x2-3)^2/4);
34 | term4 = -0.2 * exp(-(9*x1-4)^2 - (9*x2-7)^2);
35 |
36 | y = 2.5*term1 + 1.5*term2 + term3 + 8*term4;
37 |
38 | end
--------------------------------------------------------------------------------
/src/benchmark_functions/Gramacy_Lee3_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Gramacy_Lee3_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -1.5*ones(n,1); % lower bound
7 | ub = 6*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Gramacy_Lee_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Gramacy_Lee_function(xx)
26 |
27 | term1 = 50*sin(6*pi*xx) / (cos(xx));
28 | term2 = (xx-1)^4;
29 |
30 | if xx>0.5 && xx<=2.5
31 | term2 = 0.8*(xx-1)^4;
32 | term1 = 20*sin(6*pi*xx)/(3*(xx));
33 | epsilon = -100.0;
34 | elseif xx>2.5
35 | term2 = 0.5*(xx-1)^3;
36 | term1 = 100*sin(4*pi*xx)/((xx));
37 | epsilon = -50.0;
38 | else
39 |
40 | epsilon= -30.0;
41 | end
42 | y = term1 + term2+epsilon;
43 | end
44 |
45 |
46 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Gramacy_Lee_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Gramacy_Lee_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -1.5*ones(n,1); % lower bound
7 | ub = 1.0*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Gramacy_Lee_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Gramacy_Lee_function(xx)
26 |
27 | term1 = 10*sin(6*pi*xx) / (2*cos(xx));
28 | term2 = (xx-1)^4;
29 |
30 |
31 | y = term1 + term2;
32 | end
33 |
34 |
35 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Griewank_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Griewank_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = -4*ones(n,1); % lower bound
7 | ub = 0*ones(n,1);
8 |
9 |
10 |
11 | % Initial samples
12 | x = scaled_TPLHD(n*10,lb,ub);
13 |
14 | M = @(xx) Griewank_2d_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 | function y = Griewank_2d_function(xx)
26 |
27 |
28 | d = length(xx);
29 | sum = 0;
30 | prod = 1;
31 |
32 | for ii = 1:d
33 | xi = xx(ii);
34 | sum = sum + xi^3/4000;
35 | prod = prod * cos(xi^2/sqrt(ii));
36 | end
37 |
38 | y = sum - prod + 1;
39 | end
40 |
41 |
42 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Griewank_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Griewank_3d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 3;
6 | lb = -4*ones(n,1); % lower bound
7 | ub = 0*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Griewank_3d_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Griewank_3d_function(xx)
26 |
27 | d = length(xx);
28 | sum = 0;
29 | prod = 1;
30 |
31 | for ii = 1:d
32 | xi = xx(ii);
33 | sum = sum + xi^2/4000;
34 | prod = prod * cos(xi/sqrt(ii));
35 | end
36 |
37 | y = sum - prod + 1;
38 | end
39 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Hartmann_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Hartmann_3d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 3;
6 | lb = -1*ones(n,1); % lower bound
7 | ub = 1*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10*n,lb,ub);
12 |
13 | M = @(xx) Hartmann_3d_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Hartmann_3d_function(xx)
26 | alpha = [1.0, 1.2, 3.0, 3.2]';
27 | A = [3.0, 10, 30;
28 | 0.1, 10, 35;
29 | 3.0, 10, 30;
30 | 0.1, 10, 35];
31 | P = 10^(-4) * [3689, 1170, 2673;
32 | 4699, 4387, 7470;
33 | 1091, 8732, 5547;
34 | 381, 5743, 8828];
35 |
36 | outer = 0;
37 | for ii = 1:4
38 | inner = 0;
39 | for jj = 1:3
40 | xj = xx(jj);
41 | Aij = A(ii, jj);
42 | Pij = P(ii, jj);
43 | inner = inner + Aij*(xj-Pij)^2;
44 | end
45 | new = alpha(ii) * exp(-inner);
46 | outer = outer + new;
47 | end
48 |
49 | y = -outer;
50 |
51 | end
52 |
53 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Hartmann_6d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Hartmann_6d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 | n = 6;
3 | lb = 0*ones(n,1); % lower bound
4 | ub = 1*ones(n,1);
5 |
6 |
7 | % Initial samples
8 | x = scaled_TPLHD(n*10,lb,ub);
9 |
10 | M = @(xx) Hartmann_6d_function(xx);
11 |
12 | y = zeros(size(x,1),1);
13 | for i=1:size(x,1)
14 | y(i,1) = M(x(i,:));
15 | end
16 |
17 |
18 |
19 | end
20 |
21 |
22 | function y = Hartmann_6d_function(xx)
23 | alpha = [1.0, 1.2, 3.0, 3.2]';
24 | A = [10, 3, 17, 3.5, 1.7, 8;
25 | 0.05, 10, 17, 0.1, 8, 14;
26 | 3, 3.5, 1.7, 10, 17, 8;
27 | 17, 8, 0.05, 10, 0.1, 14];
28 | P = 10^(-4) * [1312, 1696, 5569, 124, 8283, 5886;
29 | 2329, 4135, 8307, 3736, 1004, 9991;
30 | 2348, 1451, 3522, 2883, 3047, 6650;
31 | 4047, 8828, 8732, 5743, 1091, 381];
32 |
33 | outer = 0;
34 | for ii = 1:4
35 | inner = 0;
36 | for jj = 1:6
37 | xj = xx(jj);
38 | Aij = A(ii, jj);
39 | Pij = P(ii, jj);
40 | inner = inner + Aij*(xj-Pij)^2;
41 | end
42 | new = alpha(ii) * exp(-inner);
43 | outer = outer + new;
44 | end
45 |
46 | y = -(2.58 + outer) / 1.94;
47 |
48 | end
49 |
50 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Ishigami_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Ishigami_3d( )
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 3;
6 | lb = 0*ones(n,1); % lower bound
7 | ub = 4*pi*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) ishi_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 | end
20 |
21 |
22 | function [y] = ishi_function(xx)
23 |
24 | x1 = xx(1);
25 | x2 = xx(2);
26 | x3 = xx(3);
27 |
28 | % Standard Ishigami coefficients (the nargin guard was vestigial:
29 | % this local function always receives exactly one argument)
30 | a = 7;
31 | b = 0.1;
34 |
35 | term1 = sin(x1);
36 | term2 = a * (sin(x2))^2;
37 | term3 = b * x3^4 * sin(x1);
38 |
39 | y = term1 + term2 + term3;
40 |
41 | end
42 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Langermann_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Langermann_3d()
2 | % Langermann test case (cf. https://www.sfu.ca/~ssurjano/langer.html); the function implemented below is a smooth modified variant
3 | addpath('help_functions')
4 | addpath('TPLHD')
5 | n = 3;
5 | lb = 0*ones(n,1); % lower bound
6 | ub = 10*ones(n,1);
7 |
8 |
9 | % Initial samples
10 | x = scaled_TPLHD(n*10,lb,ub);
11 |
12 | M = @(xx) Langermann_3d_function(xx);
13 |
14 | y = zeros(size(x,1),1);
15 | for i=1:size(x,1)
16 | y(i,1) = M(x(i,:));
17 | end
18 |
19 |
20 | end
21 |
22 |
23 | function y = Langermann_3d_function(xx) % renamed to match the handle defined above
24 |
25 | x1 = xx(1);
26 | x2 = xx(2);
27 | x3 = xx(3);
28 |
29 | term1 = exp(-2/(x1^1.75));
30 | term2 = exp(-2/(x2^1.5));
31 | term3 = exp(-2/(x3^1.25));
32 |
33 | y = 100 * (term1 + term2 + term3);
34 |
35 | end
37 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Micha_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Micha_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = [0;0]; % lower bound
7 | ub = [pi;pi];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Micha_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Micha_function(xx)
26 | m = 4; % steepness parameter (vestigial nargin guard removed)
29 |
30 | d = length(xx);
31 | sum = 0;
32 |
33 | for ii = 1:d
34 | xi = xx(ii);
35 | new = -sin(2*xi) * (sin(ii*xi^2/(pi)))^(2*m);
36 | sum = sum + new;
37 | end
38 |
39 | y = -sum- 0.2*xx(1);
40 | end
41 |
42 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Michalewicz_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Michalewicz_3d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 3;
6 | lb = 0*ones(n,1); % lower bound
7 | ub = pi*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Michalewicz_3d_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Michalewicz_3d_function(xx)
26 | m = 10; % steepness parameter (vestigial nargin guard removed)
29 |
30 | d = length(xx);
31 | sum = 0;
32 |
33 | for ii = 1:d
34 | xi = xx(ii);
35 | new = sin(xi) * (sin(ii*xi^2/pi))^(2*m);
36 | sum = sum + new;
37 | end
38 |
39 | y = -sum;
40 | end
41 |
42 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Mod_Gramacy_Lee_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Mod_Gramacy_Lee_1d() % name matched to the file; Gramacy_Lee3_1d already exists
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -1.5*ones(n,1); % lower bound
7 | ub = 6*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Gramacy_Lee_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Gramacy_Lee_function(xx)
26 |
27 | term1 = 50*sin(6*pi*xx) / (cos(xx));
28 | term2 = (xx-1)^4;
29 |
30 | if xx>0.5 && xx<=2.5
31 | term2 = 0.8*(xx-1)^4;
32 | term1 = 20*sin(6*pi*xx)/(3*(xx));
33 | epsilon = -100.0;
34 | elseif xx>2.5
35 | term2 = 0.5*(xx-1)^3;
36 | term1 = 100*sin(4*pi*xx)/((xx));
37 | epsilon = -50.0;
38 | else
39 |
40 | epsilon= -30.0;
41 | end
42 | y = term1 + term2+epsilon;
43 | end
44 |
45 |
46 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Perm0db_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Perm0db_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -1*ones(n,1); % lower bound
7 | ub = 2.0*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | % x = scaled_TPLHD(3,lb,ub);
12 | x = [-0.5; 0.5; 1.5; 1.7];
13 |
14 | M = @(xx) perm0db_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function [y] = perm0db_function(xx)
27 |
28 |
29 | b = 10; % shift parameter (vestigial nargin guard removed)
32 |
33 | d = length(xx);
34 | outer = 0;
35 |
36 | for ii = 1:d
37 | inner = 0;
38 | for jj = 1:d
39 | xj = xx(jj);
40 | inner = inner + (jj+b)*(xj^ii-(1/jj)^ii);
41 | end
42 | outer = outer + inner^2;
43 | end
44 |
45 | y = outer;
46 |
47 |
48 | end
49 |
50 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Rastrigin_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Rastrigin_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = -6*ones(n,1); % lower bound
7 | ub = 2*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Rastrigin_2d_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 | end
22 |
23 |
24 | function y = Rastrigin_2d_function(xx)
25 |
26 |
27 | d = length(xx);
28 | sum = 0;
29 |
30 | if xx(1)<-2.5 && xx(2) <-2.5
31 | for ii = 1:d
32 | xi = xx(ii);
33 | sum = sum + (0.2*xi^3 - 10*cos(2*pi*xi));
34 | end
35 | else
36 | for ii = 1:d
37 | xi = xx(ii);
38 | sum = sum + 0.2*xi^3+(3*abs(xi) - 30*sin(pi*abs(xi)));
39 | end
40 | end
41 |
42 | y = 10*d + sum;
43 | end
44 |
45 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Rosenbrock_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Rosenbrock_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = -2.5*ones(n,1); % lower bound
7 | ub = 2.5*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10,lb,ub);
12 |
13 | M = @(xx) rosenbrock_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function [y] = rosenbrock_function(xx)
26 |
27 | d = length(xx);
28 | sum = 0;
29 | for ii = 1:(d-1)
30 | xi = xx(ii);
31 | xnext = xx(ii+1);
32 | new = 100*(xnext-xi^2)^2 + (xi-1)^2;
33 | if xx(2) > 1.5
34 | new = new + 700*xx(2)*xx(1);
35 | end
36 | sum = sum + new;
37 | end
38 |
39 | y = sum;
40 | end
41 |
--------------------------------------------------------------------------------
/src/benchmark_functions/SHCamel_2d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = SHCamel_2d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 2;
6 | lb = [-2;-1]; % lower bound
7 | ub = [2;1];
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(10,lb,ub);
12 |
13 | M = @(xx) SHCamel_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function [y] = SHCamel_function(xx)
26 |
27 | x1 = xx(1);
28 | x2 = xx(2);
29 |
30 | term1 = (4-2.1*x1^2+(x1^4)/3) * x1^2;
31 | term2 = x1*x2;
32 | term3 = (-4+4*x2^2) * x2^2;
33 |
34 | y = term1 + term2 + term3;
35 |
36 | end
37 |
38 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Schwefel_5d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Schwefel_5d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 5;
6 | lb = -100*ones(n,1); % lower bound
7 | ub = 100*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Schwefel_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Schwefel_function(xx)
26 | d = length(xx);
27 | sum = 0;
28 | for ii = 1:d
29 | xi = xx(ii);
30 | sum = sum + xi*sin(sqrt(abs(xi)));
31 | end
32 |
33 | y = 420*d - sum;
34 | end
--------------------------------------------------------------------------------
/src/benchmark_functions/Shekel_4d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Shekel_4d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 4;
6 | lb = 0*ones(n,1); % lower bound
7 | ub = 10*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Shekel_4d_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Shekel_4d_function(xx)
26 | m = 10;
27 | b = 0.1 * [1, 2, 2, 4, 4, 6, 3, 7, 5, 5]';
28 | C = [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0;
29 | 4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6;
30 | 4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0;
31 | 4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6];
32 |
33 | outer = 0;
34 | for ii = 1:m
35 | bi = b(ii);
36 | inner = 0;
37 | for jj = 1:4
38 | xj = xx(jj);
39 | Cji = C(jj, ii);
40 | inner = inner + (xj-Cji)^2;
41 | end
42 | outer = outer + 1/(inner+bi);
43 | end
44 |
45 | y = -outer;
46 |
47 | end
48 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Single_hump_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Single_hump_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -1.5*ones(n,1); % lower bound
7 | ub = 5.0*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | % x = scaled_TPLHD(5,lb,ub);
12 | x = [-1.5;0.;1.4;2.5;3.8;4.4];
13 | %x = [4.4;4.5;4.6];
14 | M = @(xx) Single_hump_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function y = Single_hump_function(x)
27 |
28 | y = 3*x -0.5./ ((x-4.75).^2 + .04) -0.07./ ((x-4.45).^2 + .005) - 6;
29 | end
30 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Sphere_3d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Sphere_3d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 3;
6 | % lb = [-5.12;-5.12; -5.12]; % lower bound
7 | % ub = [5.12;5.12; 5.12];
8 | lb = -5.12*ones(n,1); % lower bound
9 | ub = 5.12*ones(n,1);
10 |
11 | % Initial samples
12 | x = scaled_TPLHD(10,lb,ub);
13 |
14 | M = @(xx) sphere_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function y = sphere_function(xx)
27 | d = length(xx);
28 | sum = 0;
29 | for ii = 1:d
30 | xi = xx(ii);
31 | sum = sum + xi^2;
32 | end
33 |
34 | y = sum;
35 | end
36 |
37 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Sphere_4d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Sphere_4d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 4;
6 | % domain: [-5.12, 5.12] in each of the n dimensions
7 | % (https://www.sfu.ca/~ssurjano/spheref.html)
8 | lb = -5.12*ones(n,1); % lower bound
9 | ub = 5.12*ones(n,1);
10 |
11 | % Initial samples
12 | x = scaled_TPLHD(10,lb,ub);
13 |
14 | M = @(xx) sphere_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function y = sphere_function(xx)
27 | d = length(xx);
28 | sum = 0;
29 | for ii = 1:d
30 | xi = xx(ii);
31 | sum = sum + xi^2;
32 | end
33 |
34 | y = sum;
35 | end
36 |
37 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Sphere_5d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Sphere_5d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 5;
6 | % domain: [-5.12, 5.12] in each of the n dimensions
7 | % (https://www.sfu.ca/~ssurjano/spheref.html)
8 | lb = -5.12*ones(n,1); % lower bound
9 | ub = 5.12*ones(n,1);
10 |
11 | % Initial samples
12 | x = scaled_TPLHD(10,lb,ub);
13 |
14 | M = @(xx) sphere_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function y = sphere_function(xx)
27 | d = length(xx);
28 | sum = 0;
29 | for ii = 1:d
30 | xi = xx(ii);
31 | sum = sum + xi^2;
32 | end
33 |
34 | y = sum;
35 | end
36 |
37 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Styblinski_Tang_5d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Styblinski_Tang_5d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 5;
6 | lb = -5*ones(n,1); % lower bound
7 | ub = 5*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Styblinski_Tang_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Styblinski_Tang_function(xx)
26 | d = length(xx);
27 | sum = 0;
28 | for ii = 1:d
29 | xi = xx(ii);
30 | new = xi^4 - 16*xi^2 + 5*xi;
31 | sum = sum + new;
32 | end
33 |
34 | y = sum/2;
35 |
36 | end
37 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Two_humps_function_1d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub,x, M ] = Two_humps_function_1d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 1;
6 | lb = -0.5*ones(n,1); % lower bound
7 | ub = 5.0*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | % x = scaled_TPLHD(n*8,lb,ub);
12 | % x = [0.0;0.92;1.56250000000000;2.25000000000000;2.93750000000000;3.62500000000000;4.31250000000000;5];
13 | x = [3.2;3.4;3.6];
14 | M = @(xx) Two_humps_function(xx);
15 |
16 | y = zeros(size(x,1),1);
17 | for i=1:size(x,1)
18 | y(i,1) = M(x(i,:));
19 | end
20 |
21 |
22 |
23 | end
24 |
25 |
26 | function y = Two_humps_function(x)
27 |
28 | y = 5*x +0.05./ ((x-0.45).^2 + .002) - 0.5 ./ ((x-3.5).^2 + .03) - 6;
29 | end
30 |
31 |
32 |
--------------------------------------------------------------------------------
/src/benchmark_functions/Zakharov_4d.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = Zakharov_4d()
2 | addpath('help_functions')
3 | addpath('TPLHD')
4 |
5 | n = 4;
6 | lb = -10*ones(n,1); % lower bound
7 | ub = 10*ones(n,1);
8 |
9 |
10 | % Initial samples
11 | x = scaled_TPLHD(n*10,lb,ub);
12 |
13 | M = @(xx) Zakharov_function(xx);
14 |
15 | y = zeros(size(x,1),1);
16 | for i=1:size(x,1)
17 | y(i,1) = M(x(i,:));
18 | end
19 |
20 |
21 |
22 | end
23 |
24 |
25 | function y = Zakharov_function(xx)
26 | d = length(xx);
27 | sum1 = 0;
28 | sum2 = 0;
29 |
30 | for ii = 1:d
31 | xi = xx(ii);
32 | sum1 = sum1 + xi^2;
33 | sum2 = sum2 + 0.5*ii*xi;
34 | end
35 |
36 | y = sum1 + sum2^2 + sum2^4;
37 | end
38 |
39 |
40 |
--------------------------------------------------------------------------------
/src/call_benchmarkFunctions.m:
--------------------------------------------------------------------------------
1 | function [ y, lb, ub ,x, M ] = call_benchmarkFunctions( ID )
2 | % Details: Utility function that finds the correct benchmark function from
3 | % input prompt
4 | %
5 | % inputs:
6 | % ID - Input prompt call ID
7 | %
8 | % outputs:
9 | % y - Sample outputs
10 | % lb - Lower bound for sampling
11 | % ub - Upper bound for sampling
12 | % x - Sample inputs
13 | % M - Matlab function to call
14 |
15 | %%% 1D Functions
16 | % Single Hump function
17 | if ID ==1
18 | [ y, lb, ub, x, M ] = Single_hump_1d();
19 | elseif ID == 2
20 | % Modified Two humps function
21 | [ y, lb, ub,x, M ] = Two_humps_function_1d();
22 | % Gramacy-Lee https://www.sfu.ca/~ssurjano/grlee12.html
23 | elseif ID ==3
24 | [ y, lb, ub, x, M ] = Gramacy_Lee_1d();
25 | elseif ID == 4
26 | % Modified Gramacy-Lee
27 | [ y, lb, ub, x, M ] = Mod_Gramacy_Lee_1d();
28 | elseif ID == 5
29 | % Perm function 1,0 https://www.sfu.ca/~ssurjano/permdb.html
30 | [ y, lb, ub, x, M ] = Perm0db_1d();
31 | elseif ID == 6
32 | % Damped Cosine function https://www.sfu.ca/~ssurjano/santetal03dc.html
33 | [ y, lb, ub, x, M ] = DampedCos_1d();
34 | elseif ID == 7
35 | % Exploit function
36 | [ y, lb, ub, x, M ] = Exploit_1d();
37 |
38 |
39 |
40 | %%% 2D Functions
41 | % Michalewicz function https://www.sfu.ca/~ssurjano/michal.html
42 | elseif ID == 8
43 | [ y, lb, ub, x, M ] = Micha_2d();
44 | % Drop-Wave https://www.sfu.ca/~ssurjano/drop.html
45 | elseif ID == 9
46 | [ y, lb, ub, x, M ] = Drop_wave_2d();
47 | % Booth function https://www.sfu.ca/~ssurjano/booth.html
48 | elseif ID == 10
49 | [ y, lb, ub, x, M ] = Booth_2d();
50 | % Bohachevsky function https://www.sfu.ca/~ssurjano/boha.html
51 | elseif ID == 11
52 | [ y, lb, ub, x, M ] = Boha_2d();
53 | % Branin function https://www.sfu.ca/~ssurjano/branin.html
54 | elseif ID == 12
55 | [ y, lb, ub, x, M ] = Branin_2d();
56 | % Franke function https://www.sfu.ca/~ssurjano/franke2d.html
57 | elseif ID == 13
58 | [ y, lb, ub, x, M ] = Franke_2d();
59 | % Rosenbrock function https://www.sfu.ca/~ssurjano/rosen.html
60 | elseif ID == 14
61 | [ y, lb, ub, x, M ] = Rosenbrock_2d();
62 | % Six Hump Camel function https://www.sfu.ca/~ssurjano/camel6.html
63 | elseif ID == 15
64 | [ y, lb, ub, x, M ] = SHCamel_2d();
65 | % Rastrigin function https://www.sfu.ca/~ssurjano/rastr.html
66 | elseif ID == 16
67 | [ y, lb, ub, x, M ] = Rastrigin_2d();
68 | % Griewank function https://www.sfu.ca/~ssurjano/griewank.html
69 | elseif ID == 17
70 | [ y, lb, ub, x, M ] = Griewank_2d();
71 |
72 |
73 | %%% 3D Functions
74 | % Sphere 3D https://www.sfu.ca/~ssurjano/spheref.html
75 | elseif ID == 18
76 | [ y, lb, ub, x, M ] = Sphere_3d();
77 | % Hartmann https://www.sfu.ca/~ssurjano/hart3.html
78 | elseif ID == 19
79 | [ y, lb, ub, x, M ] = Hartmann_3d();
80 | % Ishigami https://www.sfu.ca/~ssurjano/ishigami.html
81 | elseif ID == 20
82 | [ y, lb, ub, x, M ] = Ishigami_3d();
83 |
84 | %%% 4D Functions
85 | % Sphere 4D https://www.sfu.ca/~ssurjano/spheref.html
86 | elseif ID == 21
87 | [ y, lb, ub, x, M ] = Sphere_4d();
88 | % Dixon-Price function https://www.sfu.ca/~ssurjano/dixonpr.html
89 | elseif ID == 22
90 | [ y, lb, ub, x, M ] = DixonP_4d();
91 |
92 |
93 |
94 | %%% 5D Functions
95 | % Sphere 5D https://www.sfu.ca/~ssurjano/spheref.html
96 | elseif ID == 23
97 | [ y, lb, ub, x, M ] = Sphere_5d();
98 |
99 |
100 | %%% 6D Functions
101 | % Hartmann https://www.sfu.ca/~ssurjano/hart6.html
102 | elseif ID == 24
103 | [ y, lb, ub ,x, M ] = Hartmann_6d();
104 | else
105 | error('Error. \nBenchmark ID unknown');
106 | end
107 |
108 |
109 | end
110 |
111 |
--------------------------------------------------------------------------------
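
Note: a minimal usage sketch (not part of the repository) that fetches a benchmark by its prompt ID and evaluates the returned model handle at a random point inside the bounds:

    [y, lb, ub, x, M] = call_benchmarkFunctions(12);   % P12: Branin function
    p = lb' + rand(1, numel(lb)).*(ub' - lb');         % bounds are column vectors
    disp(M(p))
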
/src/chooseSamplingMethod.m:
--------------------------------------------------------------------------------
1 | function [output] = chooseSamplingMethod(M, x, y,A_sampling, number_of_adaptive_iterations,max_iteration, opti_strategy,methodID)
2 | %%
3 | % Details: Defines the chosen adaptive sampling techniques
4 | %
5 | % inputs:
6 | % M - response function
7 | % x - samples in parametric space
8 | % y - observations
9 | % A_sampling - parameter space
10 | % number_of_adaptive_iterations - maximum number of iterations
11 | % max_iteration - number of repetitions
12 | % opti_strategy - optimization strategy used for the adaptive criteria
13 | % methodID - ID of the chosen adaptive sampling technique
14 | % outputs:
15 | % output - adaptive method, stored metamodels, final errors, single error
16 |
17 | %% Choose autocorrelation function
18 | %addpath('Autocorrelation_functions')
19 | % Matern 3/2 Autocorrelation
20 | af = @Matern32_matrix;
21 |
22 | %% Smart Sampling Algorithm (SSA)
23 | % Garud, Sushant Suhas, Iftekhar A. Karimi, and Markus Kraft.
24 | % "Smart sampling algorithm for surrogate model development." Computers & Chemical Engineering 96 (2017): 103-114
25 | adaptive_methods{1} ='SSA';
26 |
27 | %% Cross-Validation-Voronoi (CVVOR)
28 | % Xu, Shengli, et al. "A robust error-pursuing sequential sampling approach for global metamodeling based on voronoi diagram and cross validation."
29 | % Journal of Mechanical Design 136.7 (2014): 071009.
30 | adaptive_methods{2} ='CVVor';
31 |
32 | %% ACcumulative Error (ACE)
33 | % Li, Genzi, Vikrant Aute, and Shapour Azarm. "An accumulative error based adaptive design of experiments for offline metamodeling."
34 | % Structural and Multidisciplinary Optimization 40.1-6 (2010): 137.
35 | adaptive_methods{3} ='ACE';
36 |
37 | %% MC-intersite-proj-th (MIPT)
38 | % Crombecq, Karel, Eric Laermans, and Tom Dhaene. "Efficient space-filling and non-collapsing sequential design strategies for simulation-based modeling."
39 | % European Journal of Operational Research 214.3 (2011): 683-696.
40 | adaptive_methods{4} ='MIPT';
41 |
42 | %% LOLA-Voronoi (LOLA)
43 | % Crombecq, Karel, et al. "A novel hybrid sequential design strategy for global surrogate modeling of computer experiments."
44 | % SIAM Journal on Scientific Computing 33.4 (2011): 1948-1974.
45 | adaptive_methods{5} ='LOLA';
46 |
47 | %% Adaptive Maximum Entropy (AME)
48 | % Liu, Haitao, et al. "An adaptive Bayesian sequential sampling approach for global metamodeling."
49 | % Journal of Mechanical Design 138.1 (2016): 011404.
50 | adaptive_methods{6} ='AME';
51 |
52 | %% Maximizing Expected Prediction Error (MEPE)
53 | %Liu, Haitao, Jianfei Cai, and Yew-Soon Ong. "An adaptive sampling approach for kriging metamodeling by maximizing expected prediction error."
54 | %Computers & Chemical Engineering 106 (2017): 171-182.
55 | adaptive_methods{7} ='MEPE';
56 |
57 | %% Mixed Adaptive Sampling Algorithm (MASA)
58 | % Eason, John, and Selen Cremaschi. "Adaptive sequential sampling for surrogate model generation with artificial neural networks."
59 | % Computers & Chemical Engineering 68 (2014): 220-232.
60 | adaptive_methods{8} ='MASA';
61 |
62 | %% Weighted Accumulative Error (WAE)
63 | % Jiang, Ping, et al. "A novel sequential exploration-exploitation sampling strategy for global metamodeling."
64 | % IFAC-PapersOnLine 48.28 (2015): 532-537.
65 | adaptive_methods{9} ='WAE';
66 |
67 | %% Sampling with Lipschitz Criterion (LIP)
68 | % Lovison, Alberto, and Enrico Rigoni. "Adaptive sampling with a Lipschitz criterion for accurate metamodeling."
69 | % Communications in Applied and Industrial Mathematics 1.2 (2011): 110-126.
70 | adaptive_methods{10} ='LIP';
71 |
72 | %% Taylor expansion-based adaptive design (TEAD)
73 | % Mo, Shaoxing, et al. "A Taylor expansion‐based adaptive design strategy for global surrogate modeling with applications in groundwater modeling."
74 | % Water Resources Research 53.12 (2017): 10802-10823.
75 | adaptive_methods{11} ='TEAD';
76 |
77 | %% Space-Filling Cross Validation Tradeoff (SFCVT)
78 | % Aute, Vikrant, et al. "Cross-validation based single response adaptive design of experiments for Kriging metamodeling of deterministic computer simulations."
79 | % Structural and Multidisciplinary Optimization 48.3 (2013): 581-605.
80 | adaptive_methods{12} ='SFVCT';
81 |
82 | %% Expected improvement (EI)
83 | % Jones, Donald R., Matthias Schonlau, and William J. Welch. "Efficient global optimization of expensive black-box functions."
84 | % Journal of Global optimization 13.4 (1998): 455-492.
85 | adaptive_methods{13} ='EI';
86 |
87 | %% Expected improvement for global fit (EIGF)
88 | % Lam, Chen Quin. "Sequential adaptive designs in computer experiments for response surface model fit."
89 | % Diss. The Ohio State University, 2008.
90 | adaptive_methods{14} ='EIGF';
91 |
92 |
93 |
94 | %% TPLHD
95 | % Liao, Xiaoping, et al. "A fast optimal latin hypercube design for Gaussian process regression modeling."
96 | % Third International Workshop on Advanced Computational Intelligence. IEEE, 2010.
97 | adaptive_methods{15} ='TPLHD';
98 |
99 | %% Obtains the error from the initial samples
100 | adaptive_methods{16} ='Initial_error';
101 |
102 | %% Jin's Cross validation approach (CVA)
103 | % Jin, Ruichen, Wei Chen, and Agus Sudjianto. "On sequential sampling for global metamodeling in engineering design."
104 | % ASME 2002 International Design Engineering Technical Conferences and Computers and Information in Engineering Conference. American Society of Mechanical Engineers, 2002.
105 | adaptive_methods{17} ='Jin_CV';
106 |
107 | %% Maximin Distance design (MSD)
108 | % Johnson, Mark E., Leslie M. Moore, and Donald Ylvisaker. "Minimax and maximin distance designs."
109 | % Journal of statistical planning and inference 26.2 (1990): 131-148.
110 | adaptive_methods{18} ='MSD';
111 |
112 | %% Weighted expected improvement (WEI)
113 | % Jones, Donald R., Matthias Schonlau, and William J. Welch. "Efficient global optimization of expensive black-box functions."
114 | % Journal of Global optimization 13.4 (1998): 455-492.
115 | adaptive_methods{19} ='WEI';
116 |
117 | % Runs the sampling technique selected via methodID. Extend the loop range
118 | % to run several techniques; the loop can be parallelized via parfor.
119 | for j=methodID:methodID
120 | mean_errors = zeros(number_of_adaptive_iterations,5);
121 | stored_metamodels = {};
122 | final_errors = {};
123 | single_errors = {};
124 | for i=1:max_iteration
125 | warning('off','all')
126 | St = ['Computation of ', adaptive_methods{j},' Iteration number: ', num2str(i), ' of ', num2str(max_iteration)];
127 | disp(St)
128 |
129 | [stored_metamodels{i}, final_errors{i}, single_errors{i}] = initial_metamodel(M,A_sampling,adaptive_methods{j}, x, y,number_of_adaptive_iterations, opti_strategy,i, af);
130 | temp = single_errors{i}(2);
131 | mean_errors = mean_errors + temp{1}.error_data.Variables;
132 |
133 | end
134 | mean_errors = mean_errors/max_iteration;
135 | output{j,1}= {adaptive_methods{j}, stored_metamodels, final_errors,single_errors, mean_errors};
136 |
137 | result = output{j,1};
138 | ST = ['Sol_', adaptive_methods{j},'.mat'];
139 | save(ST,'result');
140 |
141 | end
142 |
143 |
144 | end
145 |
146 |
--------------------------------------------------------------------------------
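
Note: initial_metamodel unpacks the parameter space as lb = A_sampling(1,:) and ub = A_sampling(2,:), so this driver expects a 2-by-d matrix whose rows hold the bounds. A non-interactive end-to-end sketch under that assumption:

    % Run MEPE (methodID 7) on the Branin benchmark: 20 adaptive samples,
    % one repetition, particle swarm for the internal optimizations.
    [y, lb, ub, x, M] = call_benchmarkFunctions(12);
    A_sampling = [lb'; ub'];
    output = chooseSamplingMethod(M, x, y, A_sampling, 20, 1, 'PSO', 7);
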
/src/help_functions/Read_me_helpfunctions.py:
--------------------------------------------------------------------------------
1 | This folder contains utility functions used by the adaptive sampling scheme.
2 |
--------------------------------------------------------------------------------
/src/help_functions/intersite_proj_th.m:
--------------------------------------------------------------------------------
1 | function val = intersite_proj_th(dmin, X,p)
2 | % Intersite-proj-th criterion: returns 0 if the candidate p lies closer than dmin (in minimum coordinate-wise distance) to any sample in X, otherwise the minimum Euclidean distance from p to X
3 |
4 | for i=1:size(X,1)
5 | if norm((X(i,:) - p),-inf) < dmin
6 | val = 0; return % candidate violates the projected-distance threshold
7 | end
8 | end
9 | val = inf;
10 | for i=1:size(X,1)
11 | calc = norm(X(i,:) - p);
12 | if calc < val
13 | val = calc;
14 | end
15 | end
16 | end
--------------------------------------------------------------------------------
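
Note: with the early return above, the criterion returns 0 as soon as the smallest coordinate-wise distance between the candidate p and any existing sample falls below dmin, and otherwise scores p by its nearest-neighbour Euclidean distance. A small sketch:

    X = [0 0; 1 1];                        % existing samples
    intersite_proj_th(0.2, X, [0.5 0.5])   % no violation: returns about 0.7071
    intersite_proj_th(0.2, X, [0.1 0.9])   % coordinate-wise distance 0.1 < 0.2: returns 0
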
/src/help_functions/lhs_scaled.m:
--------------------------------------------------------------------------------
1 | function [x_scaled]=lhs_scaled(n,lb,ub)
2 | % Create n samples with Matlab's Latin hypercube method within bounds lb and ub
3 | p=numel(lb);
4 | upper=ub-lb;
5 | lower=lb;
6 | M_upper=ones(n,p);
7 | M_lower=ones(n,p);
8 | for i=1:p
9 | M_upper(:,i)=ones(n,1).*upper(i);
10 | M_lower(:,i)=ones(n,1).*lower(i);
11 | end
12 | x_normalized = lhsdesign(n,p,'criterion','maximin');
13 | x_scaled=M_upper.*x_normalized+M_lower;
14 |
15 | end
16 |
--------------------------------------------------------------------------------
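
Note: a short usage sketch (lhsdesign requires the Statistics and Machine Learning Toolbox):

    lb = [-2; 0];
    ub = [ 2; 5];
    X  = lhs_scaled(20, lb, ub);   % 20-by-2 maximin Latin hypercube design within the bounds
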
/src/help_functions/randomVoronoi.m:
--------------------------------------------------------------------------------
1 | function C = randomVoronoi(X,lb,ub)
2 | % Build Voronoi cells
3 |
4 | addpath('help_functions')
5 |
6 | n_of_existing_P = size(X,1);
7 | dimension = size(X,2);
8 | w = 1000; % Monte Carlo points per existing sample and dimension
9 |
10 | n = n_of_existing_P * dimension * w;
11 |
12 | xn = lhs_scaled(n,lb,ub);
13 | C = {};
14 |
15 | for j=1:size(X,1)
16 | C{j,1} = X(j,:); % cell seed point
17 | C{j,2} = X(j,:); % cell members, initialized with the seed
18 | end
19 |
20 | for i=1:size(xn,1)
21 | k = dsearchn(X,xn(i,:)); % index of the nearest seed point
22 | C{k,2} = [C{k,2}; xn(i,:)];
23 | end
24 | end
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
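
Note: each row of the returned cell array holds a seed point in C{j,1} and the Monte Carlo points assigned to its Voronoi cell (plus the seed itself) in C{j,2}. The member counts give a cheap estimate of the relative cell volumes, as in this sketch:

    X = lhs_scaled(5, [0;0], [1;1]);              % 5 seed points in the unit square
    C = randomVoronoi(X, [0;0], [1;1]);
    counts = cellfun(@(m) size(m,1)-1, C(:,2));   % cell members minus the seed
    vol = counts/sum(counts)                      % relative cell volume estimates
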
/src/help_functions/scale_rand.m:
--------------------------------------------------------------------------------
1 | function S = scale_rand(n,lb,ub)
2 | % Draw n uniformly random samples and scale them to the bounds lb and ub
3 |
4 | S = rand(n,numel(lb));
5 | for i=1:numel(lb)
6 | c = lb(i);
7 | d = ub(i);
8 |
9 | S(:,i) = scale(c,d,S(:,i));
10 | end
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/src/help_functions/scale_to_unity.m:
--------------------------------------------------------------------------------
1 | function scaled_value = scale_to_unity(a,b,x)
2 | % b upper bound , a lower bound
3 | scaled_value = (1/(b-a)) * (x-a);
4 | end
5 |
6 |
--------------------------------------------------------------------------------
/src/help_functions/scale_vector_from_unity.m:
--------------------------------------------------------------------------------
1 | function scaled_value = scale_vector_from_unity(lb,ub,x)
2 | % lb lower, ub upper
3 | n = size(x,1);
4 | d = size(x,2);
5 | scaled_value = zeros(n,d);
6 | if (numel(lb) ~= d) || (numel(ub) ~= d)
7 | error('Scaling to unity not possible.')
8 | end
9 | for i=1:n
10 | for j=1:d
11 | scaled_value(i,j) = lb(j) + (ub(j)-lb(j)) * x(i,j);
12 | end
13 | end
14 | end
15 |
16 |
17 |
--------------------------------------------------------------------------------
/src/help_functions/scale_vector_to_unity.m:
--------------------------------------------------------------------------------
1 | function scaled_value = scale_vector_to_unity(a,b,x)
2 | % b upper bound , a lower bound
3 | n = size(x,1);
4 | d = size(x,2);
5 | scaled_value = zeros(n,d);
6 | if (numel(a) ~= d) || (numel(b) ~= d)
7 | error('Scaling to unity not possible.')
8 | end
9 | for i=1:n
10 | for j=1:d
11 | scaled_value(i,j) = (1/(b(j)-a(j))) * (x(i,j)-a(j));
12 | end
13 | end
14 |
15 | end
16 |
17 |
18 |
--------------------------------------------------------------------------------
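
Note: the scaling helpers are mutual inverses, so mapping samples to the unit hypercube and back recovers them up to round-off. A minimal sketch:

    lb = [-5; 0];  ub = [5; 10];
    X  = lhs_scaled(4, lb, ub);               % samples in the original space
    U  = scale_vector_to_unity(lb, ub, X);    % map into [0,1]^2
    Xr = scale_vector_from_unity(lb, ub, U);  % map back
    max(abs(X(:) - Xr(:)))                    % ~0 up to round-off
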
/src/initial_metamodel.m:
--------------------------------------------------------------------------------
1 | function [stored_metamodels, final_errors, single_errors] = initial_metamodel(M,A_sampling,adaptive_method, x, y,max_iteration,opti_strategy, iteration_number, af)
2 | % Details: Define the initial metamodel for the adaptive process
3 | %
4 | % inputs:
5 | % M - response function
6 | % A_sampling - parameter space
7 | % adaptive_method - chosen adaptive sampling technique
8 | % x - samples in parametric space
9 | % y - observations
10 | % max_iteration - maximum number of adaptive iterations
11 | % opti_strategy - optimization strategy; iteration_number - repetition index; af - autocorrelation function handle
12 | %
13 | % outputs:
14 | % stored_metamodels - stored metamodels over adaptive process
15 | % final_errors - final errors after adaptive process
16 | % single_errors - single errors after each adaptive step
17 | n_of_Variables = size(x,2);
18 |
19 |
20 | metamodel_ini = OK_model(af,x,y,opti_strategy);
21 |
22 | if strcmp(adaptive_method,'Initial_error')
23 | stored_metamodels{1} = metamodel_ini;
24 | single_errors = 0;
25 | else
26 | [stored_metamodels,single_errors] = adaptive_sampling_process(metamodel_ini,M,adaptive_method,A_sampling,max_iteration, opti_strategy, iteration_number);
27 | end
28 | data_errors = Error_Saver();
29 |
30 | lb = A_sampling(1,:);
31 | ub = A_sampling(2,:);
32 |
33 | no_test_points = 5000 * n_of_Variables;
34 | test_points = lhs_scaled(no_test_points,lb,ub);
35 |
36 | for i=1:no_test_points
37 | test_points_response(i) = M(test_points(i,:));
38 | end
39 |
40 | test_points = scale_vector_to_unity(lb, ub, test_points);
41 |
42 | %% Errors
43 | for i=1:no_test_points
44 | [metamodel_response(i),~] = stored_metamodels{end}.predict(test_points(i,:));
45 | end
46 |
47 | final_errors=data_errors.update(stored_metamodels{end}.m,test_points_response , metamodel_response);
48 |
49 |
50 |
51 | end
52 |
53 |
54 |
--------------------------------------------------------------------------------
/src/optimizationTools.m:
--------------------------------------------------------------------------------
1 | function x_opti = optimizationTools(fun,strategy,AA,b,Aeq,beq,lb,ub,nonlcon)
2 | % Details: Utility function that connects to different matlab optimization schemes
3 | %
4 | % inputs:
5 | % fun - Function handler for optimizable function
6 | % strategy - Optimization strategy
7 | % AA - Linear inequality matrix
8 | % b - Right hand side vector of linear inequality
9 | % Aeq - Linear equality matrix
10 | % beq - Linear equality right hand side
11 | % lb - Lower optimization bound
12 | % ub - Uppper optimization bound
13 | % nonlcon - Nonlinear constraint definition
14 | %
15 | % outputs:
16 | % x_opti - Optimized point
17 |
18 | addpath('help_functions')
19 |
20 | n = numel(lb);
21 |
22 | %% FMincon
23 | if strcmp(strategy,'fmincon')
24 | options = optimoptions('fmincon','Display','none');
25 | addpath('help_functions')
26 | x0 = scale_rand(1,lb,ub);
27 | x_opti = fmincon(fun,x0,AA,b,Aeq,beq,lb,ub,nonlcon,options);
28 |
29 | %% Particle Swarm Optimization
30 | elseif strcmp(strategy,'PSO')
31 | options = optimoptions('particleswarm','SwarmSize',1000*n,'Display','off');
32 | %options.HybridFcn = @fmincon;
33 | [x_opti] = particleswarm(fun,n,lb,ub,options);
34 |
35 | %% Genetic Algorithm
36 | elseif strcmp(strategy,'GA')
37 | options = optimoptions('ga','PopulationSize',1000*n, 'Display','off','ConstraintTolerance', 10^(-6));
38 | %options.HybridFcn = @fmincon;
39 | x_opti = ga(fun,n,AA,b,Aeq,beq,lb,ub,nonlcon,options);
40 |
41 | %% Patternsearch
42 | elseif strcmp(strategy,'patternsearch')
43 | options = optimoptions('patternsearch','Display','off','ConstraintTolerance', 10^(-8),'MeshTolerance', 10^(-12),'MaxIterations',1000*n);
44 | %options.HybridFcn = @fmincon;
45 |
46 | x_opti = patternsearch(fun,scale_rand(1,lb,ub),AA,b,Aeq,beq,lb,ub,nonlcon,options); % patternsearch expects a start point, not the dimension
47 |
48 | %% Multistart algorithm
49 | elseif strcmp(strategy,'MS')
50 |
51 | for i=1:10
52 |
53 | x0(i,:) = scale_rand(1,lb,ub);
54 | opts = optimoptions(@fmincon,'Algorithm','sqp','Display','none');
55 |
56 |
57 | problem = createOptimProblem('fmincon','objective',...
58 | fun,'x0',x0(i,:),'Aineq',AA,'bineq',b,'Aeq',Aeq,'beq',beq,'lb',lb,'ub',ub,'nonlcon',nonlcon,'options',opts);
59 | ms = MultiStart('Display', 'off');
60 |
61 | [x_opti2(i,:),f(i,1)] = run(ms,problem,20);
62 | end
63 | [~,ind] = min(f);
64 | x_opti = x_opti2(ind,:);
65 |
66 |
67 | %% Simulated annealing
68 | elseif strcmp(strategy,'AN')
69 |
70 | options = optimoptions('simulannealbnd','Display','none','FunctionTolerance', 1e-08);
71 |
72 | for i=1:10
73 | x0(i,:) = scale_rand(1,lb,ub);
74 |
75 | [x_opti2(i,:), f(i,1)] = simulannealbnd(fun,x0(i,:),lb,ub,options); % use the i-th start point
76 | end
77 | [~,ind] = min(f);
78 | x_opti = x_opti2(ind,:);
79 | end
80 | end
81 |
82 |
--------------------------------------------------------------------------------
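
Note: a minimal usage sketch that minimizes a shifted sphere over [-5, 5]^2 via the particle swarm branch:

    fun = @(x) sum((x - [1, 2]).^2, 2);
    xs  = optimizationTools(fun, 'PSO', [], [], [], [], [-5 -5], [5 5], []);
    % xs should approach [1, 2]
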
/src/run_input_prompts.m:
--------------------------------------------------------------------------------
1 | function [ methodID, benchmarkId, numberSamples, numberRepetitions,Vis ] = run_input_prompts()
2 | % Details: Utility function that runs input prompt
3 | %
4 | % inputs:
5 | %
6 | % outputs:
7 | % methodID - ID of sampling strategy
8 | % benchmarkId - ID of benchmark test
9 | % numberSamples - Number of samples to add
10 | % numberRepetitions - Number of repetitions of the sampling procedure
11 | % Vis - Flag for visualization of output (only 1D and 2D)
12 |
13 | clc
14 | disp('Choose an adaptive technique.')
15 | prompt = '1. Smart Sampling Algorithm (SSA) \n2. Cross-Validation-Voronoi (CVVOR) \n3. ACcumulative Error (ACE) \n4. MC-intersite-proj-th (MIPT) \n5. LOLA-Voronoi (LOLA) \n6. Adaptive Maximum Entropy (AME) \n7. Maximizing Expected Prediction Error (MEPE) \n8. Mixed Adaptive Sampling Algorithm (MASA) \n9. Weighted Accumulative Error (WAE) \n10. Sampling with Lipschitz Criterion (LIP) \n11. Taylor expansion-based adaptive design (TEAD) \n12. Space-Filling Cross Validation Tradeoff (SFCVT) \n13. Expected improvement (EI) \n14. Expected improvement for global fit (EIGF) \n \nEnter number and press enter: ';
16 | methodID = input(prompt);
17 |
18 | clc
19 | disp('Choose a dimension for the benchmark.')
20 | prompt = '1. 1D \n2. 2D \n3. 3D \n4. 4D \n5. 5D \n6. 6D \n \nEnter number and press enter: ';
21 | benchDim = input(prompt);
22 | prompt = '\n \nEnter number and press enter: ';
23 |
24 | clc
25 | disp('See /docs/Benchmark_Tests.pdf for references for all benchmark functions.')
26 | if benchDim==1
27 |
28 | disp('One dimensional benchmark cases are:')
29 | ST = {' P1 : Single Hump function',' P2 : Two Humps function',' P3 : Gramacy & Lee function',' P4 : Modified Gramacy & Lee function', ' P5 : Perm 0,1 function', ' P6 : Damped Cosine function', ' P7 : Exploitation function'};
30 | for i=1:numel(ST)
31 | StOut = [num2str(i),'. ', ST{i}];
32 | disp(StOut);
33 | end
34 |
35 |
36 | benchDim1 = input(prompt);
37 | benchmarkId = benchDim1;
38 |
39 | method = ST{benchDim1};
40 | elseif benchDim == 2
41 |
42 | disp('Two dimensional benchmark cases are:')
43 |
44 | ST = {' P8 : Michalewicz function', ' P9 : Drop-Wave function', ' P10: Booth function', ' P11: Bohachevsky function', ' P12: Branin function', ' P13: Franke function', ' P14: Rosenbrock function', ' P15: Six-Hump Camel function', ' P16: Rastrigin function', ' P17: Griewank function'};
45 | for i=1:numel(ST)
46 | StOut = [num2str(i),'. ', ST{i}];
47 | disp(StOut);
48 | end
49 | benchDim2 = input(prompt);
50 | benchmarkId = benchDim2 + 7;
51 |
52 | method = ST{benchDim2};
53 | elseif benchDim == 3
54 |
55 | disp('Three dimensional benchmark cases are:')
56 |
57 | ST = {' P18: Sphere 3D function', ' P19: Hartmann 3D function', ' P20: Ishigami function'};
58 | for i=1:numel(ST)
59 | StOut = [num2str(i),'. ', ST{i}];
60 | disp(StOut);
61 | end
62 | benchDim3 = input(prompt);
63 | benchmarkId = benchDim3 + 17;
64 |
65 | method = ST{benchDim3};
66 | elseif benchDim == 4
67 |
68 | disp('Four dimensional benchmark cases are:')
69 | ST = {' P21: Sphere 4D function',' P22: Dixon-Price function'};
70 | for i=1:numel(ST)
71 | StOut = [num2str(i),'. ', ST{i}];
72 | disp(StOut);
73 | end
74 |
75 | benchDim4 = input(prompt);
76 | benchmarkId = benchDim4 + 20;
77 |
78 | method = ST{benchDim4};
79 | elseif benchDim == 5
80 |
81 | disp('Five dimensional benchmark cases are:')
82 | ST = {' P23: Sphere 5D function'};
83 | for i=1:numel(ST)
84 | StOut = [num2str(i),'. ', ST{i}];
85 | disp(StOut);
86 | end
87 |
88 | benchDim5 = input(prompt);
89 | benchmarkId = benchDim5 + 22;
90 |
91 | method = ST{benchDim5};
92 | elseif benchDim == 6
93 |
94 | disp('Six dimensional benchmark cases are:')
95 |
96 | ST = {' P24: Hartmann 6D function'};
97 | for i=1:numel(ST)
98 | StOut = [num2str(i),'. ', ST{i}];
99 | disp(StOut);
100 | end
101 | benchDim6 = input(prompt);
102 | benchmarkId = benchDim6 + 23;
103 |
104 | method = ST{benchDim6};
105 | end
106 |
107 | if benchmarkId<=17
108 | clc
109 | ST = ['Do you want to see a visualization of the sampling process (y/n)?'];
110 | disp(ST);
111 | prompt = '\n \nEnter yes or no (y/n): ';
112 | Vis = input(prompt,'s');
113 | else
114 | Vis = 'n';
115 | end
116 |
117 | [ ~, ~, ~ ,init_x, ~ ] = call_benchmarkFunctions( benchmarkId );
118 | numberInit = size(init_x,1);
119 | clc
120 | ST = ['How many samples do you want to add to the initial ', num2str(numberInit), ' samples?'];
121 | disp(ST);
122 | prompt = '\n \nEnter number and press enter: ';
123 | numberSamples = input(prompt);
124 |
125 | clc
126 | disp('How many repetitions of the sampling process would you like to run?');
127 | prompt = '\n \nEnter number and press enter: ';
128 | numberRepetitions = input(prompt);
129 | clc
130 |
131 | ST = ['Running benchmark test: ', method];
132 | disp(ST);
133 | end
134 |
135 |
--------------------------------------------------------------------------------