├── .gitignore
├── GP.py
├── MACE.py
├── README.md
├── circuit
│   ├── cec2014
│   │   ├── .gitignore
│   │   ├── CEC14_EOTP_C.cpp
│   │   ├── CMakeLists.txt
│   │   ├── cec14_eotp.cpp
│   │   └── cec14_eotp.h
│   ├── param
│   ├── result.po
│   └── run.pl
├── conf.toml
├── main.py
└── obj.py

/.gitignore:
--------------------------------------------------------------------------------

# vim swap file
*.swp
work/
dbx
dby
ps*
pf*
log
ref/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
--------------------------------------------------------------------------------
/GP.py:
--------------------------------------------------------------------------------
import GPy
from GPyOpt.util.general import get_quantiles
import numpy as np
from math import pow, log, sqrt
import sys

# TODO: standardize the training data
class GP_MCMC:
    def __init__(self, train_x, train_y, B, num_init, warp = False, mcmc = True):

        self.mean = np.mean(train_y)
        self.std = np.std(train_y)

        self.train_x = train_x.copy()
        self.train_y = (train_y - self.mean) / self.std
        self.num_train = self.train_x.shape[0]
        self.dim = self.train_x.shape[1]
        self.B = B
        self.num_init = num_init
        self.warp = warp
        self.mcmc = mcmc

        kern = GPy.kern.Matern52(input_dim = self.dim, ARD = True)
        if self.warp:
            self.m = GPy.models.InputWarpedGP(self.train_x, self.train_y, kern)
        else:
            self.m = GPy.models.GPRegression(self.train_x, self.train_y, kern)

        # self.m.kern.variance.set_prior(GPy.priors.Gamma.from_EV(np.var(self.train_y), 120))
        # self.m.likelihood.variance.set_prior(GPy.priors.Gamma.from_EV(1e-2 * np.var(self.train_y), 4))
        # self.m.kern.lengthscale.set_prior(GPy.priors.Gamma.from_EV(np.std(self.train_x, 0), 1000 * np.ones(np.std(self.train_x, 0).shape)))

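        # Initialize the hyperparameters from data statistics before the MAP fit /
        # HMC sampling below: signal variance from the standardized targets, one ARD
        # lengthscale per input dimension from the spread of the inputs, and a small
        # noise variance bounded away from zero; the priors keep the estimates in a
        # reasonable range.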
        self.m.kern.variance = np.var(self.train_y)
        self.m.kern.lengthscale = np.std(self.train_x, 0)
        self.m.likelihood.variance = np.maximum(2e-20, 1e-2 * np.var(self.train_y))
        self.m.likelihood.variance.constrain_bounded(1e-20, 1e10)

        self.m.kern.variance.set_prior(GPy.priors.LogGaussian(0, 1))
        self.m.likelihood.variance.set_prior(GPy.priors.Gamma.from_EV(0.02, 4))
        self.m.kern.lengthscale.set_prior(GPy.priors.LogGaussian(0, 10))

        self.eps = 1e-3
        self.upsilon = 0.5
        self.delta = 0.05
        self.tau = np.min(train_y)

        self.burnin = 200
        self.n_samples = 10
        self.subsample_interval = 10
        self.sample()

    def sample(self):
        # MAP fit first; when mcmc is enabled, additionally draw thinned HMC samples
        # of the hyperparameters and keep one GP model per sample.
        self.m.optimize(max_iters=200, messages=False)
        if not self.mcmc:
            self.s = np.array(self.m[:])
            self.s = self.s.reshape(1, self.s.size)
            self.ms = np.array([self.m])
        else:
            hmc = GPy.inference.mcmc.HMC(self.m, stepsize=5e-2)
            s = hmc.sample(num_samples=self.burnin) # Burnin
            s = hmc.sample(num_samples=self.n_samples * self.subsample_interval)
            self.s = s[0::self.subsample_interval]
            self.ms = []
            for i in range(self.s.shape[0]):
                samp_kern = GPy.kern.Matern52(input_dim = self.dim, ARD = True)
                if self.warp:
                    samp_m = GPy.models.InputWarpedGP(self.train_x, self.train_y, samp_kern)
                else:
                    samp_m = GPy.models.GPRegression(self.train_x, self.train_y, samp_kern)
                samp_m[:] = self.s[i]
                samp_m.parameters_changed()
                self.ms = np.append(self.ms, samp_m)


    def predict_sample(self, x, hyp_vec):
        self.m.kern.variance = hyp_vec[0]
        self.m.kern.lengthscale = hyp_vec[1:1+self.dim]
        self.m.likelihood.variance = hyp_vec[1+self.dim]
        py, ps2 = self.m.predict(x.reshape(1, x.size))
        py = self.mean + (py * self.std)
        ps2 = ps2 * (self.std**2)
        return py, ps2

    def set_kappa(self):
        # GP-UCB style exploration weight; t counts the completed batches.
        num_train = self.num_train
        t = 1 + int((num_train - self.num_init) / self.B)
        self.kappa = sqrt(self.upsilon * 2 * log(pow(t, 2.0 + self.dim / 2.0) * 3 * pow(np.pi, 2) / (3 * self.delta)))

    def predict(self, x):
        # Predictive mean and standard deviation at x, one row per hyperparameter
        # sample, mapped back to the original (de-standardized) scale.
        num_samples = self.s.shape[0]
        pys = np.zeros((num_samples, 1))
        pss = np.zeros((num_samples, 1))
        for i in range(num_samples):
            m, v = self.ms[i].predict(x.reshape(1, x.size))
            pys[i] = m[0][0]
            pss[i] = v[0][0]
        pys = self.mean + (pys * self.std)
        pss = pss * (self.std**2)
        return pys, np.sqrt(pss)

    def LCB(self, x, pys, pss):
        num_samples = pys.shape[0]
        self.set_kappa()
        acq = 0
        for i in range(num_samples):
            y = pys[i]
            s = pss[i]
            lcb = y - self.kappa * s
            acq += lcb
        acq /= self.s.shape[0]
        return acq

    def EI(self, x, pys, pss):
        num_samples = pys.shape[0]
        acq = 0
        for i in range(num_samples):
            y = pys[i]
            s = pss[i]
            phi, Phi, u = get_quantiles(self.eps, self.tau, y, s)
            f_acqu = s * (u * Phi + phi)
            acq += f_acqu
        acq /= self.s.shape[0]
        return acq

    def PI(self, x, pys, pss):
        num_samples = pys.shape[0]
        acq = 0
        for i in range(num_samples):
            y = pys[i]
            s = pss[i]
            _, Phi, _ = get_quantiles(self.eps, self.tau, y, s)
            f_acqu = Phi
            acq += f_acqu
        acq /= self.s.shape[0]
        return acq

    def MACE_acq(self, x):
        pys, pss = self.predict(x)
        lcb = self.LCB(x, pys, pss)
        ei = self.EI(x, pys, pss)
        pi = self.PI(x, pys, pss)
        return lcb, ei, pi
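
For reference, a minimal sketch of how GP_MCMC can be exercised on its own, assuming GPy, GPyOpt and numpy are installed and GP.py is importable; the toy data, the mcmc=False setting and every name below are illustrative only, not part of this repository:

```python
import numpy as np
from GP import GP_MCMC

# Toy 2-D regression data; mcmc=False keeps the demo fast (a single MAP
# hyperparameter setting instead of thinned HMC samples).
train_x = np.random.uniform(-1.0, 1.0, (20, 2))
train_y = np.sum(train_x ** 2, axis=1, keepdims=True)

model = GP_MCMC(train_x, train_y, B=4, num_init=20, warp=False, mcmc=False)

x_new = np.array([0.1, -0.2])
py, ps = model.predict(x_new)        # de-standardized mean/std, one row per hyperparameter sample
lcb, ei, pi = model.MACE_acq(x_new)  # the three acquisition values averaged over the samples
print(py.ravel(), ps.ravel(), lcb, ei, pi)
```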
--------------------------------------------------------------------------------
/MACE.py:
--------------------------------------------------------------------------------
from GP import GP_MCMC
import numpy as np
from platypus import NSGAII, MOEAD, Problem, Real, SPEA2, NSGAIII, Solution, InjectedPopulation, Archive
from math import pow, log, sqrt
from scipy.special import erfc
from scipy.optimize import fmin_l_bfgs_b
from sobol_seq import i4_sobol_generate
import os, sys

class MACE:
    def __init__(self, f, lb, ub, num_init, max_iter, B, debug=True, sobol_init=True, warp = False, mo_eval = 25000, mcmc = True):
        """
        f: the objective function:
            input: D row vector
            output: scalar value
        lb: lower bound
        ub: upper bound
        num_init: number of initial random samples
        max_iter: number of iterations
        B: batch size; the total number of function evaluations is num_init + B * max_iter
        """
        self.f = f
        self.lb = lb.reshape(lb.size)
        self.ub = ub.reshape(ub.size)
        self.dim = self.lb.size
        self.num_init = num_init
        self.max_iter = max_iter
        self.B = B
        self.debug = debug
        self.sobol_init = sobol_init
        self.warp = warp
        self.mo_eval = mo_eval
        self.mcmc = mcmc

    def init(self):
        self.dbx = np.zeros((self.num_init, self.dim))
        if self.sobol_init:
            self.dbx = (self.ub - self.lb) * i4_sobol_generate(self.dim, self.num_init) + self.lb
        else:
            self.dbx = np.random.uniform(self.lb, self.ub, (self.num_init, self.dim))

        self.dby = np.zeros((self.num_init, 1))
        self.best_y = np.inf
        for i in range(self.num_init):
            y = self.f(self.dbx[i])
            if y < self.best_y:
                self.best_y = y
                self.best_x = self.dbx[i]
            self.dby[i] = y
        np.savetxt('dbx', self.dbx)
        np.savetxt('dby', self.dby)
        print('Initialized, best is %g' % self.best_y)

    def gen_guess(self):
        # Seed the multi-objective search with the incumbent plus one local
        # minimizer of each sampled GP's posterior mean.
        num_guess = 1 + len(self.model.ms)
        guess_x = np.zeros((num_guess, self.dim))
        guess_x[0, :] = self.best_x

        def obj(x, m):
            y, _ = m.predict(x[None, :])
            return y[0, 0]
        def gobj(x, m):
            # GPy returns the mean gradient with shape (1, dim, 1); flatten it for L-BFGS-B
            dmdx, _ = m.predictive_gradients(x[None, :])
            return dmdx[0, :, 0]

        bounds = [(self.lb[i], self.ub[i]) for i in range(self.dim)]
        for i in range(1, num_guess):
            m = self.model.ms[i-1]
            xx = self.best_x + np.random.randn(self.best_x.size).reshape(self.best_x.shape) * 1e-3
            def mobj(x):
                return obj(x, m)
            def gmobj(x):
                return gobj(x, m)
            x, _, _ = fmin_l_bfgs_b(mobj, xx, gmobj, bounds=bounds)
            guess_x[i, :] = np.array(x)
        return guess_x


    def optimize(self):
        os.system("rm -f pf* ps* opt.log")
        f = open('opt.log', 'w')
        self.best_y = np.min(self.dby)
        for iter in range(self.max_iter):
            self.model = GP_MCMC(self.dbx, self.dby, self.B, self.num_init, warp = self.warp, mcmc = self.mcmc)
            print("GP built")
            print(self.model.m, flush=True)

            guess_x = self.gen_guess()
            num_guess = guess_x.shape[0]

            # The three objectives minimized by NSGA-II: LCB, -log(EI) and -log(PI).
            def obj(x):
                lcb, ei, pi = self.model.MACE_acq(np.array([x]))
                log_ei = np.log(1e-40 + ei)
                log_pi = np.log(1e-40 + pi)
                return [lcb[0], -1*log_ei[0], -1*log_pi[0]]

            problem = Problem(self.dim, 3)
            for i in range(self.dim):
                problem.types[i] = Real(self.lb[i], self.ub[i])

            init_s = [Solution(problem) for i in range(num_guess)]
            for i in range(num_guess):
                init_s[i].variables = [x for x in guess_x[i, :]]

            problem.function = obj
            gen = InjectedPopulation(init_s)
            arch = Archive()
            # Platypus's NSGAII takes the population size via 'population_size'
            algorithm = NSGAII(problem, population_size = 100, generator = gen, archive = arch)
            def cb(a):
                print(a.nfe, len(a.archive), flush=True)
            algorithm.run(self.mo_eval, callback=cb)

            if len(algorithm.result) > self.B:
                optimized = algorithm.result
            else:
                optimized = algorithm.population

            idxs = np.arange(len(optimized))
            idxs = np.random.permutation(idxs)
            idxs = idxs[0:self.B]
            for i in idxs:
                x = np.array(optimized[i].variables)
                y = self.f(x)
                if y < self.best_y:
                    self.best_y = y
                    self.best_x = x
                self.dbx = np.concatenate((self.dbx, x.reshape(1, x.size)), axis=0)
                self.dby = np.concatenate((self.dby, y.reshape(1, 1)), axis=0)
            pf = np.array([s.objectives for s in optimized])
            ps = np.array([s.variables for s in optimized])
            self.pf = pf
            self.ps = ps
            pf[:, 1] = np.exp(-1 * pf[:, 1]) # from -1*log_ei to ei
            pf[:, 2] = np.exp(-1 * pf[:, 2]) # from -1*log_pi to pi
            np.savetxt('pf%d' % iter, pf)
            np.savetxt('ps%d' % iter, ps)
            np.savetxt('dbx', self.dbx)
            np.savetxt('dby', self.dby)

            if self.debug:
                f.write("After iter %d, evaluated: %d, best is %g\n" % (iter, self.dby.size, np.min(self.dby)))
                best_lcb, best_ei, best_pi = self.model.MACE_acq(self.best_x)
                f.write('Best x, LCB: %g, EI: %g, PI: %g\n' % (best_lcb[0], best_ei[0], best_pi[0]))
                f.write('Tau = %g, eps = %g, kappa = %g, ystd = %g, ymean = %g\n' % (self.model.tau, self.model.eps, self.model.kappa, self.model.std, self.model.mean))
                f.write('Hypers:\n' + str(self.model.s) + '\n')
                evaled_x = self.dbx[-1*self.B:, :]
                evaled_y = self.dby[-1*self.B:]
                evaled_pf = self.pf[idxs]

                for i in range(self.B):
                    predy, preds = self.model.predict(evaled_x[i, :])
                    predy = predy.reshape(predy.size)
                    preds = preds.reshape(preds.size)
                    pred = [(predy[ii], preds[ii]) for ii in range(predy.size)]
                    f.write('X: ')
                    for d in range(self.dim):
                        f.write(' ' + str(evaled_x[i, d]) + ' ')
                    f.write('\n')
                    f.write('Y: ' + str(evaled_y[i, 0]) + '\n')
                    f.write('ACQ: ' + str(evaled_pf[i, :]) + '\n')
                    f.write('Pred:\n' + str(np.array(pred)) + '\n')
                f.write('---------------\n')
                f.flush()
        f.close()
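
For reference, the three objectives handed to NSGA-II inside optimize() are the acquisition values from GP.py, each averaged over the hyperparameter samples. Written out, with u as returned by GPyOpt's get_quantiles, tau the best observed value and epsilon = eps, they are approximately:

$$\mathrm{LCB}(x) = \mu(x) - \kappa\,\sigma(x),\qquad \kappa = \sqrt{2\upsilon\,\log\!\Big(t^{\,2+d/2}\,\tfrac{3\pi^{2}}{3\delta}\Big)},\qquad t = 1 + \Big\lfloor\tfrac{N - N_{\mathrm{init}}}{B}\Big\rfloor$$

$$\mathrm{EI}(x) = \sigma(x)\,\big(u\,\Phi(u) + \phi(u)\big),\qquad \mathrm{PI}(x) = \Phi(u),\qquad u = \frac{\tau - \mu(x) - \varepsilon}{\sigma(x)}$$

NSGA-II then minimizes the vector $(\mathrm{LCB}(x),\ -\log(\mathrm{EI}(x)+10^{-40}),\ -\log(\mathrm{PI}(x)+10^{-40}))$, so every point on the resulting Pareto front is a compromise between the three acquisition functions; a batch of B points is drawn at random from that front and evaluated.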
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# README

## About

The original MACE algorithm, re-implemented in Python:

- MCMC hyperparameter integration
- Matern52 kernel

## Dependencies

- [GPy](https://github.com/SheffieldML/GPy) for the GP regression
- [GPyOpt](https://github.com/SheffieldML/GPyOpt) for the `get_quantiles` helper used by the acquisition functions
- [toml](https://github.com/uiri/toml) for the config file
- [Platypus](https://github.com/Project-Platypus/Platypus) for multi-objective optimization
- numpy, scipy and sobol_seq for numerics, the L-BFGS-B local search and the Sobol initial design

## TODO

- Improve the interface
- Combine MACE with other parallel strategies such as the Kriging believer or local penalization
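
For a quick end-to-end check without the circuit testbench, the optimizer can also be pointed at any Python callable directly. A minimal sketch, assuming the dependencies above are installed at versions compatible with this code; the toy objective, bounds and budgets are made up for illustration, and the run writes dbx/dby/pf*/ps*/opt.log into the current directory just like a normal run does:

```python
import numpy as np
from MACE import MACE

def sphere(x):
    # toy objective; return a numpy scalar so MACE.optimize() can reshape it when logging
    return np.sum((x - 0.3) ** 2)

lb = np.full(3, -5.0)
ub = np.full(3, 5.0)

opt = MACE(sphere, lb, ub, num_init=10, max_iter=5, B=4,
           sobol_init=True, warp=False, mo_eval=2000, mcmc=False)
opt.init()
opt.optimize()
print(opt.best_x, opt.best_y)
```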
--------------------------------------------------------------------------------
/circuit/cec2014/.gitignore:
--------------------------------------------------------------------------------
# ignore file for CEC14 testbench
CMakeFiles/
cec2014expensive
cmake_install.cmake
CMakeCache.txt
param
result.po
*.swp
--------------------------------------------------------------------------------
/circuit/cec2014/CEC14_EOTP_C.cpp:
--------------------------------------------------------------------------------
// CEC14_EOTP_C.cpp : Defines the entry point for the console application.
#include "cec14_eotp.h"
#include <iostream>
#include <iomanip> // std::setprecision
#include <cstdlib>
#include <fstream>
#include <vector>
int test();
using namespace std;
double* read_config(size_t dim);
int main(int arg_num, char* args[])
{
    if(arg_num < 2)
    {
        cerr << "Need problem_no!" << endl;
        return EXIT_FAILURE;
    }
    bool is_good;
    size_t problem_no = atoi(args[1]);
    double* x = read_config(10);
    double ret = cec14_eotp_problems(x, 10, problem_no, &is_good);
    if(not is_good)
    {
        cerr << "Not good!" << endl;
        for(size_t i = 0; i < 10; ++i)
            cerr << x[i] << endl;
        cerr << "Ret: " << ret << endl;
        exit(EXIT_FAILURE);
    }
    else
    {
        cout << setprecision(16) << ret;
    }
    delete[] x; // freed only after the failure branch above is done printing it
    // double *x;
    // int dim, problem_no;
    // double ret;
    // bool isGood;

    // // test();
    // // Read std::cin
    // std::cin >> dim >> problem_no;
    // x = (double *)malloc(dim*sizeof(double));
    // for (int i = 0; i < dim; i++)
    //     std::cin >> std::setprecision(16) >> x[i];

    // // calculated the problem
    // ret = cec14_eotp_problems(x, dim, problem_no, &isGood);
    // // output to std::cout
    // if (isGood)
    //     std::cout << std::setprecision(16) << ret;
    // else
    //     std::cout << "NaN";

    // free(x);
    return EXIT_SUCCESS;
}

double* read_config(size_t dim)
{
    ifstream ifile;
    ifile.open("./param");
    if((!ifile.is_open()) || ifile.fail())
    {
        cerr << "param file can not open" << endl;
        exit(EXIT_FAILURE);
    }

    double param;
    vector<double> tmp_params;
    while(ifile >> setprecision(16) >> param)
    {
        tmp_params.push_back(param);
    }
    if(tmp_params.size() != dim)
    {
        cerr << "Invalid dimension" << endl;
        exit(EXIT_FAILURE);
    }

    double* xs = new double[dim];
    for(size_t i = 0; i < dim; ++i)
        xs[i] = tmp_params[i];
    return xs;
}


// int test()
// {
//
//     double x10[10];
//     double x20[20];
//     double x30[30];
//     double *x;
//     double ret;
//     int problem_count;
//     bool isGood;
//
//     int function_count, dim_count;
//
//
//     memset(x10, 1, 10 * sizeof(double));
//     memset(x20, 1, 20 * sizeof(double));
//     memset(x30, 1, 30 * sizeof(double));
//
//
//     for (function_count = 1; function_count <= 8; function_count++)
//     {
//         for (dim_count = 1; dim_count <= 3; dim_count++)
//         {
//             if (dim_count == 1)
//                 x = x10;
//             if (dim_count == 2)
//                 x = x20;
//             if (dim_count == 3)
//                 x = x30;
//
//             problem_count = 3 * (function_count - 1) + dim_count;
//
//             ret = cec14_eotp_problems(x, dim_count*10, problem_count, &isGood);
//
//             if (!isGood)
//                 printf("Error from cec14_eotp_problems.\n");
//             else
//                 printf("Problem f%d(0) = %f \n", problem_count, ret);
//         }
//     }
//     return 0;
// }
--------------------------------------------------------------------------------
/circuit/cec2014/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.2.1)
project(cec2014expensive)

set(SRC cec14_eotp.cpp CEC14_EOTP_C.cpp)
set(EXE cec2014expensive)
add_executable(${EXE} ${SRC})
--------------------------------------------------------------------------------
/circuit/cec2014/cec14_eotp.h:
--------------------------------------------------------------------------------
/*
%CEC14_EOTP_PROBLEMS Implement CEC14 expensive optimization test problems.
%   F = CEC14_EOTP_PROBLEMS(X, DIM, NO) with X as the design
%   point and NO as the problem's number (in [1,2,...,24]). F
%   should be the evaluation of F_no(x). X is a vector corresponding to
%   one design point with dimension dim.
%
%   Example
%       f = cec14_eotp_problems({0,0,0,0,0,0,0,0,0,0}, 10, 1);
%   calculates the 1st problem at (0;0;0;0;0;0;0;0;0;0)
%
%   This function will verify that dim is consistent with the definitions.
%
%   Details are to be found in B. Liu, Q. Chen and Q. Zhang, J. J. Liang,
%   P. N. Suganthan, B. Y. Qu, "Problem Definitions and Evaluation Criteria
%   for Computationally Expensive Single Objective Numerical Optimization",
%   Technical Report.
%
%   If you have any questions, please contact Qin Chen (cheqin1980@gmail.com)
%
%   $Author  : Qin Chen$
%   $E-mail  : cheqin1980@gmail.com$
%   $Revision: 1.0 $  $Date: 2013/12/21 20:14:00 $


    Function name                   Function No.    Problem No.    Dimension
    'Sphere function',              1               [1, 2, 3]      10,20,30
    'Ellipsoid function',           2               [4, 5, 6]      10,20,30
    'Rotated Ellipsoid function',   3               [7, 8, 9]      10,20,30
    'Step function',                4               [10,11,12]     10,20,30
    'Ackley function',              5               [13,14,15]     10,20,30
    'Griewank function',            6               [16,17,18]     10,20,30
    'Rosenbrock function',          7               [19,20,21]     10,20,30
    'Rastrigin function',           8               [22,23,24]     10,20,30
*/


double cec14_eotp_problems(const double * x, int dim, int no, bool * isGood);
--------------------------------------------------------------------------------
/circuit/param:
--------------------------------------------------------------------------------
.param x10 = -2.18751362851159925
.param x8 = -8.34606196653960097
.param x9 = 1.73013643850205057
.param x2 = -0.818033748491693302
.param x3 = 3.81347557909288071
.param x1 = 5.76859062106683496
.param x6 = 2.69002913975016611
.param x7 = -9.57651042005538145
.param x4 = -8.01816851675678066
.param x5 = -3.7100654030737914
--------------------------------------------------------------------------------
/circuit/result.po:
--------------------------------------------------------------------------------
3.984584125673036
--------------------------------------------------------------------------------
/circuit/run.pl:
--------------------------------------------------------------------------------
#!/usr/bin/perl
use strict;
use warnings;
use 5.010;

my @params;
my $prob = 7;
$prob = 3 * ($prob - 1) + 1;

open my $param_f, "<", "./param" or die "Can't open param:$!\n";
while(my $line = <$param_f>)
{
    chomp($line);
    if($line =~ /^\.param.*\=\s*(.*)/)
    {
        push @params, $1;
    }
    else
    {
        say "Invalid line in param:$line";
    }
}
close $param_f;
my $dim = scalar @params;


open my $cec_param, ">", "./cec2014/param" or die "Can't create cec2014/param:$!\n";
say $cec_param $_ for(@params);
close $cec_param;

run_cmd("cd cec2014 && ./cec2014expensive $prob > result.po");
open my $fh, "<", "./cec2014/result.po" or die "Can't open cec2014/result.po:$!\n";
chomp(my $fom = <$fh>);
close $fh;

open my $ofh, ">", "result.po" or die "Can't create result.po:$!\n";
say $ofh $fom;
close $ofh;
# run_cmd("cp ./cec2014/result.po ./");

sub run_cmd
{
    my $cmd = shift;
    my $ret = system($cmd);
    if($ret != 0)
    {
        die "Fail to run cmd: $cmd\n";
    }
}
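
run.pl hard-codes function 7, which per the table in cec14_eotp.h is the Rosenbrock function, and selects its 10-dimensional variant, so the problem number passed to the binary is 3*(7-1)+1 = 19. The same mapping, spelled out as a small illustrative Python helper (the helper name is made up here):

```python
def cec14_problem_no(function_no, dim):
    """Map (function number 1..8, dimension 10/20/30) to the CEC14 problem number 1..24."""
    dim_index = {10: 1, 20: 2, 30: 3}[dim]
    return 3 * (function_no - 1) + dim_index

assert cec14_problem_no(7, 10) == 19   # Rosenbrock at 10-D, as used by run.pl
```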
--------------------------------------------------------------------------------
/conf.toml:
--------------------------------------------------------------------------------
bounds = [
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20],
    [-20, 20]
]
var_name = ["x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10"]
num_init = 4
batch_size = 4
max_iter = 100
use_sobol = 1
warp = 0
mo_eval = 10000
mcmc = 0
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import GPy
import numpy as np
import matplotlib.pyplot as plt
from GP import GP_MCMC
from MACE import MACE
import obj
import toml
import multiprocessing
import sys
import os
np.set_printoptions(precision=6, linewidth=500)

# argv = sys.argv[1:]
conf = toml.load("conf.toml")

# TODO: default values for conf
batch_size = conf["batch_size"]
bounds = np.array(conf["bounds"])
max_iter = conf["max_iter"]
num_init = conf["num_init"]
var_name = conf["var_name"]
use_sobol = conf["use_sobol"]
warp = conf["warp"]
mo_eval = conf["mo_eval"]
mcmc = conf["mcmc"]

# one working copy of the circuit testbench per batch point
os.system("rm -rf work")
os.system("mkdir work")
for i in range(batch_size):
    copy_cmd = "cp -r ./circuit work/%d" % i
    os.system(copy_cmd)


obj_f = obj.Obj(bounds, num_init, var_name)
def f(x):
    return obj_f.evaluate(0, x)[0]


dim = len(bounds)
lb = np.zeros(dim)
ub = np.zeros(dim)
for i in range(dim):
    lb[i] = bounds[i][0]
    ub[i] = bounds[i][1]

# sanity check: two evaluations of the objective at the box corners
print(f(lb))
print(f(ub))


optimizer = MACE(f, lb, ub, num_init, max_iter, batch_size, sobol_init = use_sobol, warp = warp, mo_eval = mo_eval, mcmc = mcmc)
optimizer.init()
optimizer.optimize()
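
One way to address the `# TODO: default values for conf` note in main.py above is to fall back on defaults with dict.get when reading the config; a small sketch, where the fallback values are illustrative and bounds/var_name stay required:

```python
import numpy as np
import toml

conf = toml.load("conf.toml")

bounds = np.array(conf["bounds"])          # required
var_name = conf["var_name"]                # required
batch_size = conf.get("batch_size", 4)
num_init = conf.get("num_init", 1 + len(bounds))
max_iter = conf.get("max_iter", 100)
use_sobol = conf.get("use_sobol", 1)
warp = conf.get("warp", 0)
mo_eval = conf.get("mo_eval", 25000)
mcmc = conf.get("mcmc", 0)
```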
--------------------------------------------------------------------------------
/obj.py:
--------------------------------------------------------------------------------
import numpy as np
import os

class Obj(object):
    def __init__(self, bounds, num_init, var_name):
        self._dim = len(bounds)
        self._search_domain = bounds
        self._num_init_pts = num_init
        self._sample_var = 0.0
        self._observations = []
        self._num_fidelity = 0
        self.curr_best_y = np.inf
        self.curr_best_x = []
        self.var_name = var_name

    def evaluate_true(self, id, x):
        work_dir = "work/%d" % id
        param_file = "%s/param" % work_dir
        f = open(param_file, 'w')
        for i in range(len(x)):
            name = self.var_name[i]
            val = x[i]
            f.write(".param %s = %.18g\n" % (name, val))
        f.close()
        run_cmd = "cd %s; perl run.pl" % work_dir
        os.system(run_cmd)
        fom = np.loadtxt("%s/result.po" % work_dir)
        if(fom < self.curr_best_y):
            self.curr_best_y = fom
            self.curr_best_x = x
        return np.array([fom])

    def evaluate(self, id, x):
        return self.evaluate_true(id, x)
--------------------------------------------------------------------------------
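
main.py prepares one work/<i> copy of the circuit testbench per batch point and imports multiprocessing, but the optimizer currently funnels every evaluation through a single Obj instance with id 0. If parallel batch evaluation is wanted, one possible sketch (not part of this code base, and it would still need to be wired into MACE.optimize) is to give each of the B selected points its own work directory:

```python
import multiprocessing
import numpy as np

def eval_batch(obj_f, batch_x):
    """Evaluate a batch of points in parallel, point i running in work/<i>.

    obj_f is an obj.Obj instance built exactly as in main.py; note that the
    curr_best_* bookkeeping inside it is updated in the worker processes only
    and is not propagated back to the parent.
    """
    with multiprocessing.Pool(processes=len(batch_x)) as pool:
        ys = pool.starmap(obj_f.evaluate, list(enumerate(batch_x)))
    return np.array([y[0] for y in ys]).reshape(-1, 1)
```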