├── .travis.yml
├── eoa.py
├── prob
│   ├── problem.py
│   └── problems.py
├── eoa_test.py
├── opti
│   ├── optimizer.py
│   ├── de.py
│   ├── cmaes_large.py
│   ├── cmaes_maes.py
│   ├── cmaes.py
│   ├── cmaes_origin.py
│   └── cmaes_bipop.py
├── LICENSE
└── README.md
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
language: python

python:
  - "3.6"

before_install:
  - pip install codecov
  - pip install pytest==4.3.0
  - pip install pytest-cov

# command to install dependencies
install: true

# command to run tests
script:
  - pytest --cov=./

after_success:
  - codecov
--------------------------------------------------------------------------------
/eoa.py:
--------------------------------------------------------------------------------
from prob.problems import *
from opti.de import DE

from opti.cmaes import CMAES
from opti.cmaes_origin import CMAESO
from opti.cmaes_maes import CMAESM
from opti.cmaes_large import CMAESL

# beta
from opti.cmaes_bipop import CMAESB

if __name__ == "__main__":
    TaskProb = Sphere(50, -50, 50)
    Task = DE(TaskProb, 1000)
    Task.run()
--------------------------------------------------------------------------------
/prob/problem.py:
--------------------------------------------------------------------------------
class Problem(object):
    def __init__(self, dim, lb, ub, *arg1, **arg2):
        """
        initialize attributes of the problem
        :argument
            dim : dimension (stored as self.D)
            lb  : lower bound
            ub  : upper bound
        """
        self.D = dim
        self.lb = lb
        self.ub = ub

    def evaluate(self, x):
        """
        define the evaluation method
        :argument
            x : input solution in [0, 1], rescaled to [lb, ub] by subclasses
        """
        pass
--------------------------------------------------------------------------------
/eoa_test.py:
--------------------------------------------------------------------------------
from prob.problems import *

from opti.de import DE
from opti.cmaes import CMAES
from opti.cmaes_maes import CMAESM
from opti.cmaes_large import CMAESL


class TestClass(object):
    def test_1(self):
        task_p = Sphere(50, -50, 50)
        task = DE(task_p, 1000)
        task.run()
        assert task.opti_f < 1

    def test_2(self):
        task_p = Sphere(50, -50, 50)
        task = CMAES(task_p, 1000)
        task.run()
        assert task.opti_f < 1

    def test_3(self):
        task_p = Sphere(50, -50, 50)
        task = CMAESM(task_p, 1000)
        task.run()
        assert task.opti_f < 1

    def test_4(self):
        task_p = Sphere(50, -50, 50)
        task = CMAESL(task_p, 1000)
        task.run()
        assert task.opti_f < 1
--------------------------------------------------------------------------------
/opti/optimizer.py:
--------------------------------------------------------------------------------
class Optimizer(object):
    def __init__(self, func, maxgen, *arg1, **arg2):
        """
        initialize attributes of the instance
        :argument
            self.f      : problem to be solved
            self.maxgen : max generation number
            self.opti_x : optimal solution
            self.opti_f : optimal value
        """
        self.f = func
        self.maxgen = maxgen
        self.opti_x = []
        self.opti_f = 1e10

    def step(self):
        """
        a single step of the evolution process
        """
        pass

    def run(self):
        """
        control the generation number
        """
        pass

    def output(self):
        """
        return the optimal value and solution
        :return
            self.opti_f, self.opti_x
        """
        pass
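
# Note: step(), run(), and output() are intentionally empty hooks; every
# optimizer in opti/ (DE, CMAES, CMAESO, CMAESM, CMAESL, CMAESB) overrides
# all three, following the same run-loop pattern shown in eoa.py.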
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 LDNN97

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/opti/de.py:
--------------------------------------------------------------------------------
import numpy as np
from .optimizer import *


class DE(Optimizer):
    def __init__(self, func, count, popsize=50):
        super().__init__(func, count)
        self.popsize = popsize
        # population lives in [0, 1]; problems rescale to [lb, ub] in evaluate()
        self.pop = np.random.random((self.popsize, self.f.D))
        self.fit = np.zeros(self.popsize)
        for i in range(self.popsize):
            self.fit[i] = self.f.evaluate(self.pop[i])

    def step(self):
        newpop = np.zeros((self.popsize, self.f.D))
        newfit = np.zeros(self.popsize)

        cr, f = np.random.random(), np.random.random()
        for i in range(self.popsize):
            # DE/rand/1/bin: pick three mutually distinct donor vectors
            ind = np.random.choice(np.arange(self.popsize), 3, replace=False)
            j = np.random.randint(self.f.D)

            trial = np.zeros(self.f.D)
            for k in range(self.f.D):
                # the k == self.f.D - 1 clause guarantees at least one component
                # comes from the mutant vector
                if (np.random.random() < cr) or (k == self.f.D - 1):
                    trial[j] = self.pop[ind[0]][j] + f * (self.pop[ind[1]][j] - self.pop[ind[2]][j])
                    # if trial[j] > 1:
                    #     trial[j] = self.pop[i][j] + np.random.random() * (1 - self.pop[i][j])
                    # if trial[j] < 0:
                    #     trial[j] = np.random.random() * self.pop[i][j]
                else:
                    trial[j] = self.pop[i][j]
                j = (j + 1) % self.f.D

            # greedy selection: keep the trial only if it does not degrade fitness
            trial_value = self.f.evaluate(trial)
            newpop[i] = trial if trial_value <= self.fit[i] else self.pop[i]
            newfit[i] = trial_value if trial_value <= self.fit[i] else self.fit[i]

            [self.opti_f, self.opti_x] = [newfit[i], newpop[i]] if newfit[i] <= self.opti_f \
                else [self.opti_f, self.opti_x]

        self.pop = newpop
        self.fit = newfit

    def run(self):
        for i in range(self.maxgen):
            self.step()
            print(i, self.opti_f)

    def output(self):
        return self.opti_f, self.opti_x
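
# Usage sketch (hypothetical driver code, same pattern as eoa.py):
#   de = DE(Sphere(30, -50, 50), 500)
#   de.run()
#   best_f, best_x = de.output()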
--------------------------------------------------------------------------------
/opti/cmaes_large.py:
--------------------------------------------------------------------------------
import numpy as np
from .optimizer import *


class CMAESL(Optimizer):
    def __init__(self, func, maxgen):
        super().__init__(func, maxgen)

        self.nn = self.f.D
        self.sigma = 0.2
        self.xmean = np.random.random(self.nn)

        self.lam = 4 + int(3 * np.log(self.nn))
        self.mm = 4 + int(3 * np.log(self.nn))  # number of stored direction vectors (== lam here)
        self.mu = int(self.lam / 2)
        self.weights = np.array([np.log(self.mu + 0.5) - np.log(i + 1) for i in range(self.mu)])
        self.weights = self.weights / np.sum(self.weights)
        self.mueff = 1 / np.sum(self.weights ** 2)

        self.cs = 2 * self.lam / self.nn
        self.c_d = np.array([1 / (np.power(1.5, i) * self.nn) for i in range(self.mm)])
        self.c_c = np.array([self.lam / (np.power(4, i) * self.nn) for i in range(self.mm)])

        self.gen = 0
        self.ps = np.zeros(self.nn)
        self.m = np.zeros((self.mm, self.nn))

    def step(self):
        # sample: build M * z implicitly from the stored direction vectors
        pop = np.zeros((self.lam, self.nn))
        z = np.zeros((self.lam, self.nn))
        d = np.zeros((self.lam, self.nn))

        fitvals = np.zeros(self.lam)
        for i in range(self.lam):
            z[i] = np.random.normal(0, 1, self.nn)
            d[i] = z[i]
            for j in range(min(self.gen, self.mm)):
                d[i] = (1 - self.c_d[j]) * d[i] + \
                    self.c_d[j] * self.m[j] * (np.dot(self.m[j].T, d[i]))
            pop[i] = self.xmean + self.sigma * d[i]
            # boundary check

        # select
        for i in range(self.lam):
            fitvals[i] = self.f.evaluate(pop[i])
        argx = np.argsort(fitvals)
        if fitvals[argx[0]] < self.opti_f:
            self.opti_x = pop[argx[0]]
            self.opti_f = fitvals[argx[0]]

        # update
        # mean
        self.xmean = self.xmean + self.sigma * sum(self.weights[i] * d[argx[i]] for i in range(self.mu))

        # evolution path
        zz = sum(self.weights[i] * z[argx[i]] for i in range(self.mu))
        c = np.sqrt(self.cs * (2 - self.cs) * self.mueff)
        self.ps = (1 - self.cs) * self.ps + c * zz

        # direction vectors (low-rank stand-in for the covariance matrix)
        for i in range(self.mm):
            c = np.sqrt(self.mueff * self.c_c[i] * (2 - self.c_c[i]))
            self.m[i] = (1 - self.c_c[i]) * self.m[i] + c * zz

        # step-size
        self.sigma *= np.exp((self.cs / 2) * (np.sum(self.ps ** 2) / self.nn - 1))

        self.gen += 1

    def run(self):
        for i in range(self.maxgen):
            self.step()
            print(i, self.opti_f)

    def output(self):
        return self.opti_f, self.opti_x
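
# The inner sampling loop above applies, in order, the rank-one maps
#   d <- (1 - c_d[j]) * d + c_d[j] * m[j] * (m[j]^T d),  j = 0 .. min(gen, mm) - 1,
# which plays the role of multiplying z by a transformation matrix M without
# ever storing an n-by-n matrix -- the limited-memory idea behind LM-MA-ES.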
--------------------------------------------------------------------------------
/opti/cmaes_maes.py:
--------------------------------------------------------------------------------
import numpy as np
from .optimizer import *


class CMAESM(Optimizer):
    def __init__(self, func, count):
        super().__init__(func, count)
        self.nn = self.f.D

        self.xx = np.random.random(self.nn)
        self.xmean = np.copy(self.xx)
        self.sigma = 1

        self.lam = 4 + int(3 * np.log(self.nn))
        self.mu = int(self.lam / 2)
        self.weights = np.array([np.log(self.mu + 0.5) - np.log(i + 1) for i in range(self.mu)])
        self.weights = self.weights / np.sum(self.weights)
        self.mueff = 1 / np.sum(self.weights ** 2)

        self.cs = (self.mueff + 2) / (self.nn + self.mueff + 5)
        self.c1 = 2 / ((self.nn + 1.3) ** 2 + self.mueff)
        self.cmu = min([1 - self.c1, 2 * (self.mueff - 2 + 1 / self.mueff) / ((self.nn + 2) ** 2 + self.mueff)])

        self.ps = np.zeros(self.nn)
        self.M = np.eye(self.nn)

    def step(self):
        # sample: d = M z, x = mean + sigma * d
        pop = np.zeros((self.lam, self.nn))
        z = np.zeros((self.lam, self.nn))
        d = np.zeros((self.lam, self.nn))
        for i in range(self.lam):
            z[i] = np.random.normal(0, 1, self.nn)
            d[i] = np.dot(self.M, z[i])
            pop[i] = self.xmean + self.sigma * d[i]

        # sort and update mean
        fitvals = np.zeros(self.lam)
        for i in range(self.lam):
            fitvals[i] = self.f.evaluate(pop[i])
        argx = np.argsort(fitvals)
        if fitvals[argx[0]] < self.opti_f:
            self.opti_x = pop[argx[0]]
            self.opti_f = fitvals[argx[0]]
        self.xmean = self.xmean + self.sigma * sum(self.weights[i] * d[argx[i]] for i in range(self.mu))

        # update evolution path
        zz = sum(self.weights[i] * z[argx[i]] for i in range(self.mu))
        c = np.sqrt(self.cs * (2 - self.cs) * self.mueff)
        self.ps -= self.cs * self.ps
        self.ps += c * zz

        # update matrix
        one = np.eye(self.nn, self.nn)
        part1 = one
        part2o = self.ps.reshape((self.nn, 1))
        part2t = self.ps.reshape((1, self.nn))
        part2 = self.c1 / 2 * (np.dot(part2o, part2t) - one)
        part3 = np.zeros((self.nn, self.nn))
        for i in range(self.mu):
            part3o = z[argx[i]].reshape((self.nn, 1))
            part3t = z[argx[i]].reshape((1, self.nn))
            part3 += self.weights[i] * np.dot(part3o, part3t)
        part3 = self.cmu / 2 * (part3 - one)
        self.M = np.dot(self.M, part1 + part2 + part3)

        # update step-size
        self.sigma *= np.exp((self.cs / 2) * (np.sum(self.ps ** 2) / self.nn - 1))

    def run(self):
        for i in range(self.maxgen):
            self.step()
            print(i, self.opti_f)

    def output(self):
        return self.opti_f, self.opti_x
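
# The matrix update above is the MA-ES rule (no covariance matrix, no
# eigendecomposition):
#   M <- M * (I + (c1/2) * (ps ps^T - I) + (cmu/2) * (sum_i w_i z_i z_i^T - I))
# which replaces the C update and C^(1/2) factorization of standard CMA-ES.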
--------------------------------------------------------------------------------
/prob/problems.py:
--------------------------------------------------------------------------------
import numpy as np
from .problem import Problem


class Sphere(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)
        re = sum(np.power(x[i], 2) for i in range(self.D))
        return re


class Rosenbrock(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)
        re = 0
        for i in range(self.D - 1):
            re += 100 * np.power((np.power(x[i], 2) - x[i + 1]), 2) + np.power((x[i] - 1), 2)
        return re


class Ackley(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)

        # shift operation
        # x = x - 42.0969

        part1 = 0
        for i in range(self.D):
            part1 += np.power(x[i], 2)
        part2 = 0
        for i in range(self.D):
            part2 += np.cos(2 * np.pi * x[i])
        re = -20 * np.exp(-0.2 * np.sqrt(part1 / self.D)) \
            - np.exp(part2 / self.D) + 20 + np.e
        return re


class Rastrigin(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)
        re = 0
        for i in range(self.D):
            re += x[i] ** 2 - 10 * np.cos(2 * np.pi * x[i]) + 10
        return re


class Griewank(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)
        part1, part2 = 0, 1
        for i in range(self.D):
            part1 += x[i] ** 2
            part2 *= np.cos(x[i] / np.sqrt(i + 1))
        re = 1 + part1 / 4000 - part2
        return re


class Weierstrass(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)
        part1 = 0
        for i in range(self.D):
            for j in range(21):
                part1 += np.power(0.5, j) * np.cos(2 * np.pi * np.power(3, j) * (x[i] + 0.5))
        part2 = 0
        for i in range(21):
            part2 += np.power(0.5, i) * np.cos(2 * np.pi * np.power(3, i) * 0.5)
        re = part1 - self.D * part2
        return re


class Schwefel(Problem):

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)
        part1 = 0
        for i in range(self.D):
            part1 += x[i] * np.sin(np.sqrt(np.abs(x[i])))
        re = 418.9829 * self.D - part1
        return re
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Evolutionary Optimization Algorithms

[![Build Status](https://travis-ci.com/LDNN97/Evolutionary-Optimization-Algorithms.svg?branch=master)](https://travis-ci.com/LDNN97/Evolutionary-Optimization-Algorithms) [![codecov](https://codecov.io/gh/LDNN97/Evolutionary-Optimization-Algorithms/branch/master/graph/badge.svg)](https://codecov.io/gh/LDNN97/Evolutionary-Optimization-Algorithms)

Papers:

DE: [Differential evolution – a simple and efficient heuristic for global optimization over continuous spaces](https://link.springer.com/article/10.1023/A:1008202821328)

CMA-ES: [The CMA Evolution Strategy: A Tutorial](https://arxiv.org/pdf/1604.00772)

Restart CMA-ES: [A Restart CMA Evolution Strategy With Increasing Population Size](https://ieeexplore.ieee.org/abstract/document/1554902/)

MA-ES: [Simplify Your Covariance Matrix Adaptation Evolution Strategy](https://ieeexplore.ieee.org/abstract/document/7875115/)

LM-CMA: [LM-CMA: An Alternative to L-BFGS for Large-Scale Black-Box Optimization](https://www.mitpressjournals.org/doi/abs/10.1162/EVCO_a_00168)

LM-MA: [Large Scale Black-Box Optimization by Limited-Memory Matrix Adaptation](https://ieeexplore.ieee.org/abstract/document/8410043/)

ES for RL: [Evolution Strategies as a Scalable Alternative to Reinforcement Learning](https://arxiv.org/abs/1703.03864)

## Usage

1. clone the repository

2. `import eoa`

3. select an existing problem or define your own (see the sketch below)

4. select an algorithm to find the optimum

You can look into `eoa.py` to find more information about the usage.
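
### Defining your own problem

A minimal sketch (the class name, function, and bounds are just an illustration): subclass `Problem` and implement `evaluate`. The optimizers work in `[0, 1]`, so `evaluate` should first rescale `x` to `[lb, ub]`, as the built-in problems do.

``` python
import numpy as np
from prob.problem import Problem


class AbsSum(Problem):
    """f(x) = sum_i |x_i|, minimum 0 at the origin."""

    def __init__(self, dim, lb, ub):
        super().__init__(dim, lb, ub)

    def evaluate(self, x):
        x = self.lb + x * (self.ub - self.lb)  # rescale from [0, 1] to [lb, ub]
        return np.sum(np.abs(x))
```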
## Example

``` python
TaskProb = Sphere(50, -50, 50)
Task = DE(TaskProb, 1000)
Task.run()
```

### Class for optimizer

``` python
class Optimizer(object):
    def __init__(self, func, maxgen, *arg1, **arg2):
        """
        initialize attributes of the instance
        :argument
            self.f      : problem to be solved
            self.maxgen : max generation number
            self.opti_x : optimal solution
            self.opti_f : optimal value
        """
        self.f = func
        self.maxgen = maxgen
        self.opti_x = []
        self.opti_f = 1e10

    def step(self):
        """
        a single step of the evolution process
        """
        pass

    def run(self):
        """
        control the generation number
        """
        pass

    def output(self):
        """
        return the optimal value and solution
        :return
            self.opti_f, self.opti_x
        """
        pass
```

### Class for problem

``` python
class Problem(object):
    def __init__(self, dim, lb, ub, *arg1, **arg2):
        """
        initialize attributes of the problem
        :argument
            dim : dimension (stored as self.D)
            lb  : lower bound
            ub  : upper bound
        """
        self.D = dim
        self.lb = lb
        self.ub = ub

    def evaluate(self, x):
        """
        define the evaluation method
        :argument
            x : input solution in [0, 1], rescaled to [lb, ub] by subclasses
        """
        pass
```
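
### Retrieving the result

Every optimizer stores the best value and solution found so far in `opti_f` and `opti_x`, and `output()` returns them as `(opti_f, opti_x)`. A short sketch (assuming the same imports as `eoa.py`; any problem/optimizer pairing works the same way):

``` python
task_p = Rastrigin(30, -5.12, 5.12)
task = CMAES(task_p, 500)
task.run()
best_f, best_x = task.output()
print(best_f)
```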
--------------------------------------------------------------------------------
/opti/cmaes.py:
--------------------------------------------------------------------------------
import numpy as np
from .optimizer import *


class CMAES(Optimizer):
    def __init__(self, func, count):
        super().__init__(func, count)

        self.nn = self.f.D
        self.xx = np.random.random(self.nn)
        self.xmean = np.copy(self.xx)
        self.sigma = 1

        self.lam = 4 + int(3 * np.log(self.nn))
        self.mu = int(self.lam / 2)
        self.weights = np.array([np.log(self.mu + 0.5) - np.log(i + 1) for i in range(self.mu)])
        self.weights = self.weights / np.sum(self.weights)
        self.mueff = 1 / np.sum(self.weights ** 2)

        self.cc = (4 + self.mueff / self.nn) / (self.nn + 4 + 2 * self.mueff / self.nn)
        self.cs = (self.mueff + 2) / (self.nn + self.mueff + 5)
        self.c1 = 2 / ((self.nn + 1.3) ** 2 + self.mueff)
        self.cmu = min([1 - self.c1, 2 * (self.mueff - 2 + 1 / self.mueff) / ((self.nn + 2) ** 2 + self.mueff)])
        self.damps = 1 + self.cs + 2 * max([0, ((self.mueff - 1) / self.nn) ** 0.5 - 1])

        self.pc, self.ps = np.zeros(self.nn), np.zeros(self.nn)
        self.B = np.eye(self.nn)
        self.D = np.ones(self.nn)
        self.C = np.eye(self.nn)
        self.M = np.eye(self.nn)

    def step(self):
        # sample: C = B diag(D^2) B^T, so M = B diag(D) satisfies M M^T = C
        self.D, self.B = np.linalg.eigh(self.C)
        self.D = self.D ** 0.5
        self.M = self.B * self.D
        pop = np.zeros((self.lam, self.nn))
        z = np.zeros((self.lam, self.nn))
        d = np.zeros((self.lam, self.nn))
        for i in range(self.lam):
            z[i] = np.random.normal(0, 1, self.nn)
            d[i] = np.dot(self.M, z[i])
            pop[i] = self.xmean + self.sigma * d[i]

        # sort and update mean
        fitvals = np.zeros(self.lam)
        for i in range(self.lam):
            fitvals[i] = self.f.evaluate(pop[i])
        argx = np.argsort(fitvals)
        if fitvals[argx[0]] < self.opti_f:
            self.opti_x = pop[argx[0]]
            self.opti_f = fitvals[argx[0]]

        self.xmean = self.xmean + self.sigma * sum(self.weights[i] * d[argx[i]] for i in range(self.mu))

        # update evolution paths
        zz = sum(self.weights[i] * z[argx[i]] for i in range(self.mu))
        c = np.sqrt(self.cs * (2 - self.cs) * self.mueff)
        self.ps -= self.cs * self.ps
        self.ps += c * zz
        dd = sum(self.weights[i] * d[argx[i]] for i in range(self.mu))
        c = np.sqrt(self.cc * (2 - self.cc) * self.mueff)
        self.pc -= self.cc * self.pc
        self.pc += c * dd

        # update covariance matrix: rank-one (pc) plus rank-mu (selected steps)
        part1 = (1 - self.c1 - self.cmu) * self.C
        part2o = self.pc.reshape(self.nn, 1)
        part2t = self.pc.reshape(1, self.nn)
        part2 = self.c1 * np.dot(part2o, part2t)
        part3 = np.zeros((self.nn, self.nn))
        for i in range(self.mu):
            part3o = d[argx[i]].reshape(self.nn, 1)
            part3t = d[argx[i]].reshape(1, self.nn)
            part3 += self.cmu * self.weights[i] * np.dot(part3o, part3t)
        self.C = part1 + part2 + part3

        # update step-size
        self.sigma *= np.exp((self.cs / 2) * (np.sum(self.ps ** 2) / self.nn - 1))

    def run(self):
        for i in range(self.maxgen):
            self.step()
            print(i, self.opti_f)

    def output(self):
        return self.opti_f, self.opti_x
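
# Summary of one generation, matching the code above:
#   x_i   = mean + sigma * B diag(D) z_i,   z_i ~ N(0, I)
#   mean <- mean + sigma * sum_i w_i d_{i:lam}
#   C    <- (1 - c1 - cmu) C + c1 pc pc^T + cmu sum_i w_i d_{i:lam} d_{i:lam}^T
#   sigma <- sigma * exp((cs / 2) * (||ps||^2 / n - 1))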
--------------------------------------------------------------------------------
/opti/cmaes_origin.py:
--------------------------------------------------------------------------------
import copy
import numpy as np
from .optimizer import *


class CMAESO(Optimizer):
    def __init__(self, func, count):
        super().__init__(func, count)

        self.nn = self.f.D
        self.xx = np.random.random(self.nn)
        self.xmean = np.copy(self.xx)
        self.sigma = 0.2

        self.lam = 4 + int(3 * np.log(self.nn))
        self.mu = int(self.lam / 2)
        self.weights = [np.log(self.mu + 0.5) - np.log(i + 1) for i in range(self.mu)]
        self.weights = [w / sum(self.weights) for w in self.weights]
        # variance-effective selection mass; equals 1 / sum(w^2) since the weights sum to 1
        self.mueff = sum(self.weights) ** 2 / sum(w ** 2 for w in self.weights)

        self.cc = (4 + self.mueff / self.nn) / (self.nn + 4 + 2 * self.mueff / self.nn)
        self.cs = (self.mueff + 2) / (self.nn + self.mueff + 5)
        self.c1 = 2 / ((self.nn + 1.3) ** 2 + self.mueff)
        self.cmu = min([1 - self.c1, 2 * (self.mueff - 2 + 1 / self.mueff) / ((self.nn + 2) ** 2 + self.mueff)])
        # damping for the step-size update
        self.damps = 1 + self.cs + 2 * max([0, ((self.mueff - 1) / self.nn) ** 0.5 - 1])

        self.pc, self.ps = np.zeros(self.nn), np.zeros(self.nn)
        self.B = np.eye(self.nn)
        self.D = np.ones(self.nn)
        self.C = np.eye(self.nn)
        self.invsqrtc = np.eye(self.nn)

    def step(self):
        # sample; invsqrtc = B diag(1/D) B^T = C^(-1/2)
        self.D, self.B = np.linalg.eigh(self.C)
        self.D = self.D ** 0.5
        for i in range(self.nn):
            for j in range(self.nn):
                self.invsqrtc[i][j] = sum(self.B[i][k] * self.B[j][k] / self.D[k] for k in range(self.nn))

        newpop = []
        for i in range(self.lam):
            z = self.D * np.random.normal(0, 1, len(self.D))
            nn = self.xmean + self.sigma * np.dot(self.B, z)
            newpop.append(nn)

        # selection and recombination
        xmeanold = copy.deepcopy(self.xmean)
        fitvals = []
        for xx in newpop:
            fit = self.f.evaluate(xx)
            fitvals.append(fit)
        argx = np.argsort(fitvals)
        if fitvals[argx[0]] < self.opti_f:
            self.opti_x = newpop[argx[0]]
            self.opti_f = fitvals[argx[0]]
        self.xmean = sum(self.weights[j] * newpop[argx[j]] for j in range(self.mu))

        # update evolution paths
        y = self.xmean - xmeanold
        z = np.dot(self.invsqrtc, y)
        c = (self.cs * (2 - self.cs) * self.mueff) ** 0.5 / self.sigma
        self.ps -= self.cs * self.ps
        self.ps += c * z
        c = (self.cc * (2 - self.cc) * self.mueff) ** 0.5 / self.sigma
        self.pc -= self.cc * self.pc
        self.pc += c * y

        # update covariance matrix (rank-one + rank-mu, element-wise)
        c1a = self.c1
        for i in range(self.nn):
            for j in range(self.nn):
                cmuij = sum(self.weights[k] * (newpop[argx[k]][i] - xmeanold[i])
                            * (newpop[argx[k]][j] - xmeanold[j]) for k in range(self.mu)) / self.sigma ** 2
                self.C[i][j] += (-c1a - self.cmu) * self.C[i][j] + self.c1 * self.pc[i] * self.pc[j] + self.cmu * cmuij

        # update step-size
        self.sigma *= np.exp(min(0.6, (self.cs / self.damps) * (sum(x ** 2 for x in self.ps) / self.nn - 1) / 2))

    def run(self):
        for i in range(self.maxgen):
            self.step()
            print(i, self.opti_f)

    def output(self):
        return self.opti_f, self.opti_x
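
# Note: unlike opti/cmaes.py, which adapts in z-space via M = B diag(D), this
# version follows the tutorial's original formulation: it tracks C explicitly,
# conjugates the mean shift y by C^(-1/2) for the sigma path, and damps the
# step-size update with `damps`.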
--------------------------------------------------------------------------------
/opti/cmaes_bipop.py:
--------------------------------------------------------------------------------
import numpy as np
from .optimizer import *


class CMAES(Optimizer):
    def __init__(self, func, maxgen, maxeval, pop_size, s_size):
        super().__init__(func, maxgen)

        self.nn = self.f.D
        self.xx = np.random.random(self.nn)
        self.xmean = np.copy(self.xx)
        self.sigma = s_size

        self.lam = pop_size
        self.mu = int(self.lam / 2)
        self.weights = [np.log(self.mu + 0.5) - np.log(i + 1) for i in range(self.mu)]
        self.weights = [w / sum(self.weights) for w in self.weights]
        # variance-effective selection mass
        self.mueff = sum(self.weights) ** 2 / sum(w ** 2 for w in self.weights)

        self.cc = (4 + self.mueff / self.nn) / (self.nn + 4 + 2 * self.mueff / self.nn)
        self.cs = (self.mueff + 2) / (self.nn + self.mueff + 5)
        self.c1 = 2 / ((self.nn + 1.3) ** 2 + self.mueff)
        self.cmu = min([1 - self.c1, 2 * (self.mueff - 2 + 1 / self.mueff) / ((self.nn + 2) ** 2 + self.mueff)])
        # damping for the step-size update
        self.damps = 1 + self.cs + 2 * max([0, ((self.mueff - 1) / self.nn) ** 0.5 - 1])

        self.pc, self.ps = np.zeros(self.nn), np.zeros(self.nn)
        self.B = np.eye(self.nn)
        self.D = np.ones(self.nn)
        self.C = np.eye(self.nn)
        self.M = np.eye(self.nn)

        self.evals = 0
        self.maxeval = maxeval
        self.gen_fit = []
        self.tc = False  # termination criterion flag

    def step(self):
        # sample
        self.D, self.B = np.linalg.eigh(self.C)
        self.D = self.D ** 0.5
        self.M = self.B * self.D
        newpop, z, d = [], [], []
        for i in range(self.lam):
            zz = np.random.normal(0, 1, self.nn)
            dd = np.dot(self.M, zz)
            nn = self.xmean + self.sigma * dd

            # check boundary: pull violated components back inside [0, 1],
            # then recover the d and z consistent with the repaired point
            for j in range(len(nn)):
                if nn[j] > 1:
                    nn[j] = self.xmean[j] + np.random.random() * (1 - self.xmean[j])
                if nn[j] < 0:
                    nn[j] = np.random.random() * (self.xmean[j])
            dd = (nn - self.xmean) / self.sigma
            invm = np.linalg.inv(self.M)
            zz = np.dot(invm, dd)

            z.append(zz), d.append(dd), newpop.append(nn)

        # sort and update mean
        fitvals = []
        for xx in newpop:
            self.evals += 1

            if self.evals >= self.maxeval:
                self.tc = True

            fit = self.f.evaluate(xx)
            fitvals.append(fit)
        argx = np.argsort(fitvals)
        if fitvals[argx[0]] < self.opti_f:
            self.opti_x = newpop[argx[0]]
            self.opti_f = fitvals[argx[0]]
        # recombination: the new mean is recomputed directly from the selected points
        self.xmean = sum(self.weights[i] * newpop[argx[i]] for i in range(self.mu))

        # stop when the best step becomes numerically negligible
        deltax = self.sigma * d[argx[0]]
        dist = np.linalg.norm(deltax)
        if dist < 1e-7:
            self.tc = True

        # update evolution paths
        zz = sum(self.weights[i] * z[argx[i]] for i in range(self.mu))
        c = (self.cs * (2 - self.cs) * self.mueff) ** 0.5
        self.ps -= self.cs * self.ps
        self.ps += c * zz
        dd = sum(self.weights[i] * d[argx[i]] for i in range(self.mu))
        c = (self.cc * (2 - self.cc) * self.mueff) ** 0.5
        self.pc -= self.cc * self.pc
        self.pc += c * dd

        # update covariance matrix
        part1 = (1 - self.c1 - self.cmu) * self.C
        part2o = self.pc.reshape(self.nn, 1)
        part2t = self.pc.reshape(1, self.nn)
        part2 = self.c1 * np.dot(part2o, part2t)
        part3 = np.zeros((self.nn, self.nn))
        for i in range(self.mu):
            part3o = d[argx[i]].reshape(self.nn, 1)
            part3t = d[argx[i]].reshape(1, self.nn)
            part3 += self.cmu * self.weights[i] * np.dot(part3o, part3t)
        self.C = part1 + part2 + part3

        # update step-size
        self.sigma *= np.exp(min(0.6, (self.cs / self.damps) * (sum(x ** 2 for x in self.ps) / self.nn - 1) / 2))

    def run(self):
        for i in range(self.maxgen):
            self.step()

            print(i, "%.6f" % self.opti_f)

            # stagnation check: stop if the best value improved by less than
            # 1e-6 over the last 20 generations
            self.gen_fit.append(self.opti_f)
            if i > 19:
                if self.gen_fit[i - 20] - self.gen_fit[i] < 0.000001:
                    self.tc = True

            if self.tc:
                break

    def output(self):
        return self.opti_f, self.opti_x
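
# BIPOP restart wrapper: after a first run with the default population size,
# it interleaves two restart regimes. The "global" regime doubles the
# population on each restart (the IPOP idea); the "local" regime re-runs with
# a small population, a randomized smaller step-size, and a generation budget
# tied to what the large regime has consumed so far.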

class CMAESB(Optimizer):
    def __init__(self, func, maxgen, maxeval):
        super().__init__(func, maxgen)
        self.max_eval = maxeval
        self.pop_size = 4 + int(3 * np.log(self.f.D))
        self.s_size = 1 / np.sqrt(self.f.D)

        self.opti_f = 1e10
        self.opti_x = []

    def update(self, f, x):
        if f < self.opti_f:
            self.opti_f = f
            self.opti_x = x

    def output(self):
        return self.opti_f, self.opti_x

    def run(self):
        print("Normal", self.pop_size)
        cma_normal = CMAES(self.f, self.maxgen, self.max_eval, self.pop_size, self.s_size)
        cma_normal.run()
        self.update(cma_normal.opti_f, cma_normal.opti_x)
        n, n_s = 0, 0                   # restarts so far / small restarts so far
        b_1 = cma_normal.evals          # evaluations used by the first run
        b_l, b_s = 0, 0                 # evaluations used by the large/small regimes
        while True:
            n = n + 1
            pop_size = np.power(2, n - n_s) * self.pop_size
            if n > 2 and b_s < b_l:
                s_size = self.s_size / np.power(100, np.random.random())
                pops_size = int(self.pop_size * np.power((pop_size / (2 * self.pop_size)), np.random.random() ** 2))
                gen = int(b_l / (2 * pops_size))
                print("Local", pops_size)
                cma_local = CMAES(self.f, gen, self.max_eval, pops_size, s_size)
                cma_local.run()
                self.update(cma_local.opti_f, cma_local.opti_x)
                b_s += cma_local.evals
                n_s += 1
            else:
                print("Global", pop_size)
                cma_global = CMAES(self.f, self.maxgen, self.max_eval, pop_size, self.s_size)
                cma_global.run()
                self.update(cma_global.opti_f, cma_global.opti_x)
                b_l += cma_global.evals
            if b_1 + b_s + b_l >= self.max_eval:
                break
--------------------------------------------------------------------------------