├── .gitignore ├── LICENSE.txt ├── PUREPLES.png ├── README.md ├── pureples ├── __init__.py ├── es_hyperneat │ ├── __init__.py │ └── es_hyperneat.py ├── experiments │ ├── mountain_car │ │ ├── config_cppn_mountain_car │ │ ├── config_neat_mountain_car │ │ ├── es_hyperneat_mountain_car.py │ │ ├── hyperneat_mountain_car.py │ │ ├── neat_mountain_car.py │ │ └── run_all_mountain_car.py │ ├── pole_balancing │ │ ├── config_cppn_pole_balancing │ │ ├── config_neat_pole_balancing │ │ ├── es_hyperneat_pole_balancing.py │ │ ├── hyperneat_pole_balancing.py │ │ ├── neat_pole_balancing.py │ │ └── run_all_pole_balancing.py │ └── xor │ │ ├── config_cppn_xor │ │ ├── config_neat_xor │ │ ├── es_hyperneat_xor.py │ │ ├── hyperneat_xor.py │ │ ├── neat_xor.py │ │ └── run_all_xor.py ├── hyperneat │ ├── __init__.py │ └── hyperneat.py └── shared │ ├── __init__.py │ ├── create_cppn.py │ ├── gym_runner.py │ ├── substrate.py │ ├── test_cppn.py │ └── visualize.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | pureples/experiments/*/es_hyperneat_*_small_cppn 7 | pureples/experiments/*/es_hyperneat_*_small_cppn.pdf 8 | pureples/experiments/*/es_hyperneat_*_small_cppn.pkl 9 | pureples/experiments/*/es_hyperneat_*_small_winner.png 10 | pureples/experiments/*/hyperneat_*_cppn 11 | pureples/experiments/*/hyperneat_*_cppn.pdf 12 | pureples/experiments/*/hyperneat_*_cppn.pkl 13 | pureples/experiments/*/hyperneat_*_winner 14 | pureples/experiments/*/hyperneat_*_winner.pdf 15 | pureples/experiments/*/neat_*_winner 16 | pureples/experiments/*/neat_*_winner.pdf 17 | pureples/experiments/*/winner_neat_*.pkl 18 | 19 | # C extensions 20 | *.so 21 | 22 | # Distribution / packaging 23 | .Python 24 | env/ 25 | build/ 26 | develop-eggs/ 27 | dist/ 28 | downloads/ 29 | eggs/ 30 | .eggs/ 31 | lib/ 32 | lib64/ 33 | parts/ 34 | sdist/ 35 | var/ 36 | *.egg-info/ 37 | 
.installed.cfg 38 | *.egg 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .coverage 54 | .coverage.* 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | *,cover 59 | .hypothesis/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | target/ 81 | 82 | # IPython Notebook 83 | .ipynb_checkpoints 84 | 85 | # pyenv 86 | .python-version 87 | 88 | # celery beat schedule file 89 | celerybeat-schedule 90 | 91 | # dotenv 92 | .env 93 | 94 | # virtualenv 95 | venv/ 96 | ENV/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | 101 | # Rope project settings 102 | .ropeproject 103 | 104 | # Trash 105 | .png -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Adrian Westh & Simon Krabbe Munck 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial 
portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /PUREPLES.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ukuleleplayer/pureples/312f6c7c6f6dc8352365fd6543725cc6b935a1f6/PUREPLES.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | REPLES LOGO 2 | 3 | PUREPLES - Pure Python Library for ES-HyperNEAT 4 | =============================================== 5 | 6 | About 7 | ----- 8 | This is a library of evolutionary algorithms with a focus on neuroevolution, implemented in pure python, depending on the [neat-python](https://github.com/CodeReclaimers/neat-python) implementation. It contains a faithful implementation of both HyperNEAT and ES-HyperNEAT which are briefly described below. 9 | 10 | **NEAT** (NeuroEvolution of Augmenting Topologies) is a method developed by Kenneth O. Stanley for evolving arbitrary neural networks. 11 | **HyperNEAT** (Hypercube-based NEAT) is a method developed by Kenneth O. Stanley utilizing NEAT. It is a technique for evolving large-scale neural networks using the geometric regularities of the task domain. 12 | **ES-HyperNEAT** (Evolvable-substrate HyperNEAT) is a method developed by Sebastian Risi and Kenneth O. Stanley utilizing HyperNEAT. 
It is a technique for evolving large-scale neural networks using the geometric regularities of the task domain. In contrast to HyperNEAT, the substrate used during evolution is able to evolve. This rids the user of some initial work and often creates a more suitable substrate. 13 | 14 | The library is extensible in regards to easy transition between experimental domains. 15 | 16 | Getting started 17 | --------------- 18 | This section briefly describes how to install and run experiments. 19 | 20 | ### Installation Guide 21 | First, make sure you have the dependencies installed: `numpy`, `neat-python`, `graphviz`, `matplotlib` and `gym`. 22 | All the above can be installed using [pip](https://pip.pypa.io/en/stable/installing/). 23 | Next, download the source code and run `setup.py` (`pip install .`) from the root folder. Now you're able to use **PUREPLES**! 24 | 25 | ### Experimenting 26 | How to experiment using NEAT will not be described, since this is the responsibility of the `neat-python` library. 27 | 28 | Setting up an experiment for **HyperNEAT**: 29 | * Define a substrate with input nodes and output nodes as a list of tuples. The hidden nodes is a list of lists of tuples where the inner lists represent layers. The first list is the topmost layer, the last the bottommost. 30 | * Create a configuration file defining various NEAT specific parameters which are used for the CPPN. 31 | * Define a fitness function setting the fitness of each genome. This is where the CPPN and the ANN is constructed for each generation - use the `create_phenotype_network` method from the `hyperneat` module. 32 | * Create a population with the configuration file made in (2). 33 | * Run the population with the fitness function made in (3) and the configuration file made in (2). The output is the genome solving the task or the one closest to solving it. 
34 | 35 | Setting up an experiment for **ES-HyperNEAT**: 36 | Use the same setup as HyperNEAT except for: 37 | * Not declaring hidden nodes when defining the substrate. 38 | * Declaring ES-HyperNEAT specific parameters. 39 | * Using the `create_phenotype_network` method residing in the `es_hyperneat` module when creating the ANN. 40 | 41 | If one is trying to solve an experiment defined by the [OpenAI Gym](https://gym.openai.com/) it is even easier to experiment. In the `shared` module a file called `gym_runner` is able to do most of the work. Given the number of generations, the environment to run, a configuration file, and a substrate, the relevant runner will take care of everything regarding population, fitness function etc. 42 | 43 | Please refer to the sample experiments included for further details on experimenting. 44 | 45 | -------------------------------------------------------------------------------- /pureples/__init__.py: -------------------------------------------------------------------------------- 1 | import pureples.shared as shared 2 | import pureples.hyperneat as hyperneat 3 | import pureples.es_hyperneat as es_hyperneat 4 | 5 | -------------------------------------------------------------------------------- /pureples/es_hyperneat/__init__.py: -------------------------------------------------------------------------------- 1 | from pureples.es_hyperneat.es_hyperneat import ESNetwork, find_pattern 2 | -------------------------------------------------------------------------------- /pureples/es_hyperneat/es_hyperneat.py: -------------------------------------------------------------------------------- 1 | """ 2 | All logic concerning ES-HyperNEAT resides here. 3 | """ 4 | import copy 5 | import neat 6 | import numpy as np 7 | from pureples.hyperneat.hyperneat import query_cppn 8 | from pureples.shared.visualize import draw_es 9 | 10 | 11 | class ESNetwork: 12 | """ 13 | The evolvable substrate network. 
14 | """ 15 | 16 | def __init__(self, substrate, cppn, params): 17 | self.substrate = substrate 18 | self.cppn = cppn 19 | self.initial_depth = params["initial_depth"] 20 | self.max_depth = params["max_depth"] 21 | self.variance_threshold = params["variance_threshold"] 22 | self.band_threshold = params["band_threshold"] 23 | self.iteration_level = params["iteration_level"] 24 | self.division_threshold = params["division_threshold"] 25 | self.max_weight = params["max_weight"] 26 | self.connections = set() 27 | # Number of layers in the network. 28 | self.activations = 2 ** params["max_depth"] + 1 29 | activation_functions = neat.activations.ActivationFunctionSet() 30 | self.activation = activation_functions.get(params["activation"]) 31 | 32 | def create_phenotype_network(self, filename=None): 33 | """ 34 | Create a RecurrentNetwork using the ES-HyperNEAT approach. 35 | """ 36 | input_coordinates = self.substrate.input_coordinates 37 | output_coordinates = self.substrate.output_coordinates 38 | 39 | input_nodes = list(range(len(input_coordinates))) 40 | output_nodes = list(range(len(input_nodes), len( 41 | input_nodes)+len(output_coordinates))) 42 | hidden_idx = len(input_coordinates)+len(output_coordinates) 43 | 44 | coordinates, indices, draw_connections, node_evals = [], [], [], [] 45 | nodes = {} 46 | 47 | coordinates.extend(input_coordinates) 48 | coordinates.extend(output_coordinates) 49 | indices.extend(input_nodes) 50 | indices.extend(output_nodes) 51 | 52 | # Map input and output coordinates to their IDs. 53 | coords_to_id = dict(zip(coordinates, indices)) 54 | 55 | # Where the magic happens. 56 | hidden_nodes, connections = self.es_hyperneat() 57 | 58 | # Map hidden coordinates to their IDs. 59 | for x, y in hidden_nodes: 60 | coords_to_id[x, y] = hidden_idx 61 | hidden_idx += 1 62 | 63 | # For every coordinate: 64 | # Check the connections and create a node with corresponding connections if appropriate. 
65 | for (x, y), idx in coords_to_id.items(): 66 | for c in connections: 67 | if c.x2 == x and c.y2 == y: 68 | draw_connections.append(c) 69 | if idx in nodes: 70 | initial = nodes[idx] 71 | initial.append((coords_to_id[c.x1, c.y1], c.weight)) 72 | nodes[idx] = initial 73 | else: 74 | nodes[idx] = [(coords_to_id[c.x1, c.y1], c.weight)] 75 | 76 | # Combine the indices with the connections/links; 77 | # forming node_evals used by the RecurrentNetwork. 78 | for idx, links in nodes.items(): 79 | node_evals.append((idx, self.activation, sum, 0.0, 1.0, links)) 80 | 81 | # Visualize the network? 82 | if filename is not None: 83 | draw_es(coords_to_id, draw_connections, filename) 84 | 85 | # This is actually a feedforward network. 86 | return neat.nn.RecurrentNetwork(input_nodes, output_nodes, node_evals) 87 | 88 | @staticmethod 89 | def get_weights(p): 90 | """ 91 | Recursively collect all weights for a given QuadPoint. 92 | """ 93 | temp = [] 94 | 95 | def loop(pp): 96 | if pp is not None and all(child is not None for child in pp.cs): 97 | for i in range(0, 4): 98 | loop(pp.cs[i]) 99 | else: 100 | if pp is not None: 101 | temp.append(pp.w) 102 | loop(p) 103 | return temp 104 | 105 | def variance(self, p): 106 | """ 107 | Find the variance of a given QuadPoint. 108 | """ 109 | if not p: 110 | return 0.0 111 | return np.var(self.get_weights(p)) 112 | 113 | def division_initialization(self, coord, outgoing): 114 | """ 115 | Initialize the quadtree by dividing it in appropriate quads. 
116 | """ 117 | root = QuadPoint(0.0, 0.0, 1.0, 1) 118 | q = [root] 119 | 120 | while q: 121 | p = q.pop(0) 122 | 123 | p.cs[0] = QuadPoint(p.x - p.width/2.0, p.y - 124 | p.width/2.0, p.width/2.0, p.lvl + 1) 125 | p.cs[1] = QuadPoint(p.x - p.width/2.0, p.y + 126 | p.width/2.0, p.width/2.0, p.lvl + 1) 127 | p.cs[2] = QuadPoint(p.x + p.width/2.0, p.y + 128 | p.width/2.0, p.width/2.0, p.lvl + 1) 129 | p.cs[3] = QuadPoint(p.x + p.width/2.0, p.y - 130 | p.width/2.0, p.width/2.0, p.lvl + 1) 131 | 132 | for c in p.cs: 133 | c.w = query_cppn(coord, (c.x, c.y), outgoing, 134 | self.cppn, self.max_weight) 135 | 136 | if (p.lvl < self.initial_depth) or (p.lvl < self.max_depth and self.variance(p) 137 | > self.division_threshold): 138 | for child in p.cs: 139 | q.append(child) 140 | 141 | return root 142 | 143 | def pruning_extraction(self, coord, p, outgoing): 144 | """ 145 | Determines which connections to express - high variance = more connetions. 146 | """ 147 | for c in p.cs: 148 | d_left, d_right, d_top, d_bottom = None, None, None, None 149 | 150 | if self.variance(c) > self.variance_threshold: 151 | self.pruning_extraction(coord, c, outgoing) 152 | else: 153 | d_left = abs(c.w - query_cppn(coord, (c.x - p.width, 154 | c.y), outgoing, self.cppn, self.max_weight)) 155 | d_right = abs(c.w - query_cppn(coord, (c.x + p.width, 156 | c.y), outgoing, self.cppn, self.max_weight)) 157 | d_top = abs(c.w - query_cppn(coord, (c.x, c.y - p.width), 158 | outgoing, self.cppn, self.max_weight)) 159 | d_bottom = abs(c.w - query_cppn(coord, (c.x, c.y + 160 | p.width), outgoing, self.cppn, self.max_weight)) 161 | 162 | con = None 163 | if max(min(d_top, d_bottom), min(d_left, d_right)) > self.band_threshold: 164 | if outgoing: 165 | con = Connection(coord[0], coord[1], c.x, c.y, c.w) 166 | else: 167 | con = Connection(c.x, c.y, coord[0], coord[1], c.w) 168 | if con is not None: 169 | # Nodes will only connect upwards. 
170 | # If connections to same layer is wanted, change to con.y1 <= con.y2. 171 | if not c.w == 0.0 and con.y1 < con.y2 and not (con.x1 == con.x2 and con.y1 == con.y2): 172 | self.connections.add(con) 173 | 174 | def es_hyperneat(self): 175 | """ 176 | Explores the hidden nodes and their connections. 177 | """ 178 | inputs = self.substrate.input_coordinates 179 | outputs = self.substrate.output_coordinates 180 | hidden_nodes, unexplored_hidden_nodes = set(), set() 181 | connections1, connections2, connections3 = set(), set(), set() 182 | 183 | for x, y in inputs: # Explore from inputs. 184 | root = self.division_initialization((x, y), True) 185 | self.pruning_extraction((x, y), root, True) 186 | connections1 = connections1.union(self.connections) 187 | for c in connections1: 188 | hidden_nodes.add((c.x2, c.y2)) 189 | self.connections = set() 190 | 191 | unexplored_hidden_nodes = copy.deepcopy(hidden_nodes) 192 | 193 | for _ in range(self.iteration_level): # Explore from hidden. 194 | for x, y in unexplored_hidden_nodes: 195 | root = self.division_initialization((x, y), True) 196 | self.pruning_extraction((x, y), root, True) 197 | connections2 = connections2.union(self.connections) 198 | for c in connections2: 199 | hidden_nodes.add((c.x2, c.y2)) 200 | self.connections = set() 201 | 202 | unexplored_hidden_nodes = hidden_nodes - unexplored_hidden_nodes 203 | 204 | for x, y in outputs: # Explore to outputs. 205 | root = self.division_initialization((x, y), False) 206 | self.pruning_extraction((x, y), root, False) 207 | connections3 = connections3.union(self.connections) 208 | self.connections = set() 209 | 210 | connections = connections1.union(connections2.union(connections3)) 211 | 212 | return self.clean_net(connections) 213 | 214 | def clean_net(self, connections): 215 | """ 216 | Clean a net for dangling connections: 217 | Intersects paths from input nodes with paths to output. 
218 | """ 219 | connected_to_inputs = set(tuple(i) 220 | for i in self.substrate.input_coordinates) 221 | connected_to_outputs = set(tuple(i) 222 | for i in self.substrate.output_coordinates) 223 | true_connections = set() 224 | 225 | initial_input_connections = copy.deepcopy(connections) 226 | initial_output_connections = copy.deepcopy(connections) 227 | 228 | add_happened = True 229 | while add_happened: # The path from inputs. 230 | add_happened = False 231 | temp_input_connections = copy.deepcopy(initial_input_connections) 232 | for c in temp_input_connections: 233 | if (c.x1, c.y1) in connected_to_inputs: 234 | connected_to_inputs.add((c.x2, c.y2)) 235 | initial_input_connections.remove(c) 236 | add_happened = True 237 | 238 | add_happened = True 239 | while add_happened: # The path to outputs. 240 | add_happened = False 241 | temp_output_connections = copy.deepcopy(initial_output_connections) 242 | for c in temp_output_connections: 243 | if (c.x2, c.y2) in connected_to_outputs: 244 | connected_to_outputs.add((c.x1, c.y1)) 245 | initial_output_connections.remove(c) 246 | add_happened = True 247 | 248 | true_nodes = connected_to_inputs.intersection(connected_to_outputs) 249 | for c in connections: 250 | # Only include connection if both source and target node resides in the real path from input to output 251 | if (c.x1, c.y1) in true_nodes and (c.x2, c.y2) in true_nodes: 252 | true_connections.add(c) 253 | 254 | true_nodes -= (set(self.substrate.input_coordinates) 255 | .union(set(self.substrate.output_coordinates))) 256 | 257 | return true_nodes, true_connections 258 | 259 | 260 | class QuadPoint: 261 | """ 262 | Class representing an area in the quadtree. 263 | Defined by a center coordinate and the distance to the edges of the area. 
264 | """ 265 | 266 | def __init__(self, x, y, width, lvl): 267 | self.x = x 268 | self.y = y 269 | self.w = 0.0 270 | self.width = width 271 | self.cs = [None] * 4 272 | self.lvl = lvl 273 | 274 | 275 | class Connection: 276 | """ 277 | Class representing a connection from one point to another with a certain weight. 278 | """ 279 | 280 | def __init__(self, x1, y1, x2, y2, weight): 281 | self.x1 = x1 282 | self.y1 = y1 283 | self.x2 = x2 284 | self.y2 = y2 285 | self.weight = weight 286 | 287 | # Below is needed for use in set. 288 | def __eq__(self, other): 289 | if not isinstance(other, Connection): 290 | return NotImplemented 291 | return (self.x1, self.y1, self.x2, self.y2) == (other.x1, other.y1, other.x2, other.y2) 292 | 293 | def __hash__(self): 294 | return hash((self.x1, self.y1, self.x2, self.y2, self.weight)) 295 | 296 | 297 | def find_pattern(cppn, coord, res=60, max_weight=5.0): 298 | """ 299 | From a given point, query the cppn for weights to all other points. 300 | This can be visualized as a connectivity pattern. 
301 | """ 302 | im = np.zeros((res, res)) 303 | 304 | for x2 in range(res): 305 | for y2 in range(res): 306 | 307 | x2_scaled = -1.0 + (x2/float(res))*2.0 308 | y2_scaled = -1.0 + (y2/float(res))*2.0 309 | 310 | i = [coord[0], coord[1], x2_scaled, y2_scaled, 1.0] 311 | n = cppn.activate(i)[0] 312 | 313 | im[x2][y2] = n * max_weight 314 | 315 | return im 316 | -------------------------------------------------------------------------------- /pureples/experiments/mountain_car/config_cppn_mountain_car: -------------------------------------------------------------------------------- 1 | #--- parameters for the CPPN regarding the mountain car experiment ---# 2 | 3 | [NEAT] 4 | fitness_criterion = max 5 | fitness_threshold = -110 6 | pop_size = 100 7 | reset_on_extinction = False 8 | 9 | [DefaultGenome] 10 | # node activation options 11 | activation_default = tanh 12 | activation_mutate_rate = 0.5 13 | activation_options = gauss sin tanh 14 | 15 | # node aggregation options 16 | aggregation_default = sum 17 | aggregation_mutate_rate = 0.0 18 | aggregation_options = sum 19 | 20 | # node bias options 21 | bias_init_mean = 0.0 22 | bias_init_stdev = 1.0 23 | bias_max_value = 30.0 24 | bias_min_value = -30.0 25 | bias_mutate_power = 0.5 26 | bias_mutate_rate = 0.7 27 | bias_replace_rate = 0.1 28 | 29 | # genome compatibility options 30 | compatibility_disjoint_coefficient = 1.0 31 | compatibility_weight_coefficient = 0.5 32 | 33 | # connection add/remove rates 34 | conn_add_prob = 0.5 35 | conn_delete_prob = 0.5 36 | 37 | # connection enable options 38 | enabled_default = True 39 | enabled_mutate_rate = 0.01 40 | 41 | feed_forward = True 42 | initial_connection = full 43 | 44 | # node add/remove rates 45 | node_add_prob = 0.2 46 | node_delete_prob = 0.2 47 | 48 | # network parameters 49 | num_hidden = 0 50 | num_inputs = 5 51 | num_outputs = 1 52 | 53 | # node response options 54 | response_init_mean = 1.0 55 | response_init_stdev = 0.0 56 | response_max_value = 30.0 57 | 
response_min_value = -30.0 58 | response_mutate_power = 0.0 59 | response_mutate_rate = 0.0 60 | response_replace_rate = 0.0 61 | 62 | # connection weight options 63 | weight_init_mean = 0.0 64 | weight_init_stdev = 1.0 65 | weight_max_value = 30 66 | weight_min_value = -30 67 | weight_mutate_power = 0.5 68 | weight_mutate_rate = 0.8 69 | weight_replace_rate = 0.1 70 | 71 | [DefaultSpeciesSet] 72 | compatibility_threshold = 3.0 73 | 74 | [DefaultStagnation] 75 | species_fitness_func = max 76 | max_stagnation = 20 77 | species_elitism = 10 78 | 79 | [DefaultReproduction] 80 | elitism = 10 81 | survival_threshold = 0.2 82 | -------------------------------------------------------------------------------- /pureples/experiments/mountain_car/config_neat_mountain_car: -------------------------------------------------------------------------------- 1 | #--- parameters for the NEAT mountain car experiment ---# 2 | 3 | [NEAT] 4 | fitness_criterion = max 5 | fitness_threshold = -110 6 | pop_size = 100 7 | reset_on_extinction = False 8 | 9 | [DefaultGenome] 10 | # node activation options 11 | activation_default = sigmoid 12 | activation_mutate_rate = 0.0 13 | activation_options = sigmoid 14 | 15 | # node aggregation options 16 | aggregation_default = sum 17 | aggregation_mutate_rate = 0.0 18 | aggregation_options = sum 19 | 20 | # node bias options 21 | bias_init_mean = 0.0 22 | bias_init_stdev = 1.0 23 | bias_max_value = 30.0 24 | bias_min_value = -30.0 25 | bias_mutate_power = 0.5 26 | bias_mutate_rate = 0.7 27 | bias_replace_rate = 0.1 28 | 29 | # genome compatibility options 30 | compatibility_disjoint_coefficient = 1.0 31 | compatibility_weight_coefficient = 0.5 32 | 33 | # connection add/remove rates 34 | conn_add_prob = 0.5 35 | conn_delete_prob = 0.5 36 | 37 | # connection enable options 38 | enabled_default = True 39 | enabled_mutate_rate = 0.01 40 | 41 | feed_forward = True 42 | initial_connection = full 43 | 44 | # node add/remove rates 45 | node_add_prob = 0.2 46 | 
node_delete_prob = 0.2 47 | 48 | # network parameters 49 | num_hidden = 0 50 | num_inputs = 2 51 | num_outputs = 3 52 | 53 | # node response options 54 | response_init_mean = 1.0 55 | response_init_stdev = 0.0 56 | response_max_value = 30.0 57 | response_min_value = -30.0 58 | response_mutate_power = 0.0 59 | response_mutate_rate = 0.0 60 | response_replace_rate = 0.0 61 | 62 | # connection weight options 63 | weight_init_mean = 0.0 64 | weight_init_stdev = 1.0 65 | weight_max_value = 30 66 | weight_min_value = -30 67 | weight_mutate_power = 0.5 68 | weight_mutate_rate = 0.8 69 | weight_replace_rate = 0.1 70 | 71 | [DefaultSpeciesSet] 72 | compatibility_threshold = 3.0 73 | 74 | [DefaultStagnation] 75 | species_fitness_func = max 76 | max_stagnation = 20 77 | species_elitism = 10 78 | 79 | [DefaultReproduction] 80 | elitism = 10 81 | survival_threshold = 0.2 82 | -------------------------------------------------------------------------------- /pureples/experiments/mountain_car/es_hyperneat_mountain_car.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using a variable-sized ES-HyperNEAT network to perform a mountain car task. 3 | """ 4 | 5 | import logging 6 | import pickle 7 | import neat 8 | import gym 9 | from pureples.shared.visualize import draw_net 10 | from pureples.shared.substrate import Substrate 11 | from pureples.shared.gym_runner import run_es 12 | from pureples.es_hyperneat.es_hyperneat import ESNetwork 13 | 14 | # S, M or L; Small, Medium or Large (logic implemented as "Not 'S' or 'M' then Large"). 15 | VERSION = "S" 16 | VERSION_TEXT = "small" if VERSION == "S" else "medium" if VERSION == "M" else "large" 17 | 18 | # Network input and output coordinates. 
19 | INPUT_COORDINATES = [(-0.33, -1.), (0.33, -1.)] 20 | OUTPUT_COORDINATES = [(-0.5, 1.), (0., 1.), (0.5, 1.)] 21 | 22 | SUBSTRATE = Substrate(INPUT_COORDINATES, OUTPUT_COORDINATES) 23 | 24 | 25 | def params(version): 26 | """ 27 | ES-HyperNEAT specific parameters. 28 | """ 29 | return {"initial_depth": 0 if version == "S" else 1 if version == "M" else 2, 30 | "max_depth": 1 if version == "S" else 2 if version == "M" else 3, 31 | "variance_threshold": 0.03, 32 | "band_threshold": 0.3, 33 | "iteration_level": 1, 34 | "division_threshold": 0.5, 35 | "max_weight": 8.0, 36 | "activation": "sigmoid"} 37 | 38 | 39 | # Config for CPPN. 40 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 41 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 42 | 'pureples/experiments/mountain_car/config_cppn_mountain_car') 43 | 44 | 45 | def run(gens, env, version): 46 | """ 47 | Run the pole balancing task using the Gym environment 48 | Returns the winning genome and the statistics of the run. 49 | """ 50 | winner, stats = run_es(gens, env, 200, CONFIG, params( 51 | version), SUBSTRATE, max_trials=0) 52 | print(f"es_hyperneat_mountain_car_{VERSION_TEXT} done") 53 | return winner, stats 54 | 55 | 56 | # If run as script. 57 | if __name__ == '__main__': 58 | # Setup logger and environment. 59 | LOGGER = logging.getLogger() 60 | LOGGER.setLevel(logging.INFO) 61 | ENVIRONMENT = gym.make("MountainCar-v0") 62 | 63 | # Run! Only relevant to look at the winner. 64 | WINNER = run(200, ENVIRONMENT, VERSION)[0] 65 | 66 | # Save CPPN if wished reused and draw it + winner to file. 
67 | CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 68 | NETWORK = ESNetwork(SUBSTRATE, CPPN, params) 69 | NET = NETWORK.create_phenotype_network( 70 | filename=f"es_hyperneat_mountain_car_{VERSION_TEXT}_winner") 71 | draw_net(CPPN, filename=f"es_hyperneat_mountain_car_{VERSION_TEXT}_cppn") 72 | with open(f'es_hyperneat_mountain_car_{VERSION_TEXT}_cppn.pkl', 'wb') as output: 73 | pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL) 74 | -------------------------------------------------------------------------------- /pureples/experiments/mountain_car/hyperneat_mountain_car.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using HyperNEAT to perform a mountain car task. 3 | """ 4 | 5 | import logging 6 | import pickle 7 | import gym 8 | import neat 9 | from pureples.shared.visualize import draw_net 10 | from pureples.shared.substrate import Substrate 11 | from pureples.shared.gym_runner import run_hyper 12 | from pureples.hyperneat.hyperneat import create_phenotype_network 13 | 14 | # Network input and output coordinates. 15 | input_coordinates = [(-0.33, -1.), (0.33, -1.)] 16 | OUTPUT_COORDINATES = [(-0.5, 1.), (0., 1.), (0.5, 1.)] 17 | HIDDEN_COORDINATES = [[(-0.5, 0.5), (0.5, 0.5)], 18 | [(0.0, 0.0)], [(-0.5, -0.5), (0.5, -0.5)]] 19 | 20 | SUBSTRATE = Substrate( 21 | input_coordinates, OUTPUT_COORDINATES, HIDDEN_COORDINATES) 22 | ACTIVATIONS = len(HIDDEN_COORDINATES) + 2 23 | 24 | # Config for CPPN. 25 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 26 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 27 | 'config_cppn_mountain_car') 28 | 29 | 30 | def run(gens, env): 31 | """ 32 | Run the pole balancing task using the Gym environment 33 | Returns the winning genome and the statistics of the run. 
34 | """ 35 | winner, stats = run_hyper(gens, env, 200, CONFIG, 36 | SUBSTRATE, ACTIVATIONS, max_trials=0) 37 | print("hyperneat_mountain_car done") 38 | return winner, stats 39 | 40 | 41 | # If run as script. 42 | if __name__ == '__main__': 43 | # Setup logger and environment. 44 | LOGGER = logging.getLogger() 45 | LOGGER.setLevel(logging.INFO) 46 | ENVIRONMENT = gym.make("MountainCar-v0") 47 | 48 | # Run! Only relevant to look at the winner. 49 | WINNER = run(200, ENVIRONMENT)[0] 50 | 51 | # Save CPPN if wished reused and draw it + winner to file. 52 | cppn = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 53 | NET = create_phenotype_network(cppn, SUBSTRATE) 54 | draw_net(cppn, filename="hyperneat_mountain_car_cppn") 55 | draw_net(NET, filename="hyperneat_mountain_car_winner") 56 | with open('hyperneat_mountain_car_cppn.pkl', 'wb') as output: 57 | pickle.dump(cppn, output, pickle.HIGHEST_PROTOCOL) 58 | -------------------------------------------------------------------------------- /pureples/experiments/mountain_car/neat_mountain_car.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using NEAT to perform a mountain car task. 3 | """ 4 | 5 | import logging 6 | import pickle 7 | import gym 8 | import neat 9 | from pureples.shared.visualize import draw_net 10 | from pureples.shared.gym_runner import run_neat 11 | 12 | # Config for NEAT. 13 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 14 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 15 | 'config_neat_mountain_car') 16 | 17 | 18 | def run(gens, env): 19 | """ 20 | Run the pole balancing task using the Gym environment 21 | Returns the winning genome and the statistics of the run. 22 | """ 23 | winner, stats = run_neat(gens, env, 200, CONFIG, max_trials=0) 24 | print("neat_mountain_car done") 25 | return winner, stats 26 | 27 | 28 | # If run as script. 
29 | if __name__ == '__main__': 30 | # Setup logger and environment. 31 | LOGGER = logging.getLogger() 32 | LOGGER.setLevel(logging.INFO) 33 | ENVIRONMENT = gym.make("MountainCar-v0") 34 | 35 | # Run! Only relevant to look at the winner. 36 | WINNER = run(200, ENVIRONMENT)[0] 37 | 38 | # Save net if wished reused and draw it to file. 39 | NET = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 40 | draw_net(NET, filename="neat_mountain_car_winner") 41 | with open('neat_mountain_car_winner.pkl', 'wb') as output: 42 | pickle.dump(NET, output, pickle.HIGHEST_PROTOCOL) 43 | -------------------------------------------------------------------------------- /pureples/experiments/mountain_car/run_all_mountain_car.py: -------------------------------------------------------------------------------- 1 | """ 2 | Runs ALL mountain car tasks using ES-HyperNEAT, HyperNEAT and NEAT. 3 | Reports everything to text files. 4 | """ 5 | 6 | from multiprocessing import Manager 7 | from itertools import repeat 8 | import multiprocessing as multi 9 | import gym 10 | import matplotlib.pyplot as plt 11 | import matplotlib 12 | import es_hyperneat_mountain_car 13 | import hyperneat_mountain_car 14 | import neat_mountain_car 15 | matplotlib.use('Agg') 16 | 17 | 18 | def run(number, gens, env, neat_stats, hyperneat_stats, 19 | es_hyperneat_small_stats, es_hyperneat_medium_stats, es_hyperneat_large_stats): 20 | """ 21 | Run the experiments. 22 | """ 23 | print(f"This is run #{str(number)}") 24 | neat_stats.append(neat_mountain_car.run(gens, env)[1]) 25 | hyperneat_stats.append(hyperneat_mountain_car.run(gens, env)[1]) 26 | es_hyperneat_small_stats.append( 27 | es_hyperneat_mountain_car.run(gens, env, "S")[1]) 28 | es_hyperneat_medium_stats.append( 29 | es_hyperneat_mountain_car.run(gens, env, "M")[1]) 30 | es_hyperneat_large_stats.append( 31 | es_hyperneat_mountain_car.run(gens, env, "L")[1]) 32 | 33 | 34 | if __name__ == '__main__': 35 | # Initialize lists to keep track during run. 
36 | MANAGER = Manager() 37 | 38 | NEAT_STATS, HYPERNEAT_STATS, ES_HYPERNEAT_SMALL_STATS = MANAGER.list( 39 | []), MANAGER.list([]), MANAGER.list([]) 40 | ES_HYPERNEAT_MEDIUM_STATS, ES_HYPERNEAT_LARGE_STATS = MANAGER.list( 41 | []), MANAGER.list([]) 42 | NEAT_RUN_ONE_FITNESS, HYPERNEAT_RUN_ONE_FITNESSES = [], [] 43 | ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES, ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES = [], [] 44 | ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES = [] 45 | 46 | NEAT_RUN_TEN_FITNESSES, HYPERNEAT_RUN_TEN_FITNESSES = [], [] 47 | ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES, ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES = [], [] 48 | ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES = [] 49 | 50 | NEAT_RUN_HUNDRED_FITNESSES, HYPERNEAT_RUN_HUNDRED_FITNESSES = [], [] 51 | ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES, ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES = [], [] 52 | ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES = [] 53 | 54 | NEAT_ONE_SOLVED, HYPERNEAT_ONE_SOLVED, ES_HYPERNEAT_SMALL_ONE_SOLVED = 0, 0, 0 55 | ES_HYPERNEAT_MEDIUM_ONE_SOLVED, ES_HYPERNEAT_LARGE_ONE_SOLVED = 0, 0 56 | 57 | NEAT_TEN_SOLVED, HYPERNEAT_TEN_SOLVED, ES_HYPERNEAT_SMALL_TEN_SOLVED = 0, 0, 0 58 | ES_HYPERNEAT_MEDIUM_TEN_SOLVED, ES_HYPERNEAT_LARGE_TEN_SOLVED = 0, 0 59 | 60 | NEAT_HUNDRED_SOLVED, HYPERNEAT_HUNDRED_SOLVED, ES_HYPERNEAT_SMALL_HUNDRED_SOLVED = 0, 0, 0 61 | ES_HYPERNEAT_MEDIUM_HUNDRED_SOLVED, ES_HYPERNEAT_LARGE_HUNDRED_SOLVED = 0, 0 62 | 63 | RUNS = 16 64 | INPUTS = range(RUNS) 65 | GENS = 200 66 | FIT_THRESHOLD = -110 67 | MAX_FIT = -110 68 | ENV = gym.make("MountainCar-v0") 69 | 70 | P = multi.Pool(multi.cpu_count()) 71 | P.starmap(run, zip(range(RUNS), repeat(GENS), repeat(ENV), repeat(NEAT_STATS), 72 | repeat(HYPERNEAT_STATS), repeat( 73 | ES_HYPERNEAT_SMALL_STATS), repeat(ES_HYPERNEAT_MEDIUM_STATS), 74 | repeat(ES_HYPERNEAT_LARGE_STATS))) 75 | 76 | # Average the NEAT runs. 
77 | TEMP_FIT_ONE = [0.0] * GENS 78 | TEMP_FIT_TEN = [0.0] * GENS 79 | TEMP_FIT_HUNDRED = [0.0] * GENS 80 | 81 | for (stat_one, stat_ten, stat_hundred) in NEAT_STATS: 82 | if stat_one.best_genome().fitness > MAX_FIT: 83 | NEAT_RUN_ONE_FITNESS.append(MAX_FIT) 84 | else: 85 | NEAT_RUN_ONE_FITNESS.append(stat_one.best_genome().fitness) 86 | if stat_ten.best_genome().fitness > MAX_FIT: 87 | NEAT_RUN_TEN_FITNESSES.append(MAX_FIT) 88 | else: 89 | NEAT_RUN_TEN_FITNESSES.append(stat_one.best_genome().fitness) 90 | if stat_hundred.best_genome().fitness > MAX_FIT: 91 | NEAT_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 92 | else: 93 | NEAT_RUN_HUNDRED_FITNESSES.append(stat_one.best_genome().fitness) 94 | 95 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 96 | NEAT_ONE_SOLVED += 1 97 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 98 | NEAT_TEN_SOLVED += 1 99 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 100 | NEAT_HUNDRED_SOLVED += 1 101 | 102 | for i in range(GENS): 103 | if i < len(stat_one.most_fit_genomes): 104 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 105 | TEMP_FIT_ONE[i] += MAX_FIT 106 | else: 107 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 108 | else: 109 | TEMP_FIT_ONE[i] += MAX_FIT 110 | if i < len(stat_ten.most_fit_genomes): 111 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 112 | TEMP_FIT_TEN[i] += MAX_FIT 113 | else: 114 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 115 | else: 116 | TEMP_FIT_TEN[i] += MAX_FIT 117 | if i < len(stat_hundred.most_fit_genomes): 118 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 119 | TEMP_FIT_HUNDRED[i] += MAX_FIT 120 | else: 121 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 122 | else: 123 | TEMP_FIT_HUNDRED[i] += MAX_FIT 124 | 125 | NEAT_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 126 | NEAT_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 127 | NEAT_HUNDRED_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_HUNDRED] 128 | 129 | # Average the 
HyperNEAT runs. 130 | TEMP_FIT_ONE = [0.0] * GENS 131 | TEMP_FIT_TEN = [0.0] * GENS 132 | TEMP_FIT_HUNDRED = [0.0] * GENS 133 | 134 | for (stat_one, stat_ten, stat_hundred) in HYPERNEAT_STATS: 135 | if stat_one.best_genome().fitness > MAX_FIT: 136 | HYPERNEAT_RUN_ONE_FITNESSES.append(MAX_FIT) 137 | else: 138 | HYPERNEAT_RUN_ONE_FITNESSES.append(stat_one.best_genome().fitness) 139 | if stat_ten.best_genome().fitness > MAX_FIT: 140 | HYPERNEAT_RUN_TEN_FITNESSES.append(MAX_FIT) 141 | else: 142 | HYPERNEAT_RUN_TEN_FITNESSES.append(stat_one.best_genome().fitness) 143 | if stat_hundred.best_genome().fitness > MAX_FIT: 144 | HYPERNEAT_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 145 | else: 146 | HYPERNEAT_RUN_HUNDRED_FITNESSES.append( 147 | stat_one.best_genome().fitness) 148 | 149 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 150 | HYPERNEAT_ONE_SOLVED += 1 151 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 152 | HYPERNEAT_TEN_SOLVED += 1 153 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 154 | HYPERNEAT_HUNDRED_SOLVED += 1 155 | 156 | for i in range(GENS): 157 | if i < len(stat_one.most_fit_genomes): 158 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 159 | TEMP_FIT_ONE[i] += MAX_FIT 160 | else: 161 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 162 | else: 163 | TEMP_FIT_ONE[i] += MAX_FIT 164 | if i < len(stat_ten.most_fit_genomes): 165 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 166 | TEMP_FIT_TEN[i] += MAX_FIT 167 | else: 168 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 169 | else: 170 | TEMP_FIT_TEN[i] += MAX_FIT 171 | if i < len(stat_hundred.most_fit_genomes): 172 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 173 | TEMP_FIT_HUNDRED[i] += MAX_FIT 174 | else: 175 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 176 | else: 177 | TEMP_FIT_HUNDRED[i] += MAX_FIT 178 | 179 | HYPERNEAT_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 180 | HYPERNEAT_TEN_AVERAGE_FIT = [x / RUNS for x in 
TEMP_FIT_TEN] 181 | HYPERNEAT_HUNDRED_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_HUNDRED] 182 | 183 | # Average the small ES-HyperNEAT runs. 184 | TEMP_FIT_ONE = [0.0] * GENS 185 | TEMP_FIT_TEN = [0.0] * GENS 186 | TEMP_FIT_HUNDRED = [0.0] * GENS 187 | 188 | for (stat_one, stat_ten, stat_hundred) in ES_HYPERNEAT_SMALL_STATS: 189 | if stat_one.best_genome().fitness > MAX_FIT: 190 | ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES.append(MAX_FIT) 191 | else: 192 | ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES.append( 193 | stat_one.best_genome().fitness) 194 | if stat_ten.best_genome().fitness > MAX_FIT: 195 | ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES.append(MAX_FIT) 196 | else: 197 | ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES.append( 198 | stat_one.best_genome().fitness) 199 | if stat_hundred.best_genome().fitness > MAX_FIT: 200 | ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 201 | else: 202 | ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES.append( 203 | stat_one.best_genome().fitness) 204 | 205 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 206 | ES_HYPERNEAT_SMALL_ONE_SOLVED += 1 207 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 208 | ES_HYPERNEAT_SMALL_TEN_SOLVED += 1 209 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 210 | ES_HYPERNEAT_SMALL_HUNDRED_SOLVED += 1 211 | 212 | for i in range(GENS): 213 | if i < len(stat_one.most_fit_genomes): 214 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 215 | TEMP_FIT_ONE[i] += MAX_FIT 216 | else: 217 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 218 | else: 219 | TEMP_FIT_ONE[i] += MAX_FIT 220 | if i < len(stat_ten.most_fit_genomes): 221 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 222 | TEMP_FIT_TEN[i] += MAX_FIT 223 | else: 224 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 225 | else: 226 | TEMP_FIT_TEN[i] += MAX_FIT 227 | if i < len(stat_hundred.most_fit_genomes): 228 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 229 | TEMP_FIT_HUNDRED[i] += MAX_FIT 230 | else: 231 | 
TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 232 | else: 233 | TEMP_FIT_HUNDRED[i] += MAX_FIT 234 | 235 | ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 236 | ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 237 | ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT = [ 238 | x / RUNS for x in TEMP_FIT_HUNDRED] 239 | 240 | # Average the medium ES-HyperNEAT runs. 241 | TEMP_FIT_ONE = [0.0] * GENS 242 | TEMP_FIT_TEN = [0.0] * GENS 243 | TEMP_FIT_HUNDRED = [0.0] * GENS 244 | 245 | for (stat_one, stat_ten, stat_hundred) in ES_HYPERNEAT_MEDIUM_STATS: 246 | if stat_one.best_genome().fitness > MAX_FIT: 247 | ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES.append(MAX_FIT) 248 | else: 249 | ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES.append( 250 | stat_one.best_genome().fitness) 251 | if stat_ten.best_genome().fitness > MAX_FIT: 252 | ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES.append(MAX_FIT) 253 | else: 254 | ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES.append( 255 | stat_one.best_genome().fitness) 256 | if stat_hundred.best_genome().fitness > MAX_FIT: 257 | ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 258 | else: 259 | ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES.append( 260 | stat_one.best_genome().fitness) 261 | 262 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 263 | ES_HYPERNEAT_MEDIUM_ONE_SOLVED += 1 264 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 265 | ES_HYPERNEAT_MEDIUM_TEN_SOLVED += 1 266 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 267 | ES_HYPERNEAT_MEDIUM_HUNDRED_SOLVED += 1 268 | 269 | for i in range(GENS): 270 | if i < len(stat_one.most_fit_genomes): 271 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 272 | TEMP_FIT_ONE[i] += MAX_FIT 273 | else: 274 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 275 | else: 276 | TEMP_FIT_ONE[i] += MAX_FIT 277 | if i < len(stat_ten.most_fit_genomes): 278 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 279 | TEMP_FIT_TEN[i] += MAX_FIT 280 | 
else: 281 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 282 | else: 283 | TEMP_FIT_TEN[i] += MAX_FIT 284 | if i < len(stat_hundred.most_fit_genomes): 285 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 286 | TEMP_FIT_HUNDRED[i] += MAX_FIT 287 | else: 288 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 289 | else: 290 | TEMP_FIT_HUNDRED[i] += MAX_FIT 291 | 292 | ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 293 | ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 294 | ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT = [ 295 | x / RUNS for x in TEMP_FIT_HUNDRED] 296 | 297 | # Average the large ES-HyperNEAT runs. 298 | TEMP_FIT_ONE = [0.0] * GENS 299 | TEMP_FIT_TEN = [0.0] * GENS 300 | TEMP_FIT_HUNDRED = [0.0] * GENS 301 | 302 | for (stat_one, stat_ten, stat_hundred) in ES_HYPERNEAT_LARGE_STATS: 303 | if stat_one.best_genome().fitness > MAX_FIT: 304 | ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES.append(MAX_FIT) 305 | else: 306 | ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES.append( 307 | stat_one.best_genome().fitness) 308 | if stat_ten.best_genome().fitness > MAX_FIT: 309 | ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES.append(MAX_FIT) 310 | else: 311 | ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES.append( 312 | stat_one.best_genome().fitness) 313 | if stat_hundred.best_genome().fitness > MAX_FIT: 314 | ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 315 | else: 316 | ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES.append( 317 | stat_one.best_genome().fitness) 318 | 319 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 320 | ES_HYPERNEAT_LARGE_ONE_SOLVED += 1 321 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 322 | ES_HYPERNEAT_LARGE_TEN_SOLVED += 1 323 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 324 | ES_HYPERNEAT_LARGE_HUNDRED_SOLVED += 1 325 | 326 | for i in range(GENS): 327 | if i < len(stat_one.most_fit_genomes): 328 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 329 | TEMP_FIT_ONE[i] += 
MAX_FIT 330 | else: 331 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 332 | else: 333 | TEMP_FIT_ONE[i] += MAX_FIT 334 | if i < len(stat_ten.most_fit_genomes): 335 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 336 | TEMP_FIT_TEN[i] += MAX_FIT 337 | else: 338 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 339 | else: 340 | TEMP_FIT_TEN[i] += MAX_FIT 341 | if i < len(stat_hundred.most_fit_genomes): 342 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 343 | TEMP_FIT_HUNDRED[i] += MAX_FIT 344 | else: 345 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 346 | else: 347 | TEMP_FIT_HUNDRED[i] += MAX_FIT 348 | 349 | ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 350 | ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 351 | ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT = [ 352 | x / RUNS for x in TEMP_FIT_HUNDRED] 353 | 354 | # Write fitnesses to files. 355 | # NEAT. 356 | THEFILE = open('neat_mountain_car_run_fitnesses.txt', 'w+') 357 | THEFILE.write("NEAT one\n") 358 | 359 | for item in NEAT_RUN_ONE_FITNESS: 360 | THEFILE.write("%s\n" % item) 361 | 362 | if MAX_FIT in NEAT_ONE_AVERAGE_FIT: 363 | THEFILE.write("NEAT one solves mountain_car at generation: " + 364 | str(NEAT_ONE_AVERAGE_FIT.index(MAX_FIT))) 365 | else: 366 | THEFILE.write("NEAT one does not solve mountain_car with best fitness: " + 367 | str(NEAT_ONE_AVERAGE_FIT[GENS-1])) 368 | THEFILE.write("\nNEAT one solves mountain_car in " + 369 | str(NEAT_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 370 | THEFILE.write("NEAT ten\n") 371 | 372 | for item in NEAT_RUN_TEN_FITNESSES: 373 | THEFILE.write("%s\n" % item) 374 | 375 | if MAX_FIT in NEAT_TEN_AVERAGE_FIT: 376 | THEFILE.write("NEAT ten solves mountain_car at generation: " + 377 | str(NEAT_TEN_AVERAGE_FIT.index(MAX_FIT))) 378 | else: 379 | THEFILE.write("NEAT ten does not solve mountain_car with best fitness: " + 380 | str(NEAT_TEN_AVERAGE_FIT[GENS-1])) 381 | 
THEFILE.write("\nNEAT ten solves mountain_car in " + 382 | str(NEAT_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 383 | THEFILE.write("NEAT hundred\n") 384 | 385 | for item in NEAT_RUN_HUNDRED_FITNESSES: 386 | THEFILE.write("%s\n" % item) 387 | 388 | if MAX_FIT in NEAT_HUNDRED_AVERAGE_FIT: 389 | THEFILE.write("NEAT hundred solves mountain_car at generation: " + 390 | str(NEAT_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 391 | else: 392 | THEFILE.write("NEAT hundred does not solve mountain_car with best fitness: " + 393 | str(NEAT_HUNDRED_AVERAGE_FIT[GENS-1])) 394 | THEFILE.write("\nNEAT hundred solves mountain_car in " + 395 | str(NEAT_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 396 | 397 | # HyperNEAT. 398 | THEFILE = open('hyperneat_mountain_car_run_fitnesses.txt', 'w+') 399 | THEFILE.write("HyperNEAT one\n") 400 | 401 | for item in HYPERNEAT_RUN_ONE_FITNESSES: 402 | THEFILE.write("%s\n" % item) 403 | 404 | if MAX_FIT in HYPERNEAT_ONE_AVERAGE_FIT: 405 | THEFILE.write("HyperNEAT one solves mountain_car at generation: " + 406 | str(HYPERNEAT_ONE_AVERAGE_FIT.index(MAX_FIT))) 407 | else: 408 | THEFILE.write("HyperNEAT one does not solve mountain_car with best fitness: " + 409 | str(HYPERNEAT_ONE_AVERAGE_FIT[GENS-1])) 410 | THEFILE.write("\nHyperNEAT one solves mountain_car in " + 411 | str(HYPERNEAT_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 412 | THEFILE.write("HyperNEAT ten\n") 413 | 414 | for item in HYPERNEAT_RUN_TEN_FITNESSES: 415 | THEFILE.write("%s\n" % item) 416 | 417 | if MAX_FIT in HYPERNEAT_TEN_AVERAGE_FIT: 418 | THEFILE.write("HyperNEAT ten solves mountain_car at generation: " + 419 | str(HYPERNEAT_TEN_AVERAGE_FIT.index(MAX_FIT))) 420 | else: 421 | THEFILE.write("HyperNEAT ten does not solve mountain_car with best fitness: " + 422 | str(HYPERNEAT_TEN_AVERAGE_FIT[GENS-1])) 423 | THEFILE.write("\nHyperNEAT ten solves mountain_car in " + 424 | str(HYPERNEAT_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 425 | THEFILE.write("HyperNEAT 
hundred\n") 426 | 427 | for item in HYPERNEAT_RUN_HUNDRED_FITNESSES: 428 | THEFILE.write("%s\n" % item) 429 | 430 | if MAX_FIT in HYPERNEAT_HUNDRED_AVERAGE_FIT: 431 | THEFILE.write("HyperNEAT hundred solves mountain_car at generation: " + 432 | str(HYPERNEAT_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 433 | else: 434 | THEFILE.write("HyperNEAT hundred does not solve mountain_car with best fitness: " + 435 | str(HYPERNEAT_HUNDRED_AVERAGE_FIT[GENS-1])) 436 | THEFILE.write("\nHyperNEAT hundred solves mountain_car in " + 437 | str(HYPERNEAT_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 438 | 439 | # ES-HyperNEAT small. 440 | THEFILE = open('es_hyperneat_mountain_car_small_run_fitnesses.txt', 'w+') 441 | THEFILE.write("ES-HyperNEAT small one\n") 442 | 443 | for item in ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES: 444 | THEFILE.write("%s\n" % item) 445 | 446 | if MAX_FIT in ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT: 447 | THEFILE.write("ES-HyperNEAT small one solves mountain_car at generation: " + 448 | str(ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT.index(MAX_FIT))) 449 | else: 450 | THEFILE.write("ES-HyperNEAT small one does not solve mountain_car with best fitness: " + 451 | str(ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT[GENS-1])) 452 | THEFILE.write("\nES-HyperNEAT small one solves mountain_car in " + 453 | str(ES_HYPERNEAT_SMALL_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 454 | THEFILE.write("ES-HyperNEAT small ten\n") 455 | 456 | for item in ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES: 457 | THEFILE.write("%s\n" % item) 458 | 459 | if MAX_FIT in ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT: 460 | THEFILE.write("ES-HyperNEAT small ten solves mountain_car at generation: " + 461 | str(ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT.index(MAX_FIT))) 462 | else: 463 | THEFILE.write("ES-HyperNEAT small ten does not solve mountain_car with best fitness: " + 464 | str(ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT[GENS-1])) 465 | THEFILE.write("\nES-HyperNEAT small ten solves mountain_car in " + 466 | str(ES_HYPERNEAT_SMALL_TEN_SOLVED) 
+ " out of " + str(RUNS) + " runs.\n") 467 | THEFILE.write("ES-HyperNEAT small hundred\n") 468 | 469 | for item in ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES: 470 | THEFILE.write("%s\n" % item) 471 | 472 | if MAX_FIT in ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT: 473 | THEFILE.write("ES-HyperNEAT small hundred solves mountain_car at generation: " + 474 | str(ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 475 | else: 476 | THEFILE.write( 477 | "ES-HyperNEAT small hundred does not solve mountain_car with best fitness: " + 478 | str(ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT[GENS-1])) 479 | THEFILE.write("\nES-HyperNEAT small hundred solves mountain_car in " + 480 | str(ES_HYPERNEAT_SMALL_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 481 | 482 | # ES-HyperNEAT medium. 483 | THEFILE = open( 484 | 'es_hyperneat_mountain_car_medium_run_fitnesses.txt', 'w+') 485 | THEFILE.write("ES-HyperNEAT medium one\n") 486 | 487 | for item in ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES: 488 | THEFILE.write("%s\n" % item) 489 | 490 | if MAX_FIT in ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT: 491 | THEFILE.write("ES-HyperNEAT medium one solves mountain_car at generation: " + 492 | str(ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT.index(MAX_FIT))) 493 | else: 494 | THEFILE.write("ES-HyperNEAT medium one does not solve mountain_car with best fitness: " + 495 | str(ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT[GENS-1])) 496 | THEFILE.write("\nES-HyperNEAT medium one solves mountain_car in " + 497 | str(ES_HYPERNEAT_MEDIUM_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 498 | THEFILE.write("ES-HyperNEAT medium ten\n") 499 | 500 | for item in ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES: 501 | THEFILE.write("%s\n" % item) 502 | 503 | if MAX_FIT in ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT: 504 | THEFILE.write("ES-HyperNEAT medium ten solves mountain_car at generation: " + 505 | str(ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT.index(MAX_FIT))) 506 | else: 507 | THEFILE.write("ES-HyperNEAT medium ten does not solve mountain_car with best 
fitness: " + 508 | str(ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT[GENS-1])) 509 | THEFILE.write("\nES-HyperNEAT medium ten solves mountain_car in " + 510 | str(ES_HYPERNEAT_MEDIUM_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 511 | THEFILE.write("ES-HyperNEAT medium hundred\n") 512 | 513 | for item in ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES: 514 | THEFILE.write("%s\n" % item) 515 | 516 | if MAX_FIT in ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT: 517 | THEFILE.write("ES-HyperNEAT medium hundred solves mountain_car at generation: " + 518 | str(ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 519 | else: 520 | THEFILE.write( 521 | "ES-HyperNEAT medium hundred does not solve mountain_car with best fitness: " + 522 | str(ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT[GENS-1])) 523 | THEFILE.write("\nES-HyperNEAT medium hundred solves mountain_car in " + 524 | str(ES_HYPERNEAT_MEDIUM_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 525 | 526 | # ES-HyperNEAT large. 527 | THEFILE = open('es_hyperneat_mountain_car_large_run_fitnesses.txt', 'w+') 528 | THEFILE.write("ES-HyperNEAT large one\n") 529 | 530 | for item in ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES: 531 | THEFILE.write("%s\n" % item) 532 | 533 | if MAX_FIT in ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT: 534 | THEFILE.write("ES-HyperNEAT large one solves mountain_car at generation: " + 535 | str(ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT.index(MAX_FIT))) 536 | else: 537 | THEFILE.write("ES-HyperNEAT large one does not solve mountain_car with best fitness: " + 538 | str(ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT[GENS-1])) 539 | THEFILE.write("\nES-HyperNEAT large one solves mountain_car in " + 540 | str(ES_HYPERNEAT_LARGE_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 541 | THEFILE.write("ES-HyperNEAT large ten\n") 542 | 543 | for item in ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES: 544 | THEFILE.write("%s\n" % item) 545 | 546 | if MAX_FIT in ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT: 547 | THEFILE.write("ES-HyperNEAT large ten solves mountain_car at 
generation: " + 548 | str(ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT.index(MAX_FIT))) 549 | else: 550 | THEFILE.write("ES-HyperNEAT large ten does not solve mountain_car with best fitness: " + 551 | str(ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT[GENS-1])) 552 | THEFILE.write("\nES-HyperNEAT large ten solves mountain_car in " + 553 | str(ES_HYPERNEAT_LARGE_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 554 | THEFILE.write("ES-HyperNEAT large hundred\n") 555 | 556 | for item in ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES: 557 | THEFILE.write("%s\n" % item) 558 | 559 | if MAX_FIT in ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT: 560 | THEFILE.write("ES-HyperNEAT large hundred solves mountain_car at generation: " + 561 | str(ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 562 | else: 563 | THEFILE.write( 564 | "ES-HyperNEAT large hundred does not solve mountain_car with best fitness: " + 565 | str(ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT[GENS-1])) 566 | THEFILE.write("\nES-HyperNEAT large hundred solves mountain_car in " + 567 | str(ES_HYPERNEAT_LARGE_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 568 | 569 | # Plot one fitnesses. 570 | plt.plot(range(GENS), NEAT_ONE_AVERAGE_FIT, 'r-', label="NEAT") 571 | plt.plot(range(GENS), HYPERNEAT_ONE_AVERAGE_FIT, 'g--', label="HyperNEAT") 572 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT, 573 | 'b-.', label="ES-HyperNEAT small") 574 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT, 575 | 'c-.', label="ES-HyperNEAT medium") 576 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT, 577 | 'm-.', label="ES-HyperNEAT large") 578 | 579 | plt.title("Average mountain_car fitnesses one episode") 580 | plt.xlabel("Generations") 581 | plt.ylabel("Fitness") 582 | plt.grid() 583 | plt.legend(loc="best") 584 | 585 | plt.savefig('mountain_car_one_fitnesses.svg') 586 | 587 | plt.close() 588 | 589 | # Plot ten fitnesses. 
590 | plt.plot(range(GENS), NEAT_TEN_AVERAGE_FIT, 'r-', label="NEAT") 591 | plt.plot(range(GENS), HYPERNEAT_TEN_AVERAGE_FIT, 'g--', label="HyperNEAT") 592 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT, 593 | 'b-.', label="ES-HyperNEAT small") 594 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT, 595 | 'c-.', label="ES-HyperNEAT medium") 596 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT, 597 | 'm-.', label="ES-HyperNEAT large") 598 | 599 | plt.title("Average mountain_car fitnesses ten episodes") 600 | plt.xlabel("Generations") 601 | plt.ylabel("Fitness") 602 | plt.grid() 603 | plt.legend(loc="best") 604 | 605 | plt.savefig('mountain_car_ten_fitnesses.svg') 606 | 607 | plt.close() 608 | 609 | # Plot hundred fitnesses. 610 | plt.plot(range(GENS), NEAT_HUNDRED_AVERAGE_FIT, 'r-', label="NEAT") 611 | plt.plot(range(GENS), HYPERNEAT_HUNDRED_AVERAGE_FIT, 612 | 'g--', label="HyperNEAT") 613 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT, 614 | 'b-.', label="ES-HyperNEAT small") 615 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT, 616 | 'c-.', label="ES-HyperNEAT medium") 617 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT, 618 | 'm-.', label="ES-HyperNEAT large") 619 | 620 | plt.title("Average mountain_car fitnesses hundred episodes") 621 | plt.xlabel("Generations") 622 | plt.ylabel("Fitness") 623 | plt.grid() 624 | plt.legend(loc="best") 625 | 626 | plt.savefig('mountain_car_hundred_fitnesses.svg') 627 | 628 | plt.close() 629 | -------------------------------------------------------------------------------- /pureples/experiments/pole_balancing/config_cppn_pole_balancing: -------------------------------------------------------------------------------- 1 | #--- parameters for the CPPN regarding the single pole balancing experiment ---# 2 | 3 | [NEAT] 4 | fitness_criterion = max 5 | fitness_threshold = 475 6 | pop_size = 50 7 | reset_on_extinction = False 8 | 9 | [DefaultGenome] 10 | # node 
activation options 11 | activation_default = tanh 12 | activation_mutate_rate = 0.5 13 | activation_options = gauss sin tanh 14 | 15 | # node aggregation options 16 | aggregation_default = sum 17 | aggregation_mutate_rate = 0.0 18 | aggregation_options = sum 19 | 20 | # node bias options 21 | bias_init_mean = 0.0 22 | bias_init_stdev = 1.0 23 | bias_max_value = 30.0 24 | bias_min_value = -30.0 25 | bias_mutate_power = 0.5 26 | bias_mutate_rate = 0.7 27 | bias_replace_rate = 0.1 28 | 29 | # genome compatibility options 30 | compatibility_disjoint_coefficient = 1.0 31 | compatibility_weight_coefficient = 0.5 32 | 33 | # connection add/remove rates 34 | conn_add_prob = 0.5 35 | conn_delete_prob = 0.5 36 | 37 | # connection enable options 38 | enabled_default = True 39 | enabled_mutate_rate = 0.01 40 | 41 | feed_forward = True 42 | initial_connection = full 43 | 44 | # node add/remove rates 45 | node_add_prob = 0.2 46 | node_delete_prob = 0.2 47 | 48 | # network parameters 49 | num_hidden = 0 50 | num_inputs = 5 51 | num_outputs = 1 52 | 53 | # node response options 54 | response_init_mean = 1.0 55 | response_init_stdev = 0.0 56 | response_max_value = 30.0 57 | response_min_value = -30.0 58 | response_mutate_power = 0.0 59 | response_mutate_rate = 0.0 60 | response_replace_rate = 0.0 61 | 62 | # connection weight options 63 | weight_init_mean = 0.0 64 | weight_init_stdev = 1.0 65 | weight_max_value = 30 66 | weight_min_value = -30 67 | weight_mutate_power = 0.5 68 | weight_mutate_rate = 0.8 69 | weight_replace_rate = 0.1 70 | 71 | [DefaultSpeciesSet] 72 | compatibility_threshold = 3.0 73 | 74 | [DefaultStagnation] 75 | species_fitness_func = max 76 | max_stagnation = 20 77 | species_elitism = 5 78 | 79 | [DefaultReproduction] 80 | elitism = 5 81 | survival_threshold = 0.2 -------------------------------------------------------------------------------- /pureples/experiments/pole_balancing/config_neat_pole_balancing: 
-------------------------------------------------------------------------------- 1 | #--- parameters for the NEAT single pole balancing experiment ---# 2 | 3 | [NEAT] 4 | fitness_criterion = max 5 | fitness_threshold = 475 6 | pop_size = 50 7 | reset_on_extinction = False 8 | 9 | [DefaultGenome] 10 | # node activation options 11 | activation_default = sigmoid 12 | activation_mutate_rate = 0.0 13 | activation_options = sigmoid 14 | 15 | # node aggregation options 16 | aggregation_default = sum 17 | aggregation_mutate_rate = 0.0 18 | aggregation_options = sum 19 | 20 | # node bias options 21 | bias_init_mean = 0.0 22 | bias_init_stdev = 1.0 23 | bias_max_value = 30.0 24 | bias_min_value = -30.0 25 | bias_mutate_power = 0.5 26 | bias_mutate_rate = 0.7 27 | bias_replace_rate = 0.1 28 | 29 | # genome compatibility options 30 | compatibility_disjoint_coefficient = 1.0 31 | compatibility_weight_coefficient = 0.5 32 | 33 | # connection add/remove rates 34 | conn_add_prob = 0.5 35 | conn_delete_prob = 0.5 36 | 37 | # connection enable options 38 | enabled_default = True 39 | enabled_mutate_rate = 0.01 40 | 41 | feed_forward = True 42 | initial_connection = full 43 | 44 | # node add/remove rates 45 | node_add_prob = 0.2 46 | node_delete_prob = 0.2 47 | 48 | # network parameters 49 | num_hidden = 0 50 | num_inputs = 4 51 | num_outputs = 2 52 | 53 | # node response options 54 | response_init_mean = 1.0 55 | response_init_stdev = 0.0 56 | response_max_value = 30.0 57 | response_min_value = -30.0 58 | response_mutate_power = 0.0 59 | response_mutate_rate = 0.0 60 | response_replace_rate = 0.0 61 | 62 | # connection weight options 63 | weight_init_mean = 0.0 64 | weight_init_stdev = 1.0 65 | weight_max_value = 30 66 | weight_min_value = -30 67 | weight_mutate_power = 0.5 68 | weight_mutate_rate = 0.8 69 | weight_replace_rate = 0.1 70 | 71 | [DefaultSpeciesSet] 72 | compatibility_threshold = 3.0 73 | 74 | [DefaultStagnation] 75 | species_fitness_func = max 76 | max_stagnation = 20 
77 | species_elitism = 5 78 | 79 | [DefaultReproduction] 80 | elitism = 5 81 | survival_threshold = 0.2 -------------------------------------------------------------------------------- /pureples/experiments/pole_balancing/es_hyperneat_pole_balancing.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using a variable-sized ES-HyperNEAT network to perform a pole balancing task. 3 | """ 4 | 5 | import pickle 6 | import logging 7 | import neat 8 | import gym 9 | from pureples.shared.visualize import draw_net 10 | from pureples.shared.substrate import Substrate 11 | from pureples.shared.gym_runner import run_es 12 | from pureples.es_hyperneat.es_hyperneat import ESNetwork 13 | 14 | # S, M or L; Small, Medium or Large (logic implemented as "Not 'S' or 'M' then Large"). 15 | VERSION = "S" 16 | VERSION_TEXT = "small" if VERSION == "S" else "medium" if VERSION == "M" else "large" 17 | 18 | # Network coordinates and the resulting substrate. 19 | INPUT_COORDINATES = [] 20 | 21 | for i in range(0, 4): 22 | INPUT_COORDINATES.append((-1. + (2.*i/3.), -1.)) 23 | 24 | OUTPUT_COORDINATES = [(-1., 1.), (1., 1.)] 25 | SUBSTRATE = Substrate(INPUT_COORDINATES, OUTPUT_COORDINATES) 26 | 27 | 28 | def params(version): 29 | """ 30 | ES-HyperNEAT specific parameters. 31 | """ 32 | return {"initial_depth": 0 if version == "S" else 1 if version == "M" else 2, 33 | "max_depth": 1 if version == "S" else 2 if version == "M" else 3, 34 | "variance_threshold": 0.03, 35 | "band_threshold": 0.3, 36 | "iteration_level": 1, 37 | "division_threshold": 0.5, 38 | "max_weight": 8.0, 39 | "activation": "sigmoid"} 40 | 41 | 42 | # Config for CPPN. 
43 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 44 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 45 | 'pureples/experiments/pole_balancing/config_cppn_pole_balancing') 46 | 47 | 48 | def run(gens, env, version): 49 | """ 50 | Run the pole balancing task using the Gym environment 51 | Returns the winning genome and the statistics of the run. 52 | """ 53 | winner, stats = run_es(gens, env, 500, CONFIG, params(version), SUBSTRATE) 54 | print(f"es_hyperneat_polebalancing_{VERSION_TEXT} done") 55 | return winner, stats 56 | 57 | 58 | # If run as script. 59 | if __name__ == '__main__': 60 | # Setup logger and environment. 61 | LOGGER = logging.getLogger() 62 | LOGGER.setLevel(logging.INFO) 63 | ENVIRONMENT = gym.make("CartPole-v1") 64 | 65 | # Run! Only relevant to look at the winner. 66 | WINNER = run(100, ENVIRONMENT, VERSION)[0] 67 | 68 | # Save CPPN if wished reused and draw it + winner to file. 69 | CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 70 | NETWORK = ESNetwork(SUBSTRATE, CPPN, params(VERSION)) 71 | NET = NETWORK.create_phenotype_network( 72 | filename=f"pureples/experiments/pole_balancing/es_hyperneat_pole_balancing_{VERSION_TEXT}_winner") 73 | draw_net( 74 | CPPN, filename=f"pureples/experiments/pole_balancing/es_hyperneat_pole_balancing_{VERSION_TEXT}_cppn") 75 | with open(f'pureples/experiments/pole_balancing/es_hyperneat_pole_balancing_{VERSION_TEXT}_cppn.pkl', 'wb') as output: 76 | pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL) 77 | -------------------------------------------------------------------------------- /pureples/experiments/pole_balancing/hyperneat_pole_balancing.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using HyperNEAT to perform a pole balancing task. 
3 | """ 4 | 5 | import pickle 6 | import logging 7 | import neat 8 | import gym 9 | from pureples.shared.visualize import draw_net 10 | from pureples.shared.substrate import Substrate 11 | from pureples.shared.gym_runner import run_hyper 12 | from pureples.hyperneat.hyperneat import create_phenotype_network 13 | 14 | # Network input, hidden and output coordinates. 15 | INPUT_COORDINATES = [] 16 | for i in range(0, 4): 17 | INPUT_COORDINATES.append((-1. + (2.*i/3.), -1.)) 18 | HIDDEN_COORDINATES = [[(-0.5, 0.5), (0.5, 0.5)], [(-0.5, -0.5), (0.5, -0.5)]] 19 | OUTPUT_COORDINATES = [(-1., 1.), (1., 1.)] 20 | ACTIVATIONS = len(HIDDEN_COORDINATES) + 2 21 | 22 | SUBSTRATE = Substrate( 23 | INPUT_COORDINATES, OUTPUT_COORDINATES, HIDDEN_COORDINATES) 24 | 25 | # Config for CPPN. 26 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 27 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 28 | 'pureples/experiments/pole_balancing/config_cppn_pole_balancing') 29 | 30 | 31 | def run(gens, env): 32 | """ 33 | Run the pole balancing task using the Gym environment 34 | Returns the winning genome and the statistics of the run. 35 | """ 36 | winner, stats = run_hyper(gens, env, 500, CONFIG, SUBSTRATE, ACTIVATIONS) 37 | print("hyperneat_polebalancing done") 38 | return winner, stats 39 | 40 | 41 | # If run as script. 42 | if __name__ == '__main__': 43 | # Setup logger and environment. 44 | LOGGER = logging.getLogger() 45 | LOGGER.setLevel(logging.INFO) 46 | ENVIRONMENT = gym.make("CartPole-v1") 47 | 48 | # Run! Only relevant to look at the winner. 49 | WINNER = run(100, ENVIRONMENT)[0] 50 | 51 | # Save CPPN if wished reused and draw it + winner to file. 
52 | CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 53 | WINNER_NET = create_phenotype_network(CPPN, SUBSTRATE) 54 | draw_net( 55 | CPPN, filename="pureples/experiments/pole_balancing/hyperneat_pole_balancing_cppn") 56 | with open('pureples/experiments/pole_balancing/hyperneat_pole_balancing_cppn.pkl', 'wb') as output: 57 | pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL) 58 | draw_net( 59 | WINNER_NET, filename="pureples/experiments/pole_balancing/hyperneat_pole_balancing_winner") 60 | -------------------------------------------------------------------------------- /pureples/experiments/pole_balancing/neat_pole_balancing.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using NEAT to perform a pole balancing task. 3 | """ 4 | 5 | import logging 6 | import pickle 7 | import neat 8 | import gym 9 | from pureples.shared.visualize import draw_net 10 | from pureples.shared.gym_runner import run_neat 11 | 12 | 13 | # Config for FeedForwardNetwork. 14 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 15 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 16 | 'pureples/experiments/pole_balancing/config_neat_pole_balancing') 17 | 18 | 19 | def run(gens, env): 20 | """ 21 | Create the population and run the XOR task by providing eval_fitness as the fitness function. 22 | Returns the winning genome and the statistics of the run. 23 | """ 24 | winner, stats = run_neat(gens, env, 500, CONFIG) 25 | print("neat_pole_balancing done") 26 | return winner, stats 27 | 28 | 29 | # If run as script. 30 | if __name__ == '__main__': 31 | # Setup logger and environment. 32 | LOGGER = logging.getLogger() 33 | LOGGER.setLevel(logging.INFO) 34 | ENVIRONMENT = gym.make("CartPole-v1") 35 | 36 | # Run! 37 | WINNER = run(500, ENVIRONMENT)[0] 38 | 39 | # Save net if wished reused and draw it + winner to file. 
40 | WINNER_NET = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 41 | draw_net( 42 | WINNER_NET, filename="pureples/experiments/pole_balancing/neat_pole_balancing_winner") 43 | with open('pureples/experiments/pole_balancing/neat_pole_balancing_winner.pkl', 'wb') as output: 44 | pickle.dump(WINNER_NET, output, pickle.HIGHEST_PROTOCOL) 45 | -------------------------------------------------------------------------------- /pureples/experiments/pole_balancing/run_all_pole_balancing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Runs ALL pole balancing tasks using ES-HyperNEAT, HyperNEAT and NEAT. 3 | Reports everything to text files. 4 | """ 5 | 6 | from multiprocessing import Manager 7 | from itertools import repeat 8 | import multiprocessing as multi 9 | import gym 10 | import matplotlib.pyplot as plt 11 | import matplotlib 12 | import es_hyperneat_pole_balancing 13 | import hyperneat_pole_balancing 14 | import neat_pole_balancing 15 | matplotlib.use('Agg') 16 | 17 | 18 | def run(number, gens, env, neat_stats, hyperneat_stats, 19 | es_hyperneat_small_stats, es_hyperneat_medium_stats, es_hyperneat_large_stats): 20 | """ 21 | Run the experiments. 22 | """ 23 | print(f"This is run #{str(number)}") 24 | neat_stats.append(neat_pole_balancing.run(gens, env)[1]) 25 | hyperneat_stats.append(hyperneat_pole_balancing.run(gens, env)[1]) 26 | es_hyperneat_small_stats.append( 27 | es_hyperneat_pole_balancing.run(gens, env, "S")[1]) 28 | es_hyperneat_medium_stats.append( 29 | es_hyperneat_pole_balancing.run(gens, env, "M")[1]) 30 | es_hyperneat_large_stats.append( 31 | es_hyperneat_pole_balancing.run(gens, env, "L")[1]) 32 | 33 | 34 | if __name__ == '__main__': 35 | # Initialize lists to keep track during run. 
36 | MANAGER = Manager() 37 | 38 | NEAT_STATS, HYPERNEAT_STATS, ES_HYPERNEAT_SMALL_STATS = MANAGER.list( 39 | []), MANAGER.list([]), MANAGER.list([]) 40 | ES_HYPERNEAT_MEDIUM_STATS, ES_HYPERNEAT_LARGE_STATS = MANAGER.list( 41 | []), MANAGER.list([]) 42 | NEAT_RUN_ONE_FITNESS, HYPERNEAT_RUN_ONE_FITNESSES = [], [] 43 | ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES, ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES = [], [] 44 | ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES = [] 45 | 46 | NEAT_RUN_TEN_FITNESSES, HYPERNEAT_RUN_TEN_FITNESSES = [], [] 47 | ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES, ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES = [], [] 48 | ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES = [] 49 | 50 | NEAT_RUN_HUNDRED_FITNESSES, HYPERNEAT_RUN_HUNDRED_FITNESSES = [], [] 51 | ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES, ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES = [], [] 52 | ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES = [] 53 | 54 | NEAT_ONE_SOLVED, HYPERNEAT_ONE_SOLVED, ES_HYPERNEAT_SMALL_ONE_SOLVED = 0, 0, 0 55 | ES_HYPERNEAT_MEDIUM_ONE_SOLVED, ES_HYPERNEAT_LARGE_ONE_SOLVED = 0, 0 56 | 57 | NEAT_TEN_SOLVED, HYPERNEAT_TEN_SOLVED, ES_HYPERNEAT_SMALL_TEN_SOLVED = 0, 0, 0 58 | ES_HYPERNEAT_MEDIUM_TEN_SOLVED, ES_HYPERNEAT_LARGE_TEN_SOLVED = 0, 0 59 | 60 | NEAT_HUNDRED_SOLVED, HYPERNEAT_HUNDRED_SOLVED, ES_HYPERNEAT_SMALL_HUNDRED_SOLVED = 0, 0, 0 61 | ES_HYPERNEAT_MEDIUM_HUNDRED_SOLVED, ES_HYPERNEAT_LARGE_HUNDRED_SOLVED = 0, 0 62 | 63 | RUNS = 16 64 | INPUTS = range(RUNS) 65 | GENS = 50 66 | FIT_THRESHOLD = 475 67 | MAX_FIT = 475 68 | ENV = gym.make("CartPole-v1") 69 | 70 | P = multi.Pool(multi.cpu_count()) 71 | P.starmap(run, zip(range(RUNS), repeat(GENS), repeat(ENV), repeat(NEAT_STATS), 72 | repeat(HYPERNEAT_STATS), repeat( 73 | ES_HYPERNEAT_SMALL_STATS), repeat(ES_HYPERNEAT_MEDIUM_STATS), 74 | repeat(ES_HYPERNEAT_LARGE_STATS))) 75 | 76 | # Average the NEAT runs. 
77 | TEMP_FIT_ONE = [0.0] * GENS 78 | TEMP_FIT_TEN = [0.0] * GENS 79 | TEMP_FIT_HUNDRED = [0.0] * GENS 80 | 81 | for (stat_one, stat_ten, stat_hundred) in NEAT_STATS: 82 | if stat_one.best_genome().fitness > MAX_FIT: 83 | NEAT_RUN_ONE_FITNESS.append(MAX_FIT) 84 | else: 85 | NEAT_RUN_ONE_FITNESS.append(stat_one.best_genome().fitness) 86 | if stat_ten.best_genome().fitness > MAX_FIT: 87 | NEAT_RUN_TEN_FITNESSES.append(MAX_FIT) 88 | else: 89 | NEAT_RUN_TEN_FITNESSES.append(stat_one.best_genome().fitness) 90 | if stat_hundred.best_genome().fitness > MAX_FIT: 91 | NEAT_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 92 | else: 93 | NEAT_RUN_HUNDRED_FITNESSES.append(stat_one.best_genome().fitness) 94 | 95 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 96 | NEAT_ONE_SOLVED += 1 97 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 98 | NEAT_TEN_SOLVED += 1 99 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 100 | NEAT_HUNDRED_SOLVED += 1 101 | 102 | for i in range(GENS): 103 | if i < len(stat_one.most_fit_genomes): 104 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 105 | TEMP_FIT_ONE[i] += MAX_FIT 106 | else: 107 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 108 | else: 109 | TEMP_FIT_ONE[i] += MAX_FIT 110 | if i < len(stat_ten.most_fit_genomes): 111 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 112 | TEMP_FIT_TEN[i] += MAX_FIT 113 | else: 114 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 115 | else: 116 | TEMP_FIT_TEN[i] += MAX_FIT 117 | if i < len(stat_hundred.most_fit_genomes): 118 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 119 | TEMP_FIT_HUNDRED[i] += MAX_FIT 120 | else: 121 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 122 | else: 123 | TEMP_FIT_HUNDRED[i] += MAX_FIT 124 | 125 | NEAT_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 126 | NEAT_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 127 | NEAT_HUNDRED_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_HUNDRED] 128 | 129 | # Average the 
HyperNEAT runs. 130 | TEMP_FIT_ONE = [0.0] * GENS 131 | TEMP_FIT_TEN = [0.0] * GENS 132 | TEMP_FIT_HUNDRED = [0.0] * GENS 133 | 134 | for (stat_one, stat_ten, stat_hundred) in HYPERNEAT_STATS: 135 | if stat_one.best_genome().fitness > MAX_FIT: 136 | HYPERNEAT_RUN_ONE_FITNESSES.append(MAX_FIT) 137 | else: 138 | HYPERNEAT_RUN_ONE_FITNESSES.append(stat_one.best_genome().fitness) 139 | if stat_ten.best_genome().fitness > MAX_FIT: 140 | HYPERNEAT_RUN_TEN_FITNESSES.append(MAX_FIT) 141 | else: 142 | HYPERNEAT_RUN_TEN_FITNESSES.append(stat_one.best_genome().fitness) 143 | if stat_hundred.best_genome().fitness > MAX_FIT: 144 | HYPERNEAT_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 145 | else: 146 | HYPERNEAT_RUN_HUNDRED_FITNESSES.append( 147 | stat_one.best_genome().fitness) 148 | 149 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 150 | HYPERNEAT_ONE_SOLVED += 1 151 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 152 | HYPERNEAT_TEN_SOLVED += 1 153 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 154 | HYPERNEAT_HUNDRED_SOLVED += 1 155 | 156 | for i in range(GENS): 157 | if i < len(stat_one.most_fit_genomes): 158 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 159 | TEMP_FIT_ONE[i] += MAX_FIT 160 | else: 161 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 162 | else: 163 | TEMP_FIT_ONE[i] += MAX_FIT 164 | if i < len(stat_ten.most_fit_genomes): 165 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 166 | TEMP_FIT_TEN[i] += MAX_FIT 167 | else: 168 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 169 | else: 170 | TEMP_FIT_TEN[i] += MAX_FIT 171 | if i < len(stat_hundred.most_fit_genomes): 172 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 173 | TEMP_FIT_HUNDRED[i] += MAX_FIT 174 | else: 175 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 176 | else: 177 | TEMP_FIT_HUNDRED[i] += MAX_FIT 178 | 179 | HYPERNEAT_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 180 | HYPERNEAT_TEN_AVERAGE_FIT = [x / RUNS for x in 
TEMP_FIT_TEN] 181 | HYPERNEAT_HUNDRED_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_HUNDRED] 182 | 183 | # Average the small ES-HyperNEAT runs. 184 | TEMP_FIT_ONE = [0.0] * GENS 185 | TEMP_FIT_TEN = [0.0] * GENS 186 | TEMP_FIT_HUNDRED = [0.0] * GENS 187 | 188 | for (stat_one, stat_ten, stat_hundred) in ES_HYPERNEAT_SMALL_STATS: 189 | if stat_one.best_genome().fitness > MAX_FIT: 190 | ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES.append(MAX_FIT) 191 | else: 192 | ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES.append( 193 | stat_one.best_genome().fitness) 194 | if stat_ten.best_genome().fitness > MAX_FIT: 195 | ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES.append(MAX_FIT) 196 | else: 197 | ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES.append( 198 | stat_one.best_genome().fitness) 199 | if stat_hundred.best_genome().fitness > MAX_FIT: 200 | ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 201 | else: 202 | ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES.append( 203 | stat_one.best_genome().fitness) 204 | 205 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 206 | ES_HYPERNEAT_SMALL_ONE_SOLVED += 1 207 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 208 | ES_HYPERNEAT_SMALL_TEN_SOLVED += 1 209 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 210 | ES_HYPERNEAT_SMALL_HUNDRED_SOLVED += 1 211 | 212 | for i in range(GENS): 213 | if i < len(stat_one.most_fit_genomes): 214 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 215 | TEMP_FIT_ONE[i] += MAX_FIT 216 | else: 217 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 218 | else: 219 | TEMP_FIT_ONE[i] += MAX_FIT 220 | if i < len(stat_ten.most_fit_genomes): 221 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 222 | TEMP_FIT_TEN[i] += MAX_FIT 223 | else: 224 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 225 | else: 226 | TEMP_FIT_TEN[i] += MAX_FIT 227 | if i < len(stat_hundred.most_fit_genomes): 228 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 229 | TEMP_FIT_HUNDRED[i] += MAX_FIT 230 | else: 231 | 
TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 232 | else: 233 | TEMP_FIT_HUNDRED[i] += MAX_FIT 234 | 235 | ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 236 | ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 237 | ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT = [ 238 | x / RUNS for x in TEMP_FIT_HUNDRED] 239 | 240 | # Average the medium ES-HyperNEAT runs. 241 | TEMP_FIT_ONE = [0.0] * GENS 242 | TEMP_FIT_TEN = [0.0] * GENS 243 | TEMP_FIT_HUNDRED = [0.0] * GENS 244 | 245 | for (stat_one, stat_ten, stat_hundred) in ES_HYPERNEAT_MEDIUM_STATS: 246 | if stat_one.best_genome().fitness > MAX_FIT: 247 | ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES.append(MAX_FIT) 248 | else: 249 | ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES.append( 250 | stat_one.best_genome().fitness) 251 | if stat_ten.best_genome().fitness > MAX_FIT: 252 | ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES.append(MAX_FIT) 253 | else: 254 | ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES.append( 255 | stat_one.best_genome().fitness) 256 | if stat_hundred.best_genome().fitness > MAX_FIT: 257 | ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 258 | else: 259 | ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES.append( 260 | stat_one.best_genome().fitness) 261 | 262 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 263 | ES_HYPERNEAT_MEDIUM_ONE_SOLVED += 1 264 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 265 | ES_HYPERNEAT_MEDIUM_TEN_SOLVED += 1 266 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 267 | ES_HYPERNEAT_MEDIUM_HUNDRED_SOLVED += 1 268 | 269 | for i in range(GENS): 270 | if i < len(stat_one.most_fit_genomes): 271 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 272 | TEMP_FIT_ONE[i] += MAX_FIT 273 | else: 274 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 275 | else: 276 | TEMP_FIT_ONE[i] += MAX_FIT 277 | if i < len(stat_ten.most_fit_genomes): 278 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 279 | TEMP_FIT_TEN[i] += MAX_FIT 280 | 
else: 281 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 282 | else: 283 | TEMP_FIT_TEN[i] += MAX_FIT 284 | if i < len(stat_hundred.most_fit_genomes): 285 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 286 | TEMP_FIT_HUNDRED[i] += MAX_FIT 287 | else: 288 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 289 | else: 290 | TEMP_FIT_HUNDRED[i] += MAX_FIT 291 | 292 | ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 293 | ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 294 | ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT = [ 295 | x / RUNS for x in TEMP_FIT_HUNDRED] 296 | 297 | # Average the large ES-HyperNEAT runs. 298 | TEMP_FIT_ONE = [0.0] * GENS 299 | TEMP_FIT_TEN = [0.0] * GENS 300 | TEMP_FIT_HUNDRED = [0.0] * GENS 301 | 302 | for (stat_one, stat_ten, stat_hundred) in ES_HYPERNEAT_LARGE_STATS: 303 | if stat_one.best_genome().fitness > MAX_FIT: 304 | ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES.append(MAX_FIT) 305 | else: 306 | ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES.append( 307 | stat_one.best_genome().fitness) 308 | if stat_ten.best_genome().fitness > MAX_FIT: 309 | ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES.append(MAX_FIT) 310 | else: 311 | ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES.append( 312 | stat_one.best_genome().fitness) 313 | if stat_hundred.best_genome().fitness > MAX_FIT: 314 | ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES.append(MAX_FIT) 315 | else: 316 | ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES.append( 317 | stat_one.best_genome().fitness) 318 | 319 | if stat_one.best_genome().fitness >= FIT_THRESHOLD: 320 | ES_HYPERNEAT_LARGE_ONE_SOLVED += 1 321 | if stat_ten.best_genome().fitness >= FIT_THRESHOLD: 322 | ES_HYPERNEAT_LARGE_TEN_SOLVED += 1 323 | if stat_hundred.best_genome().fitness >= FIT_THRESHOLD: 324 | ES_HYPERNEAT_LARGE_HUNDRED_SOLVED += 1 325 | 326 | for i in range(GENS): 327 | if i < len(stat_one.most_fit_genomes): 328 | if stat_one.most_fit_genomes[i].fitness > MAX_FIT: 329 | TEMP_FIT_ONE[i] += 
MAX_FIT 330 | else: 331 | TEMP_FIT_ONE[i] += stat_one.most_fit_genomes[i].fitness 332 | else: 333 | TEMP_FIT_ONE[i] += MAX_FIT 334 | if i < len(stat_ten.most_fit_genomes): 335 | if stat_ten.most_fit_genomes[i].fitness > MAX_FIT: 336 | TEMP_FIT_TEN[i] += MAX_FIT 337 | else: 338 | TEMP_FIT_TEN[i] += stat_ten.most_fit_genomes[i].fitness 339 | else: 340 | TEMP_FIT_TEN[i] += MAX_FIT 341 | if i < len(stat_hundred.most_fit_genomes): 342 | if stat_hundred.most_fit_genomes[i].fitness > MAX_FIT: 343 | TEMP_FIT_HUNDRED[i] += MAX_FIT 344 | else: 345 | TEMP_FIT_HUNDRED[i] += stat_hundred.most_fit_genomes[i].fitness 346 | else: 347 | TEMP_FIT_HUNDRED[i] += MAX_FIT 348 | 349 | ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_ONE] 350 | ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT_TEN] 351 | ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT = [ 352 | x / RUNS for x in TEMP_FIT_HUNDRED] 353 | 354 | # Write fitnesses to files. 355 | # NEAT. 356 | THEFILE = open('neat_pole_balancing_run_fitnesses.txt', 'w+') 357 | THEFILE.write("NEAT one\n") 358 | 359 | for item in NEAT_RUN_ONE_FITNESS: 360 | THEFILE.write("%s\n" % item) 361 | 362 | if MAX_FIT in NEAT_ONE_AVERAGE_FIT: 363 | THEFILE.write("NEAT one solves pole_balancing at generation: " + 364 | str(NEAT_ONE_AVERAGE_FIT.index(MAX_FIT))) 365 | else: 366 | THEFILE.write("NEAT one does not solve pole_balancing with best fitness: " + 367 | str(NEAT_ONE_AVERAGE_FIT[GENS-1])) 368 | THEFILE.write("\nNEAT one solves pole_balancing in " + 369 | str(NEAT_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 370 | THEFILE.write("NEAT ten\n") 371 | 372 | for item in NEAT_RUN_TEN_FITNESSES: 373 | THEFILE.write("%s\n" % item) 374 | 375 | if MAX_FIT in NEAT_TEN_AVERAGE_FIT: 376 | THEFILE.write("NEAT ten solves pole_balancing at generation: " + 377 | str(NEAT_TEN_AVERAGE_FIT.index(MAX_FIT))) 378 | else: 379 | THEFILE.write("NEAT ten does not solve pole_balancing with best fitness: " + 380 | str(NEAT_TEN_AVERAGE_FIT[GENS-1])) 381 
| THEFILE.write("\nNEAT ten solves pole_balancing in " + 382 | str(NEAT_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 383 | THEFILE.write("NEAT hundred\n") 384 | 385 | for item in NEAT_RUN_HUNDRED_FITNESSES: 386 | THEFILE.write("%s\n" % item) 387 | 388 | if MAX_FIT in NEAT_HUNDRED_AVERAGE_FIT: 389 | THEFILE.write("NEAT hundred solves pole_balancing at generation: " + 390 | str(NEAT_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 391 | else: 392 | THEFILE.write("NEAT hundred does not solve pole_balancing with best fitness: " + 393 | str(NEAT_HUNDRED_AVERAGE_FIT[GENS-1])) 394 | THEFILE.write("\nNEAT hundred solves pole_balancing in " + 395 | str(NEAT_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 396 | 397 | # HyperNEAT. 398 | THEFILE = open('hyperneat_pole_balancing_run_fitnesses.txt', 'w+') 399 | THEFILE.write("HyperNEAT one\n") 400 | 401 | for item in HYPERNEAT_RUN_ONE_FITNESSES: 402 | THEFILE.write("%s\n" % item) 403 | 404 | if MAX_FIT in HYPERNEAT_ONE_AVERAGE_FIT: 405 | THEFILE.write("HyperNEAT one solves pole_balancing at generation: " + 406 | str(HYPERNEAT_ONE_AVERAGE_FIT.index(MAX_FIT))) 407 | else: 408 | THEFILE.write("HyperNEAT one does not solve pole_balancing with best fitness: " + 409 | str(HYPERNEAT_ONE_AVERAGE_FIT[GENS-1])) 410 | THEFILE.write("\nHyperNEAT one solves pole_balancing in " + 411 | str(HYPERNEAT_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 412 | THEFILE.write("HyperNEAT ten\n") 413 | 414 | for item in HYPERNEAT_RUN_TEN_FITNESSES: 415 | THEFILE.write("%s\n" % item) 416 | 417 | if MAX_FIT in HYPERNEAT_TEN_AVERAGE_FIT: 418 | THEFILE.write("HyperNEAT ten solves pole_balancing at generation: " + 419 | str(HYPERNEAT_TEN_AVERAGE_FIT.index(MAX_FIT))) 420 | else: 421 | THEFILE.write("HyperNEAT ten does not solve pole_balancing with best fitness: " + 422 | str(HYPERNEAT_TEN_AVERAGE_FIT[GENS-1])) 423 | THEFILE.write("\nHyperNEAT ten solves pole_balancing in " + 424 | str(HYPERNEAT_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 425 | 
THEFILE.write("HyperNEAT hundred\n") 426 | 427 | for item in HYPERNEAT_RUN_HUNDRED_FITNESSES: 428 | THEFILE.write("%s\n" % item) 429 | 430 | if MAX_FIT in HYPERNEAT_HUNDRED_AVERAGE_FIT: 431 | THEFILE.write("HyperNEAT hundred solves pole_balancing at generation: " + 432 | str(HYPERNEAT_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 433 | else: 434 | THEFILE.write("HyperNEAT hundred does not solve pole_balancing with best fitness: " + 435 | str(HYPERNEAT_HUNDRED_AVERAGE_FIT[GENS-1])) 436 | THEFILE.write("\nHyperNEAT hundred solves pole_balancing in " + 437 | str(HYPERNEAT_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 438 | 439 | # ES-HyperNEAT small. 440 | THEFILE = open('es_hyperneat_pole_balancing_small_run_fitnesses.txt', 'w+') 441 | THEFILE.write("ES-HyperNEAT small one\n") 442 | 443 | for item in ES_HYPERNEAT_SMALL_RUN_ONE_FITNESSES: 444 | THEFILE.write("%s\n" % item) 445 | 446 | if MAX_FIT in ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT: 447 | THEFILE.write("ES-HyperNEAT small one solves pole_balancing at generation: " + 448 | str(ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT.index(MAX_FIT))) 449 | else: 450 | THEFILE.write("ES-HyperNEAT small one does not solve pole_balancing with best fitness: " + 451 | str(ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT[GENS-1])) 452 | THEFILE.write("\nES-HyperNEAT small one solves pole_balancing in " + 453 | str(ES_HYPERNEAT_SMALL_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 454 | THEFILE.write("ES-HyperNEAT small ten\n") 455 | 456 | for item in ES_HYPERNEAT_SMALL_RUN_TEN_FITNESSES: 457 | THEFILE.write("%s\n" % item) 458 | 459 | if MAX_FIT in ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT: 460 | THEFILE.write("ES-HyperNEAT small ten solves pole_balancing at generation: " + 461 | str(ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT.index(MAX_FIT))) 462 | else: 463 | THEFILE.write("ES-HyperNEAT small ten does not solve pole_balancing with best fitness: " + 464 | str(ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT[GENS-1])) 465 | THEFILE.write("\nES-HyperNEAT small ten solves pole_balancing in 
" + 466 | str(ES_HYPERNEAT_SMALL_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 467 | THEFILE.write("ES-HyperNEAT small hundred\n") 468 | 469 | for item in ES_HYPERNEAT_SMALL_RUN_HUNDRED_FITNESSES: 470 | THEFILE.write("%s\n" % item) 471 | 472 | if MAX_FIT in ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT: 473 | THEFILE.write("ES-HyperNEAT small hundred solves pole_balancing at generation: " + 474 | str(ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 475 | else: 476 | THEFILE.write( 477 | "ES-HyperNEAT small hundred does not solve pole_balancing with best fitness: " + 478 | str(ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT[GENS-1])) 479 | THEFILE.write("\nES-HyperNEAT small hundred solves pole_balancing in " + 480 | str(ES_HYPERNEAT_SMALL_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 481 | 482 | # ES-HyperNEAT medium. 483 | THEFILE = open( 484 | 'es_hyperneat_pole_balancing_medium_run_fitnesses.txt', 'w+') 485 | THEFILE.write("ES-HyperNEAT medium one\n") 486 | 487 | for item in ES_HYPERNEAT_MEDIUM_RUN_ONE_FITNESSES: 488 | THEFILE.write("%s\n" % item) 489 | 490 | if MAX_FIT in ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT: 491 | THEFILE.write("ES-HyperNEAT medium one solves pole_balancing at generation: " + 492 | str(ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT.index(MAX_FIT))) 493 | else: 494 | THEFILE.write("ES-HyperNEAT medium one does not solve pole_balancing with best fitness: " + 495 | str(ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT[GENS-1])) 496 | THEFILE.write("\nES-HyperNEAT medium one solves pole_balancing in " + 497 | str(ES_HYPERNEAT_MEDIUM_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 498 | THEFILE.write("ES-HyperNEAT medium ten\n") 499 | 500 | for item in ES_HYPERNEAT_MEDIUM_RUN_TEN_FITNESSES: 501 | THEFILE.write("%s\n" % item) 502 | 503 | if MAX_FIT in ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT: 504 | THEFILE.write("ES-HyperNEAT medium ten solves pole_balancing at generation: " + 505 | str(ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT.index(MAX_FIT))) 506 | else: 507 | 
THEFILE.write("ES-HyperNEAT medium ten does not solve pole_balancing with best fitness: " + 508 | str(ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT[GENS-1])) 509 | THEFILE.write("\nES-HyperNEAT medium ten solves pole_balancing in " + 510 | str(ES_HYPERNEAT_MEDIUM_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 511 | THEFILE.write("ES-HyperNEAT medium hundred\n") 512 | 513 | for item in ES_HYPERNEAT_MEDIUM_RUN_HUNDRED_FITNESSES: 514 | THEFILE.write("%s\n" % item) 515 | 516 | if MAX_FIT in ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT: 517 | THEFILE.write("ES-HyperNEAT medium hundred solves pole_balancing at generation: " + 518 | str(ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 519 | else: 520 | THEFILE.write( 521 | "ES-HyperNEAT medium hundred does not solve pole_balancing with best fitness: " + 522 | str(ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT[GENS-1])) 523 | THEFILE.write("\nES-HyperNEAT medium hundred solves pole_balancing in " + 524 | str(ES_HYPERNEAT_MEDIUM_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 525 | 526 | # ES-HyperNEAT large. 
527 | THEFILE = open('es_hyperneat_pole_balancing_large_run_fitnesses.txt', 'w+') 528 | THEFILE.write("ES-HyperNEAT large one\n") 529 | 530 | for item in ES_HYPERNEAT_LARGE_RUN_ONE_FITNESSES: 531 | THEFILE.write("%s\n" % item) 532 | 533 | if MAX_FIT in ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT: 534 | THEFILE.write("ES-HyperNEAT large one solves pole_balancing at generation: " + 535 | str(ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT.index(MAX_FIT))) 536 | else: 537 | THEFILE.write("ES-HyperNEAT large one does not solve pole_balancing with best fitness: " + 538 | str(ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT[GENS-1])) 539 | THEFILE.write("\nES-HyperNEAT large one solves pole_balancing in " + 540 | str(ES_HYPERNEAT_LARGE_ONE_SOLVED) + " out of " + str(RUNS) + " runs.\n") 541 | THEFILE.write("ES-HyperNEAT large ten\n") 542 | 543 | for item in ES_HYPERNEAT_LARGE_RUN_TEN_FITNESSES: 544 | THEFILE.write("%s\n" % item) 545 | 546 | if MAX_FIT in ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT: 547 | THEFILE.write("ES-HyperNEAT large ten solves pole_balancing at generation: " + 548 | str(ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT.index(MAX_FIT))) 549 | else: 550 | THEFILE.write("ES-HyperNEAT large ten does not solve pole_balancing with best fitness: " + 551 | str(ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT[GENS-1])) 552 | THEFILE.write("\nES-HyperNEAT large ten solves pole_balancing in " + 553 | str(ES_HYPERNEAT_LARGE_TEN_SOLVED) + " out of " + str(RUNS) + " runs.\n") 554 | THEFILE.write("ES-HyperNEAT large hundred\n") 555 | 556 | for item in ES_HYPERNEAT_LARGE_RUN_HUNDRED_FITNESSES: 557 | THEFILE.write("%s\n" % item) 558 | 559 | if MAX_FIT in ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT: 560 | THEFILE.write("ES-HyperNEAT large hundred solves pole_balancing at generation: " + 561 | str(ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT.index(MAX_FIT))) 562 | else: 563 | THEFILE.write( 564 | "ES-HyperNEAT large hundred does not solve pole_balancing with best fitness: " + 565 | str(ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT[GENS-1])) 566 | 
THEFILE.write("\nES-HyperNEAT large hundred solves pole_balancing in " + 567 | str(ES_HYPERNEAT_LARGE_HUNDRED_SOLVED) + " out of " + str(RUNS) + " runs.\n") 568 | 569 | # Plot one fitnesses. 570 | plt.plot(range(GENS), NEAT_ONE_AVERAGE_FIT, 'r-', label="NEAT") 571 | plt.plot(range(GENS), HYPERNEAT_ONE_AVERAGE_FIT, 'g--', label="HyperNEAT") 572 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_ONE_AVERAGE_FIT, 573 | 'b-.', label="ES-HyperNEAT small") 574 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_ONE_AVERAGE_FIT, 575 | 'c-.', label="ES-HyperNEAT medium") 576 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_ONE_AVERAGE_FIT, 577 | 'm-.', label="ES-HyperNEAT large") 578 | 579 | plt.title("Average pole_balancing fitnesses one episode") 580 | plt.xlabel("Generations") 581 | plt.ylabel("Fitness") 582 | plt.grid() 583 | plt.legend(loc="best") 584 | 585 | plt.savefig('pole_balancing_one_fitnesses.svg') 586 | 587 | plt.close() 588 | 589 | # Plot ten fitnesses. 590 | plt.plot(range(GENS), NEAT_TEN_AVERAGE_FIT, 'r-', label="NEAT") 591 | plt.plot(range(GENS), HYPERNEAT_TEN_AVERAGE_FIT, 'g--', label="HyperNEAT") 592 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_TEN_AVERAGE_FIT, 593 | 'b-.', label="ES-HyperNEAT small") 594 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_TEN_AVERAGE_FIT, 595 | 'c-.', label="ES-HyperNEAT medium") 596 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_TEN_AVERAGE_FIT, 597 | 'm-.', label="ES-HyperNEAT large") 598 | 599 | plt.title("Average pole_balancing fitnesses ten episodes") 600 | plt.xlabel("Generations") 601 | plt.ylabel("Fitness") 602 | plt.grid() 603 | plt.legend(loc="best") 604 | 605 | plt.savefig('pole_balancing_ten_fitnesses.svg') 606 | 607 | plt.close() 608 | 609 | # Plot hundred fitnesses. 
610 | plt.plot(range(GENS), NEAT_HUNDRED_AVERAGE_FIT, 'r-', label="NEAT") 611 | plt.plot(range(GENS), HYPERNEAT_HUNDRED_AVERAGE_FIT, 612 | 'g--', label="HyperNEAT") 613 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_HUNDRED_AVERAGE_FIT, 614 | 'b-.', label="ES-HyperNEAT small") 615 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_HUNDRED_AVERAGE_FIT, 616 | 'c-.', label="ES-HyperNEAT medium") 617 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_HUNDRED_AVERAGE_FIT, 618 | 'm-.', label="ES-HyperNEAT large") 619 | 620 | plt.title("Average pole_balancing fitnesses hundred episodes") 621 | plt.xlabel("Generations") 622 | plt.ylabel("Fitness") 623 | plt.grid() 624 | plt.legend(loc="best") 625 | 626 | plt.savefig('pole_balancing_hundred_fitnesses.svg') 627 | 628 | plt.close() 629 | -------------------------------------------------------------------------------- /pureples/experiments/xor/config_cppn_xor: -------------------------------------------------------------------------------- 1 | #--- parameters for the CPPN related to the XOR experiment ---# 2 | 3 | [NEAT] 4 | fitness_criterion = max 5 | fitness_threshold = 0.975 6 | pop_size = 150 7 | reset_on_extinction = False 8 | 9 | [DefaultGenome] 10 | # node activation options 11 | activation_default = tanh 12 | activation_mutate_rate = 0.5 13 | activation_options = gauss sin tanh 14 | 15 | # node aggregation options 16 | aggregation_default = sum 17 | aggregation_mutate_rate = 0.0 18 | aggregation_options = sum 19 | 20 | # node bias options 21 | bias_init_mean = 0.0 22 | bias_init_stdev = 1.0 23 | bias_max_value = 30.0 24 | bias_min_value = -30.0 25 | bias_mutate_power = 0.5 26 | bias_mutate_rate = 0.7 27 | bias_replace_rate = 0.1 28 | 29 | # genome compatibility options 30 | compatibility_disjoint_coefficient = 1.0 31 | compatibility_weight_coefficient = 0.5 32 | 33 | # connection add/remove rates 34 | conn_add_prob = 0.5 35 | conn_delete_prob = 0.5 36 | 37 | # connection enable options 38 | enabled_default = True 39 | enabled_mutate_rate 
= 0.01 40 | 41 | feed_forward = True 42 | initial_connection = full 43 | 44 | # node add/remove rates 45 | node_add_prob = 0.2 46 | node_delete_prob = 0.2 47 | 48 | # network parameters 49 | num_hidden = 0 50 | num_inputs = 5 51 | num_outputs = 1 52 | 53 | # node response options 54 | response_init_mean = 1.0 55 | response_init_stdev = 0.0 56 | response_max_value = 30.0 57 | response_min_value = -30.0 58 | response_mutate_power = 0.0 59 | response_mutate_rate = 0.0 60 | response_replace_rate = 0.0 61 | 62 | # connection weight options 63 | weight_init_mean = 0.0 64 | weight_init_stdev = 1.0 65 | weight_max_value = 30 66 | weight_min_value = -30 67 | weight_mutate_power = 0.5 68 | weight_mutate_rate = 0.8 69 | weight_replace_rate = 0.1 70 | 71 | [DefaultSpeciesSet] 72 | compatibility_threshold = 3.0 73 | 74 | [DefaultStagnation] 75 | species_fitness_func = max 76 | max_stagnation = 20 77 | species_elitism = 15 78 | 79 | [DefaultReproduction] 80 | elitism = 15 81 | survival_threshold = 0.2 -------------------------------------------------------------------------------- /pureples/experiments/xor/config_neat_xor: -------------------------------------------------------------------------------- 1 | #--- parameters for the NEAT XOR experiment ---# 2 | 3 | [NEAT] 4 | fitness_criterion = max 5 | fitness_threshold = 0.975 6 | pop_size = 150 7 | reset_on_extinction = False 8 | 9 | [DefaultGenome] 10 | # node activation options 11 | activation_default = sigmoid 12 | activation_mutate_rate = 0.0 13 | activation_options = sigmoid 14 | 15 | # node aggregation options 16 | aggregation_default = sum 17 | aggregation_mutate_rate = 0.0 18 | aggregation_options = sum 19 | 20 | # node bias options 21 | bias_init_mean = 0.0 22 | bias_init_stdev = 1.0 23 | bias_max_value = 30.0 24 | bias_min_value = -30.0 25 | bias_mutate_power = 0.5 26 | bias_mutate_rate = 0.7 27 | bias_replace_rate = 0.1 28 | 29 | # genome compatibility options 30 | compatibility_disjoint_coefficient = 1.0 31 | 
compatibility_weight_coefficient = 0.5 32 | 33 | # connection add/remove rates 34 | conn_add_prob = 0.5 35 | conn_delete_prob = 0.5 36 | 37 | # connection enable options 38 | enabled_default = True 39 | enabled_mutate_rate = 0.01 40 | 41 | feed_forward = True 42 | initial_connection = full 43 | 44 | # node add/remove rates 45 | node_add_prob = 0.2 46 | node_delete_prob = 0.2 47 | 48 | # network parameters 49 | num_hidden = 0 50 | num_inputs = 3 51 | num_outputs = 1 52 | 53 | # node response options 54 | response_init_mean = 1.0 55 | response_init_stdev = 0.0 56 | response_max_value = 30.0 57 | response_min_value = -30.0 58 | response_mutate_power = 0.0 59 | response_mutate_rate = 0.0 60 | response_replace_rate = 0.0 61 | 62 | # connection weight options 63 | weight_init_mean = 0.0 64 | weight_init_stdev = 1.0 65 | weight_max_value = 30 66 | weight_min_value = -30 67 | weight_mutate_power = 0.5 68 | weight_mutate_rate = 0.8 69 | weight_replace_rate = 0.1 70 | 71 | [DefaultSpeciesSet] 72 | compatibility_threshold = 3.0 73 | 74 | [DefaultStagnation] 75 | species_fitness_func = max 76 | max_stagnation = 20 77 | species_elitism = 15 78 | 79 | [DefaultReproduction] 80 | elitism = 15 81 | survival_threshold = 0.2 -------------------------------------------------------------------------------- /pureples/experiments/xor/es_hyperneat_xor.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using a variable-sized ES-HyperNEAT network to perform the simple XOR task. 3 | Fitness threshold set in config 4 | - by default very high to show the high possible accuracy of this library. 5 | """ 6 | 7 | import pickle 8 | import neat 9 | import neat.nn 10 | from pureples.shared.substrate import Substrate 11 | from pureples.shared.visualize import draw_net 12 | from pureples.es_hyperneat.es_hyperneat import ESNetwork 13 | 14 | # S, M or L; Small, Medium or Large (logic implemented as "Not 'S' or 'M' then Large"). 
15 | VERSION = "S" 16 | VERSION_TEXT = "small" if VERSION == "S" else "medium" if VERSION == "M" else "large" 17 | 18 | # Network inputs and expected outputs. 19 | XOR_INPUTS = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)] 20 | XOR_OUTPUTS = [(0.0,), (1.0,), (1.0,), (0.0,)] 21 | 22 | # Network coordinates and the resulting substrate. 23 | INPUT_COORDINATES = [(-1.0, -1.0), (0.0, -1.0), (1.0, -1.0)] 24 | OUTPUT_COORDINATES = [(0.0, 1.0)] 25 | SUBSTRATE = Substrate(INPUT_COORDINATES, OUTPUT_COORDINATES) 26 | 27 | 28 | def params(version): 29 | """ 30 | ES-HyperNEAT specific parameters. 31 | """ 32 | return {"initial_depth": 0 if version == "S" else 1 if version == "M" else 2, 33 | "max_depth": 1 if version == "S" else 2 if version == "M" else 3, 34 | "variance_threshold": 0.03, 35 | "band_threshold": 0.3, 36 | "iteration_level": 1, 37 | "division_threshold": 0.5, 38 | "max_weight": 5.0, 39 | "activation": "sigmoid"} 40 | 41 | 42 | DYNAMIC_PARAMS = params(VERSION) 43 | 44 | # Config for CPPN. 45 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 46 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 47 | 'pureples/experiments/xor/config_cppn_xor') 48 | 49 | 50 | def eval_fitness(genomes, config): 51 | """ 52 | Fitness function. 53 | For each genome evaluate its fitness, in this case, as the mean squared error. 
54 | """ 55 | for _, genome in genomes: 56 | cppn = neat.nn.FeedForwardNetwork.create(genome, config) 57 | network = ESNetwork(SUBSTRATE, cppn, DYNAMIC_PARAMS) 58 | net = network.create_phenotype_network() 59 | 60 | sum_square_error = 0.0 61 | 62 | for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS): 63 | new_xor_input = xor_inputs + (1.0,) 64 | net.reset() 65 | 66 | for _ in range(network.activations): 67 | xor_output = net.activate(new_xor_input) 68 | 69 | sum_square_error += ((xor_output[0] - xor_expected[0])**2.0)/4.0 70 | 71 | genome.fitness = 1 - sum_square_error 72 | 73 | 74 | def run(gens, version): 75 | """ 76 | Create the population and run the XOR task by providing eval_fitness as the fitness function. 77 | Returns the winning genome and the statistics of the run. 78 | """ 79 | pop = neat.population.Population(CONFIG) 80 | stats = neat.statistics.StatisticsReporter() 81 | pop.add_reporter(stats) 82 | pop.add_reporter(neat.reporting.StdOutReporter(True)) 83 | 84 | global DYNAMIC_PARAMS 85 | DYNAMIC_PARAMS = params(version) 86 | 87 | winner = pop.run(eval_fitness, gens) 88 | print(f"es_hyperneat_xor_{VERSION_TEXT} done") 89 | return winner, stats 90 | 91 | 92 | # If run as script. 93 | if __name__ == '__main__': 94 | WINNER = run(300, VERSION)[0] # Only relevant to look at the winner. 95 | print('\nBest genome:\n{!s}'.format(WINNER)) 96 | 97 | # Verify network output against training data. 98 | print('\nOutput:') 99 | CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 100 | NETWORK = ESNetwork(SUBSTRATE, CPPN, DYNAMIC_PARAMS) 101 | # This will also draw winner_net. 
102 | WINNER_NET = NETWORK.create_phenotype_network( 103 | filename=f'pureples/experiments/xor/es_hyperneat_xor_{VERSION_TEXT}_winner.png') 104 | 105 | for inputs, expected in zip(XOR_INPUTS, XOR_OUTPUTS): 106 | new_input = inputs + (1.0,) 107 | WINNER_NET.reset() 108 | 109 | for i in range(NETWORK.activations): 110 | output = WINNER_NET.activate(new_input) 111 | 112 | print(" input {!r}, expected output {!r}, got {!r}".format( 113 | inputs, expected, output)) 114 | 115 | # Save CPPN if wished reused and draw it to file. 116 | draw_net( 117 | CPPN, filename=f"pureples/experiments/xor/es_hyperneat_xor_{VERSION_TEXT}_cppn") 118 | with open(f'pureples/experiments/xor/es_hyperneat_xor_{VERSION_TEXT}_cppn.pkl', 'wb') as output: 119 | pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL) 120 | -------------------------------------------------------------------------------- /pureples/experiments/xor/hyperneat_xor.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using HyperNEAT to perform the simple XOR task. 3 | Fitness threshold set in config 4 | - by default very high to show the high possible accuracy of this library. 5 | """ 6 | 7 | import pickle 8 | import neat 9 | import neat.nn 10 | from pureples.shared.visualize import draw_net 11 | from pureples.shared.substrate import Substrate 12 | from pureples.hyperneat.hyperneat import create_phenotype_network 13 | 14 | # Network inputs and expected outputs. 15 | XOR_INPUTS = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)] 16 | XOR_OUTPUTS = [(0.0,), (1.0,), (1.0,), (0.0,)] 17 | 18 | INPUT_COORDINATES = [(-1.0, -1.0), (0.0, -1.0), (1.0, -1.0)] 19 | HIDDEN_COORDINATES = [[(-1.0, 0.0), (0.0, 0.0), (1.0, 0.0)]] 20 | OUTPUT_COORDINATES = [(0.0, 1.0)] 21 | ACTIVATIONS = len(HIDDEN_COORDINATES) + 2 22 | 23 | SUBSTRATE = Substrate( 24 | INPUT_COORDINATES, OUTPUT_COORDINATES, HIDDEN_COORDINATES) 25 | 26 | # Config for CPPN. 
27 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 28 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 29 | 'pureples/experiments/xor/config_cppn_xor') 30 | 31 | 32 | def eval_fitness(genomes, config): 33 | """ 34 | Fitness function. 35 | For each genome evaluate its fitness, in this case, as the mean squared error. 36 | """ 37 | for _, genome in genomes: 38 | cppn = neat.nn.FeedForwardNetwork.create(genome, config) 39 | net = create_phenotype_network(cppn, SUBSTRATE) 40 | 41 | sum_square_error = 0.0 42 | 43 | for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS): 44 | new_xor_input = xor_inputs + (1.0,) 45 | net.reset() 46 | 47 | for _ in range(ACTIVATIONS): 48 | xor_output = net.activate(new_xor_input) 49 | 50 | sum_square_error += ((xor_output[0] - xor_expected[0])**2.0)/4.0 51 | 52 | genome.fitness = 1 - sum_square_error 53 | 54 | 55 | def run(gens): 56 | """ 57 | Create the population and run the XOR task by providing eval_fitness as the fitness function. 58 | Returns the winning genome and the statistics of the run. 59 | """ 60 | pop = neat.population.Population(CONFIG) 61 | stats = neat.statistics.StatisticsReporter() 62 | pop.add_reporter(stats) 63 | pop.add_reporter(neat.reporting.StdOutReporter(True)) 64 | 65 | winner = pop.run(eval_fitness, gens) 66 | print("hyperneat_xor done") 67 | return winner, stats 68 | 69 | 70 | # If run as script. 71 | if __name__ == '__main__': 72 | WINNER = run(300)[0] # Only relevant to look at the winner. 73 | print('\nBest genome:\n{!s}'.format(WINNER)) 74 | 75 | # Verify network output against training data. 
76 | print('\nOutput:') 77 | CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 78 | WINNER_NET = create_phenotype_network(CPPN, SUBSTRATE) 79 | 80 | for inputs, expected in zip(XOR_INPUTS, XOR_OUTPUTS): 81 | new_input = inputs + (1.0,) 82 | WINNER_NET.reset() 83 | 84 | for i in range(ACTIVATIONS): 85 | output = WINNER_NET.activate(new_input) 86 | 87 | print(" input {!r}, expected output {!r}, got {!r}".format( 88 | inputs, expected, output)) 89 | 90 | # Save CPPN if wished reused and draw it to file along with the winner. 91 | with open('pureples/experiments/xor/hyperneat_xor_cppn.pkl', 'wb') as output: 92 | pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL) 93 | draw_net(CPPN, filename="pureples/experiments/xor/hyperneat_xor_cppn") 94 | draw_net(WINNER_NET, filename="pureples/experiments/xor/hyperneat_xor_winner") 95 | -------------------------------------------------------------------------------- /pureples/experiments/xor/neat_xor.py: -------------------------------------------------------------------------------- 1 | """ 2 | An experiment using NEAT to perform the simple XOR task. 3 | Fitness threshold set in config 4 | - by default very high to show the high possible accuracy of the NEAT library. 5 | """ 6 | 7 | import pickle 8 | import neat 9 | import neat.nn 10 | from pureples.shared.visualize import draw_net 11 | 12 | # Network inputs and expected outputs. 13 | XOR_INPUTS = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)] 14 | XOR_OUTPUTS = [(0.0,), (1.0,), (1.0,), (0.0,)] 15 | 16 | # Config for FeedForwardNetwork. 17 | CONFIG = neat.config.Config(neat.genome.DefaultGenome, neat.reproduction.DefaultReproduction, 18 | neat.species.DefaultSpeciesSet, neat.stagnation.DefaultStagnation, 19 | 'pureples/experiments/xor/config_neat_xor') 20 | 21 | 22 | def eval_fitness(genomes, config): 23 | """ 24 | Fitness function. 25 | For each genome evaluate its fitness, in this case, as the mean squared error. 
26 | """ 27 | for _, genome in genomes: 28 | net = neat.nn.FeedForwardNetwork.create(genome, config) 29 | 30 | sum_square_error = 0.0 31 | 32 | for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS): 33 | new_xor_input = xor_inputs + (1.0,) 34 | xor_output = net.activate(new_xor_input) 35 | sum_square_error += ((xor_output[0] - xor_expected[0])**2.0)/4.0 36 | 37 | genome.fitness = 1 - sum_square_error 38 | 39 | 40 | def run(gens): 41 | """ 42 | Create the population and run the XOR task by providing eval_fitness as the fitness function. 43 | Returns the winning genome and the statistics of the run. 44 | """ 45 | pop = neat.population.Population(CONFIG) 46 | stats = neat.statistics.StatisticsReporter() 47 | pop.add_reporter(stats) 48 | pop.add_reporter(neat.reporting.StdOutReporter(True)) 49 | 50 | winner = pop.run(eval_fitness, gens) 51 | print("neat_xor done") 52 | return winner, stats 53 | 54 | 55 | # If run as script. 56 | if __name__ == '__main__': 57 | WINNER = run(300)[0] # Only relevant to look at the winner. 58 | print('\nBest genome:\n{!s}'.format(WINNER)) 59 | 60 | # Verify network output against training data. 61 | print('\nOutput:') 62 | WINNER_NET = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG) 63 | 64 | for inputs, expected in zip(XOR_INPUTS, XOR_OUTPUTS): 65 | new_input = inputs + (1.0,) 66 | output = WINNER_NET.activate(new_input) 67 | print(" input {!r}, expected output {!r}, got {!r}".format( 68 | inputs, expected, output)) 69 | 70 | # Save net if wished reused and draw it to a file. 
71 | with open('pureples/experiments/xor/winner_neat_xor.pkl', 'wb') as output: 72 | pickle.dump(WINNER_NET, output, pickle.HIGHEST_PROTOCOL) 73 | draw_net(WINNER_NET, filename="pureples/experiments/xor/neat_xor_winner") 74 | -------------------------------------------------------------------------------- /pureples/experiments/xor/run_all_xor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Runs ALL XOR tasks using ES-HyperNEAT, HyperNEAT and NEAT. 3 | Reports everything to text files. 4 | """ 5 | 6 | 7 | from multiprocessing import Manager 8 | import multiprocessing as multi 9 | from itertools import repeat 10 | import matplotlib.pyplot as plt 11 | import matplotlib 12 | import es_hyperneat_xor 13 | import hyperneat_xor 14 | import neat_xor 15 | matplotlib.use('Agg') 16 | 17 | 18 | def run(number, gens, neat_stats, hyperneat_stats, es_hyperneat_small_stats, 19 | es_hyperneat_medium_stats, es_hyperneat_large_stats): 20 | """ 21 | Run the experiments. 
22 | """ 23 | print(f"This is run #{str(number)}") 24 | neat_stats.append(neat_xor.run(gens)[1]) 25 | hyperneat_stats.append(hyperneat_xor.run(gens)[1]) 26 | es_hyperneat_small_stats.append(es_hyperneat_xor.run(gens, "S")[1]) 27 | es_hyperneat_medium_stats.append(es_hyperneat_xor.run(gens, "M")[1]) 28 | es_hyperneat_large_stats.append(es_hyperneat_xor.run(gens, "L")[1]) 29 | 30 | 31 | if __name__ == '__main__': 32 | MANAGER = Manager() 33 | 34 | NEAT_STATS, HYPERNEAT_STATS, ES_HYPERNEAT_SMALL_STATS = MANAGER.list( 35 | []), MANAGER.list([]), MANAGER.list([]) 36 | ES_HYPERNEAT_MEDIUM_STATS, ES_HYPERNEAT_LARGE_STATS = MANAGER.list( 37 | []), MANAGER.list([]) 38 | NEAT_RUN_FITNESS, HYPERNEAT_RUN_FITNESSES, ES_HYPERNEAT_SMALL_RUN_FITNESSES = [], [], [] 39 | ES_HYPERNEAT_MEDIUM_RUN_FITNESSES, ES_HYPERNEAT_LARGE_RUN_FITNESSES = [], [] 40 | NEAT_SOLVED, HYPERNEAT_SOLVED, ES_HYPERNEAT_SMALL_SOLVED = 0, 0, 0 41 | ES_HYPERNEAT_MEDIUM_SOLVED, ES_HYPERNEAT_LARGE_SOLVED = 0, 0 42 | RUNS = 20 43 | INPUTS = range(RUNS) 44 | GENS = 300 45 | FIT_THRESHOLD = 0.975 46 | MAX_FIT = 1.0 47 | 48 | P = multi.Pool(multi.cpu_count()) 49 | P.starmap(run, zip(range(RUNS), repeat(GENS), repeat(NEAT_STATS), 50 | repeat(HYPERNEAT_STATS), repeat( 51 | ES_HYPERNEAT_SMALL_STATS), repeat(ES_HYPERNEAT_MEDIUM_STATS), 52 | repeat(ES_HYPERNEAT_LARGE_STATS))) 53 | 54 | # Average the NEAT runs. 55 | TEMP_FIT = [0.0] * GENS 56 | 57 | for stat in NEAT_STATS: 58 | NEAT_RUN_FITNESS.append(stat.best_genome().fitness) 59 | if stat.best_genome().fitness >= FIT_THRESHOLD: 60 | NEAT_SOLVED += 1 61 | 62 | for i in range(GENS): 63 | if i < len(stat.most_fit_genomes): 64 | TEMP_FIT[i] += stat.most_fit_genomes[i].fitness 65 | else: 66 | TEMP_FIT[i] += MAX_FIT 67 | 68 | NEAT_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT] 69 | 70 | # Average the HyperNEAT runs. 
71 | TEMP_FIT = [0.0] * GENS 72 | 73 | for stat in HYPERNEAT_STATS: 74 | HYPERNEAT_RUN_FITNESSES.append(stat.best_genome().fitness) 75 | if stat.best_genome().fitness >= FIT_THRESHOLD: 76 | HYPERNEAT_SOLVED += 1 77 | 78 | for i in range(GENS): 79 | if i < len(stat.most_fit_genomes): 80 | TEMP_FIT[i] += stat.most_fit_genomes[i].fitness 81 | else: 82 | TEMP_FIT[i] += MAX_FIT 83 | 84 | HYPERNEAY_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT] 85 | 86 | # Average the small ES-HyperNEAT runs. 87 | TEMP_FIT = [0.0] * GENS 88 | 89 | for stat in ES_HYPERNEAT_SMALL_STATS: 90 | ES_HYPERNEAT_SMALL_RUN_FITNESSES.append(stat.best_genome().fitness) 91 | if stat.best_genome().fitness >= FIT_THRESHOLD: 92 | ES_HYPERNEAT_SMALL_SOLVED += 1 93 | 94 | for i in range(GENS): 95 | if i < len(stat.most_fit_genomes): 96 | TEMP_FIT[i] += stat.most_fit_genomes[i].fitness 97 | else: 98 | TEMP_FIT[i] += MAX_FIT 99 | 100 | ES_HYPERNEAT_SMALL_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT] 101 | 102 | # Average the medium ES-HyperNEAT runs. 103 | TEMP_FIT = [0.0] * GENS 104 | 105 | for stat in ES_HYPERNEAT_MEDIUM_STATS: 106 | ES_HYPERNEAT_MEDIUM_RUN_FITNESSES.append(stat.best_genome().fitness) 107 | if stat.best_genome().fitness >= FIT_THRESHOLD: 108 | ES_HYPERNEAT_MEDIUM_SOLVED += 1 109 | 110 | for i in range(GENS): 111 | if i < len(stat.most_fit_genomes): 112 | TEMP_FIT[i] += stat.most_fit_genomes[i].fitness 113 | else: 114 | TEMP_FIT[i] += MAX_FIT 115 | 116 | ES_HYPERNEAT_MEDIUM_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT] 117 | 118 | # Average the large ES-HyperNEAT runs. 
119 | TEMP_FIT = [0.0] * GENS 120 | 121 | for stat in ES_HYPERNEAT_LARGE_STATS: 122 | ES_HYPERNEAT_LARGE_RUN_FITNESSES.append(stat.best_genome().fitness) 123 | if stat.best_genome().fitness >= FIT_THRESHOLD: 124 | ES_HYPERNEAT_LARGE_SOLVED += 1 125 | 126 | for i in range(GENS): 127 | if i < len(stat.most_fit_genomes): 128 | TEMP_FIT[i] += stat.most_fit_genomes[i].fitness 129 | else: 130 | TEMP_FIT[i] += MAX_FIT 131 | 132 | ES_HYPERNEAT_LARGE_AVERAGE_FIT = [x / RUNS for x in TEMP_FIT] 133 | 134 | # Write fitnesses to files. 135 | THEFILE = open('neat_xor_run_fitnesses.txt', 'w+') 136 | 137 | for item in NEAT_RUN_FITNESS: 138 | THEFILE.write("%s\n" % item) 139 | if 1.0 in NEAT_AVERAGE_FIT: 140 | THEFILE.write("NEAT solves XOR at generation: " + 141 | str(NEAT_AVERAGE_FIT.index(1.0)-1)) 142 | else: 143 | THEFILE.write("NEAT does not solve XOR with best fitness: " + 144 | str(NEAT_AVERAGE_FIT[GENS-1])) 145 | THEFILE.write("\nNEAT solves XOR in " + str(NEAT_SOLVED) + 146 | " out of " + str(RUNS) + " runs.") 147 | 148 | THEFILE = open('hyperneat_xor_run_fitnesses.txt', 'w+') 149 | 150 | for item in HYPERNEAT_RUN_FITNESSES: 151 | THEFILE.write("%s\n" % item) 152 | if 1.0 in HYPERNEAY_AVERAGE_FIT: 153 | THEFILE.write("HyperNEAT solves XOR at generation: " + 154 | str(HYPERNEAY_AVERAGE_FIT.index(1.0)-1)) 155 | else: 156 | THEFILE.write("HyperNEAT does not solve XOR with best fitness: " + 157 | str(HYPERNEAY_AVERAGE_FIT[GENS-1])) 158 | THEFILE.write("\nHyperEAT solves XOR in " + 159 | str(HYPERNEAT_SOLVED) + " out of " + str(RUNS) + " runs.") 160 | 161 | THEFILE = open('es_hyperneat_xor_small_run_fitnesses.txt', 'w+') 162 | 163 | for item in ES_HYPERNEAT_SMALL_RUN_FITNESSES: 164 | THEFILE.write("%s\n" % item) 165 | if 1.0 in ES_HYPERNEAT_SMALL_AVERAGE_FIT: 166 | THEFILE.write("ESHyperNEAT small solves XOR at generation: " + 167 | str(ES_HYPERNEAT_SMALL_AVERAGE_FIT.index(1.0)-1)) 168 | else: 169 | THEFILE.write("ES-HyperNEAT small does not solve XOR with best fitness: " + 170 
| str(ES_HYPERNEAT_SMALL_AVERAGE_FIT[GENS-1])) 171 | THEFILE.write("\nES-HyperNEAT small solves XOR in " + 172 | str(ES_HYPERNEAT_SMALL_SOLVED) + " out of " + str(RUNS) + " runs.") 173 | 174 | THEFILE = open('es_hyperneat_xor_medium_run_fitnesses.txt', 'w+') 175 | 176 | for item in ES_HYPERNEAT_MEDIUM_RUN_FITNESSES: 177 | THEFILE.write("%s\n" % item) 178 | if 1.0 in ES_HYPERNEAT_MEDIUM_AVERAGE_FIT: 179 | THEFILE.write("ESHyperNEAT medium solves XOR at generation: " + 180 | str(ES_HYPERNEAT_MEDIUM_AVERAGE_FIT.index(1.0)-1)) 181 | else: 182 | THEFILE.write("ES-HyperNEAT medium does not solve XOR with best fitness: " + 183 | str(ES_HYPERNEAT_MEDIUM_AVERAGE_FIT[GENS-1])) 184 | THEFILE.write("\nES-HyperNEAT medium solves XOR in " + 185 | str(ES_HYPERNEAT_MEDIUM_SOLVED) + " out of " + str(RUNS) + " runs.") 186 | 187 | THEFILE = open('es_hyperneat_xor_large_run_fitnesses.txt', 'w+') 188 | 189 | for item in ES_HYPERNEAT_LARGE_RUN_FITNESSES: 190 | THEFILE.write("%s\n" % item) 191 | if 1.0 in ES_HYPERNEAT_LARGE_AVERAGE_FIT: 192 | THEFILE.write("ESHyperNEAT large solves XOR at generation: " + 193 | str(ES_HYPERNEAT_LARGE_AVERAGE_FIT.index(1.0)-1)) 194 | else: 195 | THEFILE.write("ES-HyperNEAT large does not solve XOR with best fitness: " + 196 | str(ES_HYPERNEAT_LARGE_AVERAGE_FIT[GENS-1])) 197 | THEFILE.write("\nES-HyperNEAT large solves XOR in " + 198 | str(ES_HYPERNEAT_LARGE_SOLVED) + " out of " + str(RUNS) + " runs.") 199 | 200 | # Plot the fitnesses. 
201 | plt.plot(range(GENS), NEAT_AVERAGE_FIT, 'r-', label="NEAT") 202 | plt.plot(range(GENS), HYPERNEAY_AVERAGE_FIT, 'g--', label="HyperNEAT") 203 | plt.plot(range(GENS), ES_HYPERNEAT_SMALL_AVERAGE_FIT, 204 | 'b-.', label="ES-HyperNEAT small") 205 | plt.plot(range(GENS), ES_HYPERNEAT_MEDIUM_AVERAGE_FIT, 206 | 'c-.', label="ES-HyperNEAT medium") 207 | plt.plot(range(GENS), ES_HYPERNEAT_LARGE_AVERAGE_FIT, 208 | 'm-.', label="ES-HyperNEAT large") 209 | 210 | plt.title("Average XOR fitnesses") 211 | plt.xlabel("Generations") 212 | plt.ylabel("Fitness") 213 | plt.grid() 214 | plt.legend(loc="best") 215 | 216 | plt.savefig('xor_fitnesses.svg') 217 | 218 | plt.close() 219 | -------------------------------------------------------------------------------- /pureples/hyperneat/__init__.py: -------------------------------------------------------------------------------- 1 | from pureples.hyperneat.hyperneat import create_phenotype_network 2 | 3 | -------------------------------------------------------------------------------- /pureples/hyperneat/hyperneat.py: -------------------------------------------------------------------------------- 1 | """ 2 | All Hyperneat related logic resides here. 3 | """ 4 | 5 | import neat 6 | 7 | 8 | def create_phenotype_network(cppn, substrate, activation_function="sigmoid"): 9 | """ 10 | Creates a recurrent network using a cppn and a substrate. 11 | """ 12 | input_coordinates = substrate.input_coordinates 13 | output_coordinates = substrate.output_coordinates 14 | # List of layers, first index = top layer. 
15 | hidden_coordinates = substrate.hidden_coordinates 16 | 17 | input_nodes = list(range(len(input_coordinates))) 18 | output_nodes = list(range(len(input_nodes), len( 19 | input_nodes)+len(output_coordinates))) 20 | 21 | counter = 0 22 | for layer in hidden_coordinates: 23 | counter += len(layer) 24 | 25 | hidden_nodes = range(len(input_nodes)+len(output_nodes), 26 | len(input_nodes)+len(output_nodes)+counter) 27 | 28 | node_evals = [] 29 | 30 | # Get activation function. 31 | activation_functions = neat.activations.ActivationFunctionSet() 32 | activation = activation_functions.get(activation_function) 33 | 34 | # Connect hidden to output. 35 | counter = 0 36 | for oc in output_coordinates: 37 | idx = 0 38 | for layer in hidden_coordinates: 39 | im = find_neurons(cppn, oc, layer, hidden_nodes[idx], False) 40 | idx += len(layer) 41 | if im: 42 | node_evals.append( 43 | (output_nodes[counter], activation, sum, 0.0, 1.0, im)) 44 | 45 | counter += 1 46 | 47 | # Connect hidden to hidden - starting from the top layer. 48 | current_layer = 1 49 | idx = 0 50 | for layer in hidden_coordinates: 51 | idx += len(layer) 52 | counter = idx - len(layer) 53 | for i in range(current_layer, len(hidden_coordinates)): 54 | for hc in layer: 55 | im = find_neurons( 56 | cppn, hc, hidden_coordinates[i], hidden_nodes[idx], False) 57 | if im: 58 | node_evals.append( 59 | (hidden_nodes[counter], activation, sum, 0.0, 1.0, im)) 60 | counter += 1 61 | 62 | counter -= idx 63 | 64 | current_layer += 1 65 | 66 | # Connect input to hidden. 
67 | counter = 0 68 | for layer in hidden_coordinates: 69 | for hc in layer: 70 | im = find_neurons(cppn, hc, input_coordinates, 71 | input_nodes[0], False) 72 | if im: 73 | node_evals.append( 74 | (hidden_nodes[counter], activation, sum, 0.0, 1.0, im)) 75 | counter += 1 76 | 77 | return neat.nn.RecurrentNetwork(input_nodes, output_nodes, node_evals) 78 | 79 | 80 | def find_neurons(cppn, coord, nodes, start_idx, outgoing, max_weight=5.0): 81 | """ 82 | Find the neurons to which the given coord is connected. 83 | """ 84 | im = [] 85 | idx = start_idx 86 | 87 | for node in nodes: 88 | w = query_cppn(coord, node, outgoing, cppn, max_weight) 89 | 90 | if w != 0.0: # Only include connection if the weight isn't 0.0. 91 | im.append((idx, w)) 92 | idx += 1 93 | 94 | return im 95 | 96 | 97 | def query_cppn(coord1, coord2, outgoing, cppn, max_weight=5.0): 98 | """ 99 | Get the weight from one point to another using the CPPN. 100 | Takes into consideration which point is source/target. 101 | """ 102 | 103 | if outgoing: 104 | i = [coord1[0], coord1[1], coord2[0], coord2[1], 1.0] 105 | else: 106 | i = [coord2[0], coord2[1], coord1[0], coord1[1], 1.0] 107 | w = cppn.activate(i)[0] 108 | if abs(w) > 0.2: # If abs(weight) is below threshold, treat weight as 0.0. 
109 | if w > 0: 110 | w = (w - 0.2) / 0.8 111 | else: 112 | w = (w + 0.2) / 0.8 113 | return w * max_weight 114 | else: 115 | return 0.0 116 | -------------------------------------------------------------------------------- /pureples/shared/__init__.py: -------------------------------------------------------------------------------- 1 | from pureples.shared.create_cppn import create_cppn 2 | from pureples.shared.gym_runner import run_es, run_hyper, run_neat 3 | from pureples.shared.visualize import draw_net, draw_pattern, draw_es 4 | from pureples.shared.substrate import Substrate 5 | -------------------------------------------------------------------------------- /pureples/shared/create_cppn.py: -------------------------------------------------------------------------------- 1 | """ 2 | CPPN creator. 3 | """ 4 | 5 | import neat 6 | from neat.graphs import feed_forward_layers 7 | 8 | 9 | def create_cppn(genome, config, output_activation_function="tanh"): 10 | """ 11 | Receives a genome and returns its phenotype (a FeedForwardNetwork). 12 | """ 13 | 14 | # Gather expressed connections. 15 | connections = [cg.key for cg in genome.connections.values() if cg.enabled] 16 | 17 | layers = feed_forward_layers( 18 | config.genome_config.input_keys, config.genome_config.output_keys, connections) 19 | node_evals = [] 20 | for layer in layers: 21 | for node in layer: 22 | inputs = [] 23 | node_expr = [] # currently unused 24 | 25 | for conn_key in connections: 26 | inode, onode = conn_key 27 | if onode == node: 28 | cg = genome.connections[conn_key] 29 | inputs.append((inode, cg.weight)) 30 | node_expr.append("v[{}] * {:.7e}".format(inode, cg.weight)) 31 | 32 | ng = genome.nodes[node] 33 | aggregation_function = config.genome_config.aggregation_function_defs.get( 34 | ng.aggregation) 35 | # Fix the output note's activation function to any function. 
36 | if node in config.genome_config.output_keys: 37 | ng.activation = output_activation_function 38 | activation_function = config.genome_config.activation_defs.get( 39 | ng.activation) 40 | node_evals.append( 41 | (node, activation_function, aggregation_function, ng.bias, ng.response, inputs)) 42 | 43 | return neat.nn.FeedForwardNetwork(config.genome_config.input_keys, 44 | config.genome_config.output_keys, node_evals) 45 | -------------------------------------------------------------------------------- /pureples/shared/gym_runner.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generic runner for AI Gym - runs Neat, Hyperneat and ES-Hyperneat 3 | """ 4 | 5 | import neat 6 | import numpy as np 7 | from pureples.hyperneat.hyperneat import create_phenotype_network 8 | from pureples.es_hyperneat.es_hyperneat import ESNetwork 9 | 10 | 11 | def ini_pop(state, stats, config, output): 12 | """ 13 | Initialize population attaching statistics reporter. 14 | """ 15 | pop = neat.population.Population(config, state) 16 | if output: 17 | pop.add_reporter(neat.reporting.StdOutReporter(True)) 18 | pop.add_reporter(stats) 19 | return pop 20 | 21 | 22 | def run_es(gens, env, max_steps, config, params, substrate, max_trials=100, output=True): 23 | """ 24 | Generic OpenAI Gym runner for ES-HyperNEAT. 
25 | """ 26 | trials = 1 27 | 28 | def eval_fitness(genomes, config): 29 | 30 | for _, g in genomes: 31 | cppn = neat.nn.FeedForwardNetwork.create(g, config) 32 | network = ESNetwork(substrate, cppn, params) 33 | net = network.create_phenotype_network() 34 | 35 | fitnesses = [] 36 | 37 | for _ in range(trials): 38 | ob = env.reset() 39 | net.reset() 40 | 41 | total_reward = 0 42 | 43 | for _ in range(max_steps): 44 | for _ in range(network.activations): 45 | o = net.activate(ob) 46 | 47 | action = np.argmax(o) 48 | ob, reward, done, _ = env.step(action) 49 | total_reward += reward 50 | if done: 51 | break 52 | 53 | fitnesses.append(total_reward) 54 | 55 | g.fitness = np.array(fitnesses).mean() 56 | 57 | # Create population and train the network. Return winner of network running 100 episodes. 58 | stats_one = neat.statistics.StatisticsReporter() 59 | pop = ini_pop(None, stats_one, config, output) 60 | pop.run(eval_fitness, gens) 61 | 62 | stats_ten = neat.statistics.StatisticsReporter() 63 | pop = ini_pop((pop.population, pop.species, 0), stats_ten, config, output) 64 | trials = 10 65 | winner_ten = pop.run(eval_fitness, gens) 66 | 67 | if max_trials == 0: 68 | return winner_ten, (stats_one, stats_ten) 69 | 70 | stats_hundred = neat.statistics.StatisticsReporter() 71 | pop = ini_pop((pop.population, pop.species, 0), 72 | stats_hundred, config, output) 73 | trials = max_trials 74 | winner_hundred = pop.run(eval_fitness, gens) 75 | return winner_hundred, (stats_one, stats_ten, stats_hundred) 76 | 77 | 78 | def run_hyper(gens, env, max_steps, config, substrate, activations, max_trials=100, 79 | activation="sigmoid", output=True): 80 | """ 81 | Generic OpenAI Gym runner for HyperNEAT. 
82 | """ 83 | trials = 1 84 | 85 | def eval_fitness(genomes, config): 86 | 87 | for _, g in genomes: 88 | cppn = neat.nn.FeedForwardNetwork.create(g, config) 89 | net = create_phenotype_network(cppn, substrate, activation) 90 | 91 | fitnesses = [] 92 | 93 | for _ in range(trials): 94 | ob = env.reset() 95 | net.reset() 96 | 97 | total_reward = 0 98 | 99 | for _ in range(max_steps): 100 | for _ in range(activations): 101 | o = net.activate(ob) 102 | action = np.argmax(o) 103 | ob, reward, done, _ = env.step(action) 104 | total_reward += reward 105 | if done: 106 | break 107 | fitnesses.append(total_reward) 108 | 109 | g.fitness = np.array(fitnesses).mean() 110 | 111 | # Create population and train the network. Return winner of network running 100 episodes. 112 | stats_one = neat.statistics.StatisticsReporter() 113 | pop = ini_pop(None, stats_one, config, output) 114 | pop.run(eval_fitness, gens) 115 | 116 | stats_ten = neat.statistics.StatisticsReporter() 117 | pop = ini_pop((pop.population, pop.species, 0), stats_ten, config, output) 118 | trials = 10 119 | winner_ten = pop.run(eval_fitness, gens) 120 | 121 | if max_trials == 0: 122 | return winner_ten, (stats_one, stats_ten) 123 | 124 | stats_hundred = neat.statistics.StatisticsReporter() 125 | pop = ini_pop((pop.population, pop.species, 0), 126 | stats_hundred, config, output) 127 | trials = max_trials 128 | winner_hundred = pop.run(eval_fitness, gens) 129 | return winner_hundred, (stats_one, stats_ten, stats_hundred) 130 | 131 | 132 | def run_neat(gens, env, max_steps, config, max_trials=100, output=True): 133 | """ 134 | Generic OpenAI Gym runner for NEAT. 
135 |     """
136 |     trials = 1
137 | 
138 |     def eval_fitness(genomes, config):
139 | 
140 |         for _, g in genomes:
141 |             net = neat.nn.FeedForwardNetwork.create(g, config)
142 | 
143 |             fitnesses = []
144 | 
145 |             for _ in range(trials):
146 |                 ob = env.reset()
147 | 
148 |                 total_reward = 0
149 | 
150 |                 for _ in range(max_steps):
151 |                     o = net.activate(ob)
152 |                     action = np.argmax(o)
153 |                     ob, reward, done, _ = env.step(action)
154 |                     total_reward += reward
155 |                     if done:
156 |                         break
157 |                 fitnesses.append(total_reward)
158 | 
159 |             g.fitness = np.array(fitnesses).mean()
160 | 
161 |     # Create population and train the network. Return winner of network running 100 episodes.
162 |     stats_one = neat.statistics.StatisticsReporter()
163 |     pop = ini_pop(None, stats_one, config, output)
164 |     pop.run(eval_fitness, gens)
165 | 
166 |     stats_ten = neat.statistics.StatisticsReporter()
167 |     pop = ini_pop((pop.population, pop.species, 0), stats_ten, config, output)
168 |     trials = 10
169 |     winner_ten = pop.run(eval_fitness, gens)
170 | 
171 |     if max_trials == 0:
172 |         return winner_ten, (stats_one, stats_ten)
173 | 
174 |     stats_hundred = neat.statistics.StatisticsReporter()
175 |     pop = ini_pop((pop.population, pop.species, 0),
176 |                   stats_hundred, config, output)
177 |     trials = max_trials
178 |     winner_hundred = pop.run(eval_fitness, gens)
179 |     return winner_hundred, (stats_one, stats_ten, stats_hundred)
180 | 
--------------------------------------------------------------------------------
/pureples/shared/substrate.py:
--------------------------------------------------------------------------------
 1 | """
 2 | The substrate.
 3 | """
 4 | 
 5 | 
 6 | class Substrate(object):
 7 |     """
 8 |     Represents a substrate: Input coordinates, output coordinates, hidden coordinates and a resolution defaulting to 10.0.
 9 |     """
10 | 
11 |     def __init__(self, input_coordinates, output_coordinates, hidden_coordinates=(), res=10.0):
12 |         self.input_coordinates = input_coordinates
13 |         self.hidden_coordinates = hidden_coordinates
14 |         self.output_coordinates = output_coordinates
15 |         self.res = res
16 | 
--------------------------------------------------------------------------------
/pureples/shared/test_cppn.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Visualizes a CPPN - remember to edit path in visualize.py, sorry.
 3 | """
 4 | 
 5 | import pickle
 6 | from pureples.es_hyperneat.es_hyperneat import find_pattern
 7 | from pureples.shared.visualize import draw_pattern
 8 | 
 9 | path_to_cppn = "es_hyperneat_xor_small_cppn.pkl"
10 | 
11 | # For now, path_to_cppn should match path in visualize.py, sorry.
12 | with open(path_to_cppn, 'rb') as cppn_input:
13 |     CPPN = pickle.load(cppn_input)
14 |     pattern = find_pattern(CPPN, (0.0, -1.0))
15 |     draw_pattern(pattern)
16 | 
--------------------------------------------------------------------------------
/pureples/shared/visualize.py:
--------------------------------------------------------------------------------
  1 | """
  2 | Varying visualisation tools.
  3 | """
  4 | 
  5 | import pickle
  6 | import graphviz
  7 | import matplotlib.pyplot as plt
  8 | 
  9 | 
 10 | def draw_net(net, filename=None, node_names={}, node_colors={}):
 11 |     """
 12 |     Draw neural network with arbitrary topology.
 13 |     """
 14 |     node_attrs = {
 15 |         'shape': 'circle',
 16 |         'fontsize': '9',
 17 |         'height': '0.2',
 18 |         'width': '0.2'}
 19 | 
 20 |     dot = graphviz.Digraph('svg', node_attr=node_attrs)
 21 | 
 22 |     inputs = set()
 23 |     for k in net.input_nodes:
 24 |         inputs.add(k)
 25 |         name = node_names.get(k, str(k))
 26 |         input_attrs = {'style': 'filled',
 27 |                        'shape': 'box',
 28 |                        'fillcolor': node_colors.get(k, 'lightgray')}
 29 |         dot.node(name, _attributes=input_attrs)
 30 | 
 31 |     outputs = set()
 32 |     for k in net.output_nodes:
 33 |         outputs.add(k)
 34 |         name = node_names.get(k, str(k))
 35 |         node_attrs = {'style': 'filled',
 36 |                       'fillcolor': node_colors.get(k, 'lightblue')}
 37 |         dot.node(name, _attributes=node_attrs)
 38 | 
 39 |     for node, _, _, _, _, links in net.node_evals:
 40 |         for i, w in links:
 41 |             node_input, output = node, i
 42 |             a = node_names.get(output, str(output))
 43 |             b = node_names.get(node_input, str(node_input))
 44 |             style = 'solid'
 45 |             color = 'green' if w > 0.0 else 'red'
 46 |             width = str(0.1 + abs(w / 5.0))
 47 |             dot.edge(a, b, _attributes={
 48 |                 'style': style, 'color': color, 'penwidth': width})
 49 | 
 50 |     dot.render(filename)
 51 | 
 52 |     return dot
 53 | 
 54 | 
 55 | def onclick(event):
 56 |     """
 57 |     Click handler for weight gradient created by a CPPN. Will re-query with the clicked coordinate.
 58 |     """
 59 |     plt.close()
 60 |     x = event.xdata
 61 |     y = event.ydata
 62 | 
 63 |     path_to_cppn = "es_hyperneat_xor_small_cppn.pkl"
 64 |     # For now, path_to_cppn should match path in test_cppn.py, sorry.
 65 |     with open(path_to_cppn, 'rb') as cppn_input:
 66 |         cppn = pickle.load(cppn_input)
 67 |         from pureples.es_hyperneat.es_hyperneat import find_pattern
 68 |         pattern = find_pattern(cppn, (x, y))
 69 |         draw_pattern(pattern)
 70 | 
 71 | 
 72 | def draw_pattern(im, res=60):
 73 |     """
 74 |     Draws the pattern/weight gradient queried by a CPPN.
 75 |     """
 76 |     fig = plt.figure()
 77 |     plt.axis([-1, 1, -1, 1])
 78 |     fig.add_subplot(111)
 79 | 
 80 |     a = range(res)
 81 |     b = range(res)
 82 | 
 83 |     for x in a:
 84 |         for y in b:
 85 |             px = -1.0 + (x/float(res))*2.0+1.0/float(res)
 86 |             py = -1.0 + (y/float(res))*2.0+1.0/float(res)
 87 |             c = str(0.5-im[x][y]/float(res))
 88 |             plt.plot(px, py, marker='s', color=c)
 89 | 
 90 |     fig.canvas.mpl_connect('button_press_event', onclick)
 91 | 
 92 |     plt.grid()
 93 |     plt.show()
 94 | 
 95 | 
 96 | def draw_es(id_to_coords, connections, filename):
 97 |     """
 98 |     Draw the net created by ES-HyperNEAT
 99 |     """
100 |     fig = plt.figure()
101 |     plt.axis([-1.1, 1.1, -1.1, 1.1])
102 |     fig.add_subplot(111)
103 | 
104 |     for c in connections:
105 |         color = 'red'
106 |         if c.weight > 0.0:
107 |             color = 'black'
108 |         plt.arrow(c.x1, c.y1, c.x2-c.x1, c.y2-c.y1, head_width=0.00, head_length=0.0,
109 |                   fc=color, ec=color, length_includes_head=True)
110 | 
111 |     for (coord, _) in id_to_coords.items():
112 |         plt.plot(coord[0], coord[1], marker='o', markersize=8.0, color='grey')
113 | 
114 |     plt.grid()
115 |     fig.savefig(filename)
116 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
 1 | from setuptools import setup
 2 | 
 3 | setup(
 4 |     name='pureples',
 5 |     version='0.0',
 6 |     author='adrian, simon',
 7 |     author_email='mail@adrianwesth.dk',
 8 |     maintainer='simon, adrian',
 9 |     maintainer_email='mail@adrianwesth.dk',
10 |     url='https://github.com/ukuleleplayer/pureples',
11 |     license="MIT",
12 |     description='HyperNEAT and ES-HyperNEAT implemented in pure Python',
13 |     long_description='Python implementation of HyperNEAT and ES-HyperNEAT ' +
14 |                      'developed by Adrian Westh and Simon Krabbe Munck for evolving arbitrary neural networks. ' +
15 |                      'HyperNEAT and ES-HyperNEAT is originally developed by Kenneth O. Stanley and Sebastian Risi',
16 |     packages=['pureples', 'pureples/hyperneat', 'pureples/es_hyperneat', 'pureples/shared'],
17 |     classifiers=[
18 |         'Development Status :: 3 - Alpha',
19 |         'Intended Audience :: Developers',
20 |         'Intended Audience :: Education',
21 |         'Intended Audience :: Science/Research',
22 |         'License :: MIT License',
23 |         'Operating System :: OS Independent',
24 |         'Programming Language :: Python :: 3.x',
25 |         'Programming Language :: Python :: Implementation :: PyPy',
26 |         'Topic :: Scientific/Engineering'
27 |     ],
28 |     install_requires=['numpy', 'neat-python', 'graphviz', 'matplotlib', 'gym']
29 | )
--------------------------------------------------------------------------------