├── .gitignore
├── LICENSE
├── README.md
├── benchmark
│   ├── __init__.py
│   ├── dataset
│   │   ├── mount_everest.csv
│   │   └── rastrigin.csv
│   ├── main_for_mount_everest.py
│   ├── main_for_rastrigin.py
│   ├── methods
│   │   ├── __init__.py
│   │   ├── evolutor.py
│   │   ├── inherent.py
│   │   ├── obtain_rastrigin.py
│   │   ├── show_me_results.py
│   │   └── show_ra_results.py
│   └── results
│       └── note.md
├── configures
│   ├── example
│   │   ├── cart-pole-v0
│   │   ├── lunar-lander-v2
│   │   └── xor
│   └── task
│       ├── cart-pole-v0.bi
│       ├── cart-pole-v0.fs
│       ├── cart-pole-v0.gs
│       ├── cart-pole-v0.n
│       ├── logic.bi
│       ├── logic.fs
│       ├── logic.gs
│       ├── logic.n
│       ├── lunar-lander-v2.bi
│       ├── lunar-lander-v2.fs
│       ├── lunar-lander-v2.gs
│       └── lunar-lander-v2.n
├── evolution
│   ├── __init__.py
│   ├── bean
│   │   ├── __init__.py
│   │   ├── attacker.py
│   │   ├── genome.py
│   │   └── species_set.py
│   ├── evolutor.py
│   └── methods
│       ├── __init__.py
│       ├── bi.py
│       └── gs.py
├── example
│   ├── __init__.py
│   ├── cart_pole_v0.py
│   ├── cart_pole_v0_with_attacker.py
│   ├── lunar_lander_v2.py
│   └── xor.py
├── figures
│   ├── cartpole.gif
│   ├── demo_RET2020.png
│   └── lunar_lander_success_example.gif
├── output
│   └── note.md
├── setup.py
├── tasks
│   ├── __init__.py
│   ├── task_handler.py
│   └── task_inform.py
└── utils
    ├── __init__.py
    ├── operator.py
    └── visualize.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | # IDEA
107 | .idea/
108 |
109 | # output middleware
110 | output/*
111 | !output/*.md
112 | benchmark/results/*
113 | !benchmark/results/*.md
114 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Evolving Neural Networks through a Reverse Encoding Tree
2 |
3 | News: our paper has been accepted to IEEE CEC 2020 for a lecture presentation. An updated version can be found [here](https://arxiv.org/abs/2002.00539). Feel free to contact us for experiment details. [Video](https://www.youtube.com/watch?v=bCfc5macPD0)
4 |
5 |
6 |
7 | Python 3.7 implementation (developed in PyCharm) of the **Reverse Encoding Tree** from the [paper](https://arxiv.org/abs/2002.00539).
8 | ## Getting Started
9 | The library is divided into two parts.
10 | The **benchmark** part makes it easy to understand the principle of our strategy and how it differs from other strategies.
11 | The **evolution** part can be used for many **NeuroEvolution** tasks.
12 |
13 | We have further integrated **neat-python** in **evolution/bean**.
14 | The files in the **example** folder show how to use the original NEAT to solve the well-accepted tasks.
15 | The **tasks** folder includes all the execution scripts for the experiments mentioned in the paper.
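
For example, the RET strategy on the Mount Everest benchmark can be reproduced in a few lines (adapted from `obtain_bi` in `benchmark/main_for_mount_everest.py`; run it from the `benchmark` folder so the relative paths resolve):

```python
from ReverseEncodingTree.benchmark.methods import inherent
from ReverseEncodingTree.benchmark.methods.evolutor import BI
from ReverseEncodingTree.benchmark.methods.show_me_results import clime_by_generation, get_mount_everest

# load the Mount Everest landscape and evolve a small population with the RET (BI) strategy
_, _, landscape = get_mount_everest("./dataset/mount_everest.csv")
strategy = BI(size=10, init_interval=50, min_interval=1, scope=len(landscape))
strategy.evolute(terrain=landscape, generations=8, evolute_type=inherent.MAX)

# plot the recorded populations at a few selected generations
for generation in [0, 1, 3, 5, 7]:
    clime_by_generation(strategy.recorder.get_result(), "RET", generation, False,
                        mount_path="./dataset/mount_everest.csv", save_path="./results/")
```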
16 |
17 | ### Prerequisites
18 | - [neat-python](https://pypi.org/project/neat-python/) -- version 0.92
19 | - [gym](https://pypi.org/project/gym/) -- version 0.14.0
20 | - [box2d](https://pypi.org/project/Box2D/) -- version 2.3.2
21 | - [matplotlib](https://pypi.org/project/matplotlib/) -- version 3.1.1
22 | - [pandas](https://pypi.org/project/pandas/) -- version 0.25.1
23 | - [numpy](https://pypi.org/project/numpy/) -- version 1.17.1
24 |
25 | ### Building a Bi-NEAT
26 | We add six hyper-parameters to the configuration:
27 | - **max_node_num** in the **network parameters**: the maximum number of nodes in any generated neural network; it describes the range of the phenotypic landscape.
28 | - **init_distance** in the **Reproduction**: the minimum distance between any two neural networks in the initial (first) generation.
29 | - **min_distance** in the **Reproduction**: the minimum distance between any two neural networks after the initial (first) generation.
30 | - **correlation_rate** in the **Reproduction**: the demarcation line between positive and negative correlation coefficients. The default value is **-0.5**; if the correlation coefficient is less than the correlation rate, it is regarded as positive.
31 | - **search_count** in the **Reproduction**: the maximum number of search attempts when adding a novel neural network.
32 | - **cluster_method** in the **Reproduction**: the clustering method to use; the default is **kmeans++**. Available options are **kmeans**, **kmeans++**, **birch**, and **spectral**.
33 |
34 | You need to create a configuration file before running; the documentation covering the original settings is available at [https://readthedocs.org/projects/neat-python/](https://readthedocs.org/projects/neat-python/).
35 |
36 | After creating the configuration file:
37 | ```python
38 | from neat import population, config, genome, reproduction, species, stagnation
39 |
40 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction, species.DefaultSpeciesSet, stagnation.DefaultStagnation, "your configure path")
41 | task_population = population.Population(task_config)
42 | ```
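
The population can then be evolved with the standard neat-python API. Below is a minimal sketch, assuming a configuration with two inputs and one output; `eval_genomes` is a placeholder fitness function (here the classic XOR score, for illustration only):

```python
from neat import nn


def eval_genomes(genomes, config):
    # placeholder fitness function: score each genome on the XOR truth table
    xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
    xor_outputs = [0.0, 1.0, 1.0, 0.0]
    for genome_id, genome in genomes:
        net = nn.FeedForwardNetwork.create(genome, config)
        genome.fitness = 4.0
        for inputs, expected in zip(xor_inputs, xor_outputs):
            output = net.activate(inputs)
            genome.fitness -= (output[0] - expected) ** 2


# evolve for at most 100 generations and keep the best genome
winner = task_population.run(eval_genomes, 100)
```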
43 | ### Continuous Learning Environments
44 |
45 | - CartPole-v0
46 |
47 |
48 |
49 | - LunarLander-v2
50 |
51 |
52 |
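
An evolved genome can be evaluated in the corresponding Gym environment by compiling it into a network and feeding it the observations. A minimal sketch (not the exact code in `example/`; `winner` and `task_config` refer to the snippet above, and a CartPole configuration with four inputs and two outputs is assumed):

```python
import gym
from neat import nn

env = gym.make("CartPole-v0")
net = nn.FeedForwardNetwork.create(winner, task_config)

observation = env.reset()
total_reward, done = 0.0, False
while not done:
    outputs = net.activate(observation)
    action = outputs.index(max(outputs))  # choose the output node with the highest activation
    observation, reward, done, _ = env.step(action)
    total_reward += reward
print("episode reward:", total_reward)
```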
53 | If this repo helps you or is used in your research, please consider citing this paper. Thank you.
54 |
55 | - [Evolving Neural Networks through a Reverse Encoding Tree](https://arxiv.org/abs/2002.00539), arXiv:2002.00539, IEEE CEC 2020 Oral.
56 |
57 | ````
58 | @inproceedings{zhang2020evolving,
59 | title={Evolving neural networks through a reverse encoding tree},
60 | author={Zhang, Haoling and Yang, Chao-Han Huck and Zenil, Hector and Kiani, Narsis A and Shen, Yue and Tegner, Jesper N},
61 | booktitle={2020 IEEE Congress on Evolutionary Computation (CEC)},
62 | pages={1--10},
63 | year={2020},
64 | organization={IEEE}
65 | }
66 |
67 | ````
68 |
69 | Haoling Zhang, Chao-Han Huck Yang, Hector Zenil, Narsis A. Kiani, Yue Shen, Jesper N. Tegner
70 |
71 | # Contributors to this library
72 | [Haoling Zhang](https://github.com/HaolingZHANG), [Chao-Han Huck Yang](https://github.com/huckiyang)
73 |
--------------------------------------------------------------------------------
/benchmark/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/benchmark/__init__.py
--------------------------------------------------------------------------------
/benchmark/main_for_mount_everest.py:
--------------------------------------------------------------------------------
1 | from ReverseEncodingTree.benchmark.methods import inherent
2 | from ReverseEncodingTree.benchmark.methods.evolutor import GA, PBIL, BI, CMAES
3 | from ReverseEncodingTree.benchmark.methods.show_me_results import clime_by_generation, get_mount_everest
4 |
5 |
6 | def obtain_ga(start_position, stride):
7 | _, _, landscape = get_mount_everest("./dataset/mount_everest.csv")
8 | function = GA(size=90, scope=len(landscape), start_position=start_position, stride=stride)
9 | function.evolute(terrain=landscape, generations=200, evolute_type=inherent.MAX)
10 |
11 | for i in [0, 49, 99, 149, 199]:
12 | clime_by_generation(function.recorder.get_result(), "GA", i, False,
13 | mount_path="./dataset/mount_everest.csv", save_path="./results/")
14 |
15 |
16 | def obtain_pbil():
17 | _, _, landscape = get_mount_everest("./dataset/mount_everest.csv")
18 | function = PBIL(size=90, scope=len(landscape))
19 | function.evolute(terrain=landscape, generations=80, evolute_type=inherent.MAX)
20 |
21 | for i in [0, 19, 39, 59, 79]:
22 | clime_by_generation(function.recorder.get_result(), "PBIL", i, False,
23 | mount_path="./dataset/mount_everest.csv", save_path="./results/")
24 |
25 |
26 | def obtain_cmaes(start_position, stride):
27 | _, _, landscape = get_mount_everest("./dataset/mount_everest.csv")
28 | function = CMAES(size=90, scope=len(landscape), start_position=start_position, stride=stride)
29 | function.evolute(terrain=landscape, generations=20, evolute_type=inherent.MAX)
30 |
31 | for i in [0, 4, 9, 14, 19]:
32 | clime_by_generation(function.recorder.get_result(), "CMA-ES", i, False,
33 | mount_path="./dataset/mount_everest.csv", save_path="./results/")
34 |
35 |
36 | def obtain_bi():
37 | _, _, landscape = get_mount_everest("./dataset/mount_everest.csv")
38 | function = BI(size=10, init_interval=50, min_interval=1, scope=len(landscape))
39 | function.evolute(terrain=landscape, generations=8, evolute_type=inherent.MAX)
40 |
41 | for i in [0, 1, 3, 5, 7]:
42 | clime_by_generation(function.recorder.get_result(), "RET", i, False,
43 | mount_path="./dataset/mount_everest.csv", save_path="./results/")
44 |
45 |
46 | def get_min(landscape):
47 | height = landscape[0][0]
48 | position = [0, 0]
49 | for row in range(len(landscape)):
50 | for col in range(len(landscape[row])):
51 | if landscape[row][col] < height:
52 | height = landscape[row][col]
53 | position = [row, col]
54 |
55 | return position, height
56 |
57 |
58 | if __name__ == '__main__':
59 | _, _, fitness_landscape = get_mount_everest("./dataset/mount_everest.csv")
60 | min_position, min_height = get_min(fitness_landscape)
61 | obtain_ga(min_position, 15)
62 | obtain_pbil()
63 | obtain_cmaes(min_position, 28)
64 | obtain_bi()
65 |
--------------------------------------------------------------------------------
/benchmark/main_for_rastrigin.py:
--------------------------------------------------------------------------------
1 | from ReverseEncodingTree.benchmark.methods import inherent
2 | from ReverseEncodingTree.benchmark.methods.evolutor import GA, PBIL, BI, CMAES
3 | from ReverseEncodingTree.benchmark.methods.show_ra_results import clime_by_generation, get_rastrigin
4 |
5 |
6 | def obtain_ga(start_position, stride):
7 | _, _, landscape = get_rastrigin("./dataset/rastrigin.csv")
8 | function = GA(size=90, scope=len(landscape), start_position=start_position, stride=stride)
9 | function.evolute(terrain=landscape, generations=200, evolute_type=inherent.MAX)
10 |
11 | for i in [0, 49, 99, 149, 199]:
12 | clime_by_generation(function.recorder.get_result(), "GA", i, False,
13 | mount_path="./dataset/rastrigin.csv", save_path="./results/")
14 |
15 |
16 | def obtain_pbil():
17 | _, _, landscape = get_rastrigin("./dataset/rastrigin.csv")
18 | function = PBIL(size=90, scope=len(landscape))
19 | function.evolute(terrain=landscape, generations=80, evolute_type=inherent.MAX)
20 |
21 | for i in [0, 19, 39, 59, 79]:
22 | clime_by_generation(function.recorder.get_result(), "PBIL", i, False,
23 | mount_path="./dataset/rastrigin.csv", save_path="./results/")
24 |
25 |
26 | def obtain_cmaes(start_position, stride):
27 | _, _, landscape = get_rastrigin("./dataset/rastrigin.csv")
28 | function = CMAES(size=90, scope=len(landscape), start_position=start_position, stride=stride)
29 | function.evolute(terrain=landscape, generations=20, evolute_type=inherent.MAX)
30 |
31 | for i in [0, 4, 9, 14, 19]:
32 | clime_by_generation(function.recorder.get_result(), "CMA-ES", i, False,
33 | mount_path="./dataset/rastrigin.csv", save_path="./results/")
34 |
35 |
36 | def obtain_bi():
37 | _, _, landscape = get_rastrigin("./dataset/rastrigin.csv")
38 | function = BI(size=10, init_interval=40, min_interval=3, scope=len(landscape))
39 | function.evolute(terrain=landscape, generations=8, evolute_type=inherent.MAX)
40 |
41 | for i in [0, 1, 3, 5, 7]:
42 | clime_by_generation(function.recorder.get_result(), "RET", i, False,
43 | mount_path="./dataset/rastrigin.csv", save_path="./results/")
44 |
45 |
46 | def get_min(landscape):
47 | height = landscape[0][0]
48 | position = [0, 0]
49 | for row in range(len(landscape)):
50 | for col in range(len(landscape[row])):
51 | if landscape[row][col] < height:
52 | height = landscape[row][col]
53 | position = [row, col]
54 |
55 | return position, height
56 |
57 |
58 | if __name__ == '__main__':
59 | _, _, fitness_landscape = get_rastrigin("./dataset/rastrigin.csv")
60 | min_position, min_height = get_min(fitness_landscape)
61 | obtain_ga(min_position, 15)
62 | obtain_pbil()
63 | obtain_cmaes(min_position, 28)
64 | obtain_bi()
65 |
--------------------------------------------------------------------------------
/benchmark/methods/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/benchmark/methods/__init__.py
--------------------------------------------------------------------------------
/benchmark/methods/evolutor.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import math
4 | import numpy
5 | import numpy as np
6 | import numpy.linalg as la
7 |
8 | from sklearn.cluster import KMeans
9 |
10 | from ReverseEncodingTree.benchmark.methods import inherent
11 | from ReverseEncodingTree.benchmark.methods.inherent import Population, Recorder
12 |
13 |
14 | class GA(object):
15 |
16 | def __init__(self, size, scope, start_position, stride, mutate_rate=0.5):
17 | """
18 | initialize the evolution method based on simple genetic algorithm.
19 |
20 | :param size: population size.
21 | :param scope: scope of landscape.
22 | :param start_position: starting position of the population.
23 | :param stride: individual maximum moving step.
24 | :param mutate_rate: individual mutation rate.
25 | """
26 | self.population = Population()
27 | self.population.create_by_local(size=size, start_position=start_position, stride=stride, scope=scope)
28 | self.recorder = Recorder()
29 |
30 | self.scope = scope
31 | self.mutate_rate = mutate_rate
32 | self.stride = stride
33 | self.size = size
34 |
35 | def evolute(self, terrain, generations, evolute_type):
36 | """
37 | evolution process.
38 |
39 | :param terrain: landscape.
40 | :param generations: count of generations.
41 | :param evolute_type: evolution direction (maximize or minimize).
42 | """
43 | self.population.fitness(terrain)
44 | self.recorder.add_population(self.population)
45 |
46 | for generation in range(generations):
47 | self.population.save_by_sort(self.size, save_type=evolute_type)
48 | new_population = Population()
49 | save_count = int(self.size * (1 - self.mutate_rate))
50 |
51 | for index in range(save_count):
52 | new_population.add_by_individual(self.population.get_individual(index))
53 |
54 | for index in range(save_count, self.size):
55 | individual = self.population.get_individual(index)
56 | new_individual = []
57 | for attribute_index in range(len(individual) - 1):
58 | attribute = individual[attribute_index] + random.randint(-self.stride, self.stride)
59 | if attribute < 0:
60 | attribute = 0
61 | elif attribute >= self.scope:
62 | attribute = self.scope - 1
63 | new_individual.append(attribute)
64 |
65 | new_individual.append(terrain[int(new_individual[0])][int(new_individual[1])])
66 | new_population.add_by_individual(new_individual)
67 |
68 | self.population = new_population
69 | self.recorder.add_population(self.population)
70 | print("generation = " + str(generation))
71 |
72 |
73 | class PBIL(object):
74 |
75 | def __init__(self, size, scope, learn_rate=0.1):
76 | """
77 | initialize the evolution method based on Population-Based Incremental Learning.
78 |
79 | :param size: population size.
80 | :param scope: scope of landscape.
81 | :param learn_rate: learn rate of probability matrix.
82 | """
83 | self.population = Population()
84 | self.population.create_by_global(size=size, scope=scope)
85 | self.recorder = Recorder()
86 |
87 | self.probability_matrix = [[0.5 for _ in range(scope)] for _ in range(scope)]
88 | self.learn_rate = learn_rate
89 |
90 | self.scope = scope
91 | self.size = size
92 |
93 | def evolute(self, terrain, generations, evolute_type):
94 | """
95 | evolution process.
96 |
97 | :param terrain: landscape.
98 | :param generations: count of generations.
99 | :param evolute_type: evolution direction (maximize or minimize).
100 | """
101 | self.population.fitness(terrain)
102 | self.recorder.add_population(self.population)
103 |
104 | for generation in range(generations):
105 | self._update_probability_matrix(evolute_type)
106 | self._next_generation()
107 | self.population.fitness(terrain)
108 | self.recorder.add_population(self.population)
109 | print("generation = " + str(generation))
110 |
111 | def _update_probability_matrix(self, evolute_type):
112 | """
113 | update the probability matrix by population.
114 |
115 | :param evolute_type: evolution direction (maximize or minimize).
116 | """
117 | best_individual = self.population.get_individual(0)
118 | mean_fitness = best_individual[2]
119 |
120 | if evolute_type == inherent.MAX:
121 | # obtain best individual
122 | for index in range(1, self.size):
123 | individual = self.population.get_individual(index)
124 | mean_fitness += individual[2]
125 | if best_individual[2] < individual[2]:
126 | best_individual = individual
127 |
128 | mean_fitness /= self.size
129 | self.probability_matrix[best_individual[0]][best_individual[1]] += 0.5
130 | max_probability = self.probability_matrix[best_individual[0]][best_individual[1]]
131 |
132 | # update probability matrix
133 | for index in range(1, self.size):
134 | individual = self.population.get_individual(index)
135 | if individual[2] >= mean_fitness:
136 | self.probability_matrix[individual[0]][individual[1]] += \
137 | self.learn_rate * (self.probability_matrix[best_individual[0]][best_individual[1]] - 0.5)
138 | if max_probability < self.probability_matrix[individual[0]][individual[1]]:
139 | max_probability = self.probability_matrix[individual[0]][individual[1]]
140 | else:
141 | self.probability_matrix[individual[0]][individual[1]] -= \
142 | self.learn_rate * (self.probability_matrix[best_individual[0]][best_individual[1]] - 0.5)
143 |
144 | # adjust value range
145 | if max_probability > 1:
146 | for row in range(len(self.probability_matrix)):
147 | for col in range(len(self.probability_matrix)):
148 | self.probability_matrix[row][col] /= max_probability
149 |
150 | else:
151 | # obtain best individual
152 | for index in range(1, self.size):
153 | individual = self.population.get_individual(index)
154 | mean_fitness += individual[2]
155 | if best_individual[2] > individual[2]:
156 | best_individual = individual
157 |
158 | mean_fitness /= self.size
159 | self.probability_matrix[best_individual[0]][best_individual[1]] += 0.5
160 | max_probability = self.probability_matrix[best_individual[0]][best_individual[1]]
161 |
162 | # update probability matrix
163 | for index in range(1, self.size):
164 | individual = self.population.get_individual(index)
165 | if individual[2] <= mean_fitness:
166 | self.probability_matrix[individual[0]][individual[1]] += \
167 | self.learn_rate * (self.probability_matrix[best_individual[0]][best_individual[1]] - 0.5)
168 | if max_probability < self.probability_matrix[individual[0]][individual[1]]:
169 | max_probability = self.probability_matrix[individual[0]][individual[1]]
170 | else:
171 | self.probability_matrix[individual[0]][individual[1]] -= \
172 | self.learn_rate * (self.probability_matrix[best_individual[0]][best_individual[1]] - 0.5)
173 |
174 | # adjust value range
175 | if max_probability > 1:
176 | for row in range(len(self.probability_matrix)):
177 | for col in range(len(self.probability_matrix)):
178 | self.probability_matrix[row][col] /= max_probability
179 |
180 | def _next_generation(self):
181 | while True:
182 | chooser_matrix = list(numpy.random.random(self.scope ** 2) <=
183 | list(numpy.reshape(numpy.array(self.probability_matrix), (1, self.scope ** 2))[0]))
184 |
185 | if sum(chooser_matrix) >= self.size:
186 | created_population = Population()
187 | indices = []
188 | for index, value in enumerate(chooser_matrix):
189 | if value:
190 | indices.append(index)
191 | for choose in random.sample(indices, self.size):
192 | col = choose % self.scope
193 | row = int((choose - col) / self.scope)
194 | created_population.add_by_individual([row, col, 0])
195 |
196 | self.population = created_population
197 | break
198 |
199 |
200 | class CMAES(object):
201 |
202 | def __init__(self, size, scope, start_position, stride, elite_rate=0.3):
203 | """
204 | initialize the evolution method based on Covariance Matrix Adaptation Evolution Strategy (CMA-ES).
205 |
206 | :param size: population size.
207 | :param scope: scope of landscape.
208 | :param start_position: starting position of the population.
209 | """
210 | self.population = Population()
211 | self.population.create_by_normal(size, start_position, stride, scope)
212 | self.recorder = Recorder()
213 |
214 | self.scope = scope
215 | self.elite_rate = elite_rate
216 | self.size = size
217 |
218 | def evolute(self, terrain, generations, evolute_type):
219 | """
220 | evolution process.
221 |
222 | :param terrain: landscape.
223 | :param generations: count of generations.
224 | :param evolute_type: evolution direction (maximize or minimize).
225 | """
226 | self.population.fitness(terrain)
227 | self.recorder.add_population(self.population)
228 |
229 | for generation in range(generations):
230 | self.population.save_by_sort(self.size, save_type=evolute_type)
231 | current_individuals = [[], []]
232 | elite_individuals = [[], []]
233 |
234 | for index in range(self.size):
235 | individual = self.population.get_individual(index)
236 | if index <= int(self.size * self.elite_rate):
237 | elite_individuals[0].append(individual[0])
238 | elite_individuals[1].append(individual[1])
239 | current_individuals[0].append(individual[0])
240 | current_individuals[1].append(individual[1])
241 |
242 | elite_individuals = numpy.array(elite_individuals)
243 | current_individuals = numpy.array(current_individuals)
244 | centered = elite_individuals - current_individuals.mean(1, keepdims=True)
245 | c = (centered @ centered.T) / (int(self.size * self.elite_rate) - 1)
246 | w, e = la.eigh(c)
247 | new_individuals = None
248 | while True:
249 | try:
250 | new_individuals = elite_individuals.mean(1, keepdims=True) + (e @ np.diag(np.sqrt(w)) @
251 | np.random.normal(size=(2, self.size)))
252 | except ValueError:
253 | continue
254 | break
255 |
256 | new_population = Population()
257 | for position in new_individuals.T:
258 | if position[0] < 0:
259 | position[0] = 0
260 | elif position[0] >= self.scope:
261 | position[0] = self.scope - 1
262 | if position[1] < 0:
263 | position[1] = 0
264 | elif position[1] >= self.scope:
265 | position[1] = self.scope - 1
266 | new_population.add_by_individual([int(position[0]), int(position[1]), 0])
267 |
268 | new_population.fitness(terrain)
269 | self.population = new_population
270 |
271 | self.recorder.add_population(self.population)
272 | print("generation = " + str(generation))
273 |
274 |
275 | class BI(object):
276 |
277 | def __init__(self, size, init_interval, min_interval, scope):
278 | """
279 | initialize the evolution method based on binary search.
280 |
281 | :param size: population size.
282 | :param init_interval: minimum interval between two individuals at initialization.
283 | :param min_interval: minimum interval between two individuals at each iteration.
284 | :param scope: scope of landscape.
285 | """
286 | self.population = Population()
287 | self.population.create_by_global(size=size, scope=scope, interval=init_interval)
288 | self.recorder = Recorder()
289 | self.scope = scope
290 | self.min_interval = min_interval
291 | self.size = size
292 |
293 | def evolute(self, terrain, generations, evolute_type):
294 | """
295 | evolution process.
296 |
297 | :param terrain: landscape.
298 | :param generations: count of generations.
299 | :param evolute_type: evolution direction (maximize or minimize).
300 | """
301 | self.population.fitness(terrain)
302 | self.recorder.add_population(self.population)
303 |
304 | for generation in range(generations):
305 | created_population = Population()
306 |
307 | method = KMeans(n_clusters=self.size, max_iter=self.size)
308 | feature_matrices = []
309 | for index in range(self.size):
310 | feature_matrices.append(self.population.get_individual(index)[:-1])
311 | method.fit(feature_matrices)
312 | genome_clusters = [[] for _ in range(self.size)]
313 | for index, cluster_index in enumerate(method.labels_):
314 | genome_clusters[cluster_index].append(self.population.get_individual(index))
315 |
316 | for index in range(self.size):
317 | individual_1 = genome_clusters[index][0]
318 | for another_index in range(index + 1, self.size):
319 | individual_2 = genome_clusters[another_index][0]
320 |
321 | if evolute_type == inherent.MAX:
322 | if math.sqrt(math.pow(individual_1[0] - individual_2[0], 2) +
323 | math.pow(individual_1[1] - individual_2[1], 2)) > self.min_interval:
324 | if individual_1[2] > individual_2[2]:
325 | new_individual = self._find_around(individual_1, [self.population, created_population],
326 | self.min_interval * 2, self.size)
327 | if new_individual is not None:
328 | created_population.add_by_individual(new_individual)
329 | elif individual_1[2] < individual_2[2]:
330 | new_individual = self._find_center(individual_1, individual_2,
331 | [self.population, created_population])
332 | if new_individual is not None:
333 | created_population.add_by_individual(new_individual)
334 | else:
335 | if math.sqrt(math.pow(individual_1[0] - individual_2[0], 2) +
336 | math.pow(individual_1[1] - individual_2[1], 2)) > self.min_interval:
337 | if individual_1[2] > individual_2[2]:
338 | new_individual = self._find_center(individual_1, individual_2,
339 | [self.population, created_population])
340 | if new_individual is not None:
341 | created_population.add_by_individual(new_individual)
342 | elif individual_1[2] < individual_2[2]:
343 | new_individual = self._find_around(individual_1, [self.population, created_population],
344 | self.min_interval * 2, self.size)
345 | if new_individual is not None:
346 | created_population.add_by_individual(new_individual)
347 |
348 | created_population.fitness(terrain)
349 |
350 | self.population.merge_population(created_population)
351 | self.recorder.add_population(self.population)
352 | self.population.save_by_sort(self.size, save_type=evolute_type)
353 | print("generation = " + str(generation))
354 |
355 | def _find_center(self, individual_1, individual_2, remains):
356 | """
357 | look for an individual between two individuals.
358 |
359 | :param individual_1: one individual.
360 | :param individual_2: another individual.
361 |
362 | :param remains: remaining population(s).
363 |
364 | :return: created individual (if it satisfies the interval constraints, otherwise None).
365 | """
366 | new_row = int(round((individual_1[0] + individual_2[0]) / 2))
367 | new_col = int(round((individual_1[1] + individual_2[1]) / 2))
368 | is_created = True
369 | for remain in remains:
370 | if remain.population is not None:
371 | if not remain.meet_interval([new_row, new_col], self.min_interval):
372 | is_created = False
373 |
374 | return [new_row, new_col, 0] if is_created else None
375 |
376 | def _find_around(self, individual, remains, search_scope, search_count):
377 | """
378 | look for an individual around the requested individual.
379 |
380 | :param individual: one individual
381 | :param remains: remaining population(s).
382 | :param search_scope: searchable scope.
383 | :param search_count: number of times to search.
384 |
385 | :return: created individual (if it satisfies the interval constraints, otherwise None).
386 | """
387 | count = 0
388 | while True:
389 | new_row = individual[0] + random.randint(-search_scope, search_scope)
390 | new_col = individual[1] + random.randint(-search_scope, search_scope)
391 |
392 | is_created = True
393 | if not (0 <= new_row < self.scope and 0 <= new_col < self.scope):
394 | is_created = False
395 | for remain in remains:
396 | if remain.population is not None:
397 | if not remain.meet_interval([new_row, new_col], self.min_interval):
398 | is_created = False
399 | if is_created:
400 | return [new_row, new_col, 0]
401 |
402 | count += 1
403 | if count >= search_count:
404 | return None
405 |
--------------------------------------------------------------------------------
/benchmark/methods/inherent.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 | import math
4 | import numpy
5 |
6 | MAX = 1
7 | MIN = -1
8 |
9 | GRADIENT = 1
10 | POPULATION = -1
11 |
12 |
13 | class Population(object):
14 |
15 | def __init__(self):
16 | """
17 | initialize the population.
18 | """
19 | self.population = None
20 |
21 | def create_by_population(self, old_population):
22 | """
23 | create the population by old population.
24 |
25 | :param old_population: previous population.
26 | """
27 | self.population = copy.deepcopy(old_population)
28 |
29 | def create_by_normal(self, size, start_position, stride, scope):
30 | self.population = [[], [], []]
31 | populations_list = numpy.random.normal(0, stride, (2, size * size))
32 | for position in populations_list.T:
33 | actual_position = [int(start_position[0] + position[0]), int(start_position[1] + position[1])]
34 | if actual_position[0] in self.population[0] and actual_position[1] in self.population[1]:
35 | continue
36 | elif 0 <= actual_position[0] < scope and 0 <= actual_position[1] < scope:
37 | self.population[0].append(actual_position[0])
38 | self.population[1].append(actual_position[1])
39 | self.population[2].append(0)
40 | if len(self.population[0]) == size:
41 | break
42 |
43 | def create_by_local(self, size, start_position, stride, scope):
44 | """
45 | create the population by local random.
46 |
47 | :param size: population size.
48 | :param start_position: center of local population.
49 | :param stride: length of step (random scope or side length).
50 | :param scope: scope or side length of landscape.
51 | """
52 | self.population = [[], [], []]
53 | while len(self.population[0]) < size:
54 | x = start_position[0] + random.randint(-stride * 3, stride * 3)
55 | y = start_position[1] + random.randint(-stride * 3, stride * 3)
56 | if x in self.population[0] and y in self.population[1]:
57 | continue
58 | elif 0 <= x < scope and 0 <= y < scope:
59 | self.population[0].append(x)
60 | self.population[1].append(y)
61 | self.population[2].append(0)
62 |
63 | def create_by_global(self, size, scope, interval=0):
64 | """
65 | create the population by global random.
66 |
67 | :param size: population size.
68 | :param scope: scope or side length of landscape.
69 | :param interval: minimum interval between individuals.
70 | """
71 | self.population = [[], [], []]
72 | while len(self.population[0]) < size:
73 | actual_row = random.randint(0, scope - 1)
74 | actual_col = random.randint(0, scope - 1)
75 | if interval > 0:
76 | if self.meet_interval([actual_row, actual_col], interval):
77 | self.population[0].append(actual_row)
78 | self.population[1].append(actual_col)
79 | self.population[2].append(0)
80 | else:
81 | self.population[0].append(actual_row)
82 | self.population[1].append(actual_col)
83 | self.population[2].append(0)
84 |
85 | def add_by_individual(self, individual):
86 | """
87 | insert individual in population.
88 |
89 | :param individual: one individual.
90 | """
91 | if self.population is None:
92 | self.population = [[] for _ in range(len(individual))]
93 |
94 | for index, attribute in enumerate(individual):
95 | self.population[index].append(attribute)
96 |
97 | def merge_population(self, new_remain):
98 | """
99 | merge another population in this population.
100 |
101 | :param new_remain: another population.
102 | """
103 | for index, attributes in enumerate(new_remain.population):
104 | self.population[index] += attributes
105 |
106 | def save_by_sort(self, save_count, save_type=MAX):
107 | """
108 | save the population by fitness sort.
109 |
110 | :param save_count: count of best preserved individuals.
111 | :param save_type: type of fitness (save better or worse).
112 | """
113 | population = numpy.array(copy.deepcopy(self.population))
114 | if save_type == MIN:
115 | population = list(population.T[numpy.lexsort(population)].T)
116 | else:
117 | population = list(population.T[numpy.lexsort(-population)].T)
118 |
119 | self.population = []
120 | for attributes in population:
121 | self.population.append(list(attributes)[: save_count])
122 |
123 | def get_individual(self, index):
124 | """
125 | get individual by index.
126 |
127 | :param index: index in population.
128 |
129 | :return: requested individual.
130 | """
131 | individual = []
132 | for attribute_index in range(len(self.population)):
133 | individual.append(self.population[attribute_index][index])
134 | return individual
135 |
136 | def fitness(self, terrain, population=None):
137 | """
137 | obtain the fitness of each individual from the landscape.
139 |
140 | :param terrain: landscape of phenotype.
141 | :param population: external population if requested.
142 | """
143 | if population is None:
144 | for index, (x, y) in enumerate(zip(self.population[0], self.population[1])):
145 | self.population[2][index] = terrain[x][y]
146 | else:
147 | for index, (x, y) in enumerate(zip(population[0], population[1])):
148 | population[2][index] = terrain[x][y]
149 |
150 | def sort_index(self, sort_type=MAX):
151 | """
152 | sort individuals by fitness.
153 |
154 | :param sort_type: type of fitness (MAX or MIN).
155 |
156 | :return: sorted index list.
157 | """
158 | if sort_type == MIN:
159 | return list(numpy.argsort(numpy.array(self.population[-1])))
160 | else:
161 | return list(numpy.argsort(-numpy.array(self.population[-1])))
162 |
163 | def meet_interval(self, individual, min_interval):
164 | """
165 | check whether the new individual meets the distance requirements.
166 |
167 | :param individual: the new individual.
168 | :param min_interval: minimum interval between every two individuals.
169 |
170 | :return: check result.
171 | """
172 | min_distance = -1
173 | for p_row, p_col in zip(self.population[0], self.population[1]):
174 | distance = math.sqrt(math.pow(p_row - individual[0], 2) + math.pow(p_col - individual[1], 2))
175 | if distance < min_distance or min_distance == -1:
176 | min_distance = distance
177 | return (min_distance > min_interval) or (min_distance == -1)
178 |
179 | def get_all_individuals(self):
180 | """
181 | get all individuals from the population.
182 |
183 | :return: individual list.
184 | """
185 | return self.population
186 |
187 | def __str__(self):
188 | """
189 | show all individuals.
190 |
191 | :return: string of all individuals.
192 | """
193 | individuals = []
194 | for index in range(len(self.population[0])):
195 | individuals.append(self.get_individual(index))
196 |
197 | return str(individuals)
198 |
199 |
200 | class Recorder(object):
201 |
202 | def __init__(self):
203 | """
204 | initialize the recorder to record the changing process of population.
205 | """
206 | self.recorder = []
207 | self.record_type = None
208 |
209 | def add_population(self, remain):
210 | """
211 | add new population to the recorder.
212 |
213 | :param remain: remain population.
214 | """
215 | if self.record_type is None:
216 | self.record_type = POPULATION
217 |
218 | if self.record_type is POPULATION:
219 | self.recorder.append(copy.deepcopy(remain.population))
220 | else:
221 | print("Input error record type!")
222 | exit(1)
223 |
224 | def get_result(self):
225 | """
226 | get the result from recorder.
227 |
228 | :return: recorder.
229 | """
230 | return self.recorder
231 |
--------------------------------------------------------------------------------
/benchmark/methods/obtain_rastrigin.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy
3 |
4 |
5 | def rastrigin(x, y, a=10):
6 | return a + sum([(index**2 - a * numpy.cos(2 * math.pi * index)) for index in (x, y)])
7 |
8 |
9 | def save_terrain(matrix, path="../dataset/rastrigin.csv"):
10 | with open(path, "w", encoding="utf-8") as save_file:
11 | for row_data in matrix:
12 | string = str(row_data)[1:-1]
13 | string = string.replace("\'", "")
14 | string = string.replace(" ", "")
15 | save_file.write(string + "\n")
16 |
17 |
18 | if __name__ == '__main__':
19 | X = numpy.linspace(-4, 4, 200)
20 | Y = numpy.linspace(-4, 4, 200)
21 |
22 | X, Y = numpy.meshgrid(X, Y)
23 |
24 | Z = rastrigin(X, Y)
25 |
26 | outputs = []
27 | for data in Z:
28 | outputs.append(list(data))
29 |
30 | save_terrain(outputs)
31 |
32 | print(numpy.max(Z))
33 | print(numpy.min(Z))
34 |
--------------------------------------------------------------------------------
/benchmark/methods/show_me_results.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import numpy
3 | from matplotlib import pyplot as plt
4 | from mpl_toolkits.mplot3d import Axes3D
5 |
6 |
7 | def get_mount_everest(path):
8 | matrix = []
9 | with open(path, "r") as file:
10 | reader = csv.reader(file)
11 | for row in reader:
12 | matrix.append(list(map(int, row)))
13 |
14 | x_axis = numpy.arange(0, 200, 1)
15 | y_axis = numpy.arange(0, 200, 1)
16 | x_axis, y_axis = numpy.meshgrid(x_axis, y_axis)
17 |
18 | return x_axis, y_axis, numpy.array(matrix)
19 |
20 |
21 | # 3600 -> 200
22 | def show_mount_everest(path, save_path=None):
23 | x_axis, y_axis, z_axis = get_mount_everest(path)
24 | max_height = 0
25 | min_height = 8844
26 | min_position = [0, 0]
27 | max_position = [0, 0]
28 | for row in range(len(z_axis)):
29 | for col in range(len(z_axis[row])):
30 | if z_axis[row][col] < min_height:
31 | min_height = z_axis[row][col]
32 | min_position = [row, col]
33 | if z_axis[row][col] > max_height:
34 | max_height = z_axis[row][col]
35 | max_position = [row, col]
36 |
37 | fig = plt.figure()
38 | ax = Axes3D(fig)
39 | ax.set_xlabel("latitude")
40 | ax.set_ylabel("longitude")
41 | ax.set_zlabel("height")
42 | ax.plot_surface(x_axis, y_axis, z_axis, rstride=1, cstride=1, cmap='rainbow', alpha=0.8)
43 | ax.scatter([min_position[1]], [min_position[0]], [min_height], c='indigo')
44 | ax.scatter([max_position[1]], [max_position[0]], [max_height], c='red')
45 | ax.contourf(x_axis, y_axis, z_axis,
46 | zdir='z', offset=-10000, cmap='rainbow')
47 |
48 | ax.view_init(35, -150)
49 | ax.set_zlim(-10000, 9000)
50 |
51 | if save_path is not None:
52 | plt.savefig(save_path + "Clime.png", format='png', bbox_inches='tight', transparent=True, dpi=600)
53 |
54 | plt.show()
55 |
56 |
57 | def clime_by_generation(population_recorders,
58 | title, generation, is_final=False, mount_path="mount_everest.csv", save_path=None):
59 | x_axis, y_axis, z_axis = get_mount_everest(mount_path)
60 |
61 | fig = plt.figure()
62 | ax = Axes3D(fig)
63 |
64 | if generation == 0:
65 | population_recorder = population_recorders[generation]
66 | ax.set_title(title + ": (initialized" +
67 | " generations to reach " + str(numpy.max(population_recorder[2])) + " meters height)")
68 | elif is_final:
69 | population_recorder = population_recorders[-1]
70 | ax.set_title(title + ": (" + str(generation + 1) +
71 | " generations to reach " + str(numpy.max(population_recorder[2])) + " meters height finally)")
72 | else:
73 | population_recorder = population_recorders[generation]
74 | ax.set_title(title + ": (" + str(generation + 1) +
75 | " generations to reach " + str(numpy.max(population_recorder[2])) + " meters height)")
76 |
77 | ax.set_xlabel("latitude")
78 | ax.set_ylabel("longitude")
79 | ax.set_zlabel("height")
80 |
81 | ax.plot_surface(x_axis, y_axis, z_axis, rstride=1, cstride=1, cmap='rainbow', alpha=0.3)
82 | ax.scatter(population_recorder[1], population_recorder[0], population_recorder[2], c='black')
83 | ax.contourf(x_axis, y_axis, z_axis, zdir='z', offset=-5000, cmap='rainbow', alpha=0.3)
84 | ax.scatter(population_recorder[1], population_recorder[0],
85 | [-5000 for _ in range(len(population_recorder[0]))], c='black')
86 |
87 | ax.view_init(30, -150)
88 | ax.set_zlim(-5000, 9000)
89 | if save_path is not None:
90 | plt.savefig(save_path + title + "." + str(generation + 1) + ".svg", format='svg', bbox_inches='tight',
91 | transparent=True, dpi=600)
92 | plt.show()
93 |
--------------------------------------------------------------------------------
/benchmark/methods/show_ra_results.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import numpy
3 | from matplotlib import pyplot as plt
4 | from mpl_toolkits.mplot3d import Axes3D
5 |
6 |
7 | def get_rastrigin(path):
8 | matrix = []
9 | with open(path, "r") as file:
10 | reader = csv.reader(file)
11 | for row in reader:
12 | matrix.append(list(map(float, row)))
13 |
14 | x_axis = numpy.arange(0, 200, 1)
15 | y_axis = numpy.arange(0, 200, 1)
16 | x_axis, y_axis = numpy.meshgrid(x_axis, y_axis)
17 |
18 | return x_axis, y_axis, numpy.array(matrix)
19 |
20 |
21 | def show_rastrigin(path, save_path=None):
22 | x_axis, y_axis, z_axis = get_rastrigin(path)
23 | max_height = -10
24 | min_height = 60
25 | min_position = [0, 0]
26 | max_position = [0, 0]
27 | for row in range(len(z_axis)):
28 | for col in range(len(z_axis[row])):
29 | if z_axis[row][col] < min_height:
30 | min_height = z_axis[row][col]
31 | min_position = [row, col]
32 | if z_axis[row][col] > max_height:
33 | max_height = z_axis[row][col]
34 | max_position = [row, col]
35 |
36 | fig = plt.figure()
37 | ax = Axes3D(fig)
38 | ax.set_xlabel("x")
39 | ax.set_ylabel("y")
40 | ax.set_zlabel("z")
41 | ax.plot_surface(x_axis, y_axis, z_axis, rstride=1, cstride=1, cmap='rainbow', alpha=0.8)
42 | ax.scatter([min_position[1]], [min_position[0]], [min_height], c='indigo')
43 | ax.scatter([max_position[1]], [max_position[0]], [max_height], c='red')
44 | ax.contourf(x_axis, y_axis, z_axis,
45 | zdir='z', offset=-100, cmap='rainbow')
46 |
47 | ax.view_init(35, -150)
48 | ax.set_zlim(-100, 60)
49 |
50 | if save_path is not None:
51 | plt.savefig(save_path + "ra_clime.png", format='png', bbox_inches='tight', transparent=True, dpi=600)
52 |
53 | plt.show()
54 |
55 |
56 | def clime_by_generation(population_recorders,
57 | title, generation, is_final=False, mount_path="mount_everest.csv", save_path=None):
58 | x_axis, y_axis, z_axis = get_rastrigin(mount_path)
59 |
60 | fig = plt.figure()
61 | ax = Axes3D(fig)
62 |
63 | if generation == 0:
64 | population_recorder = population_recorders[generation]
65 | ax.set_title(title + ": (initialized" +
66 | " generations to reach " + str(numpy.max(population_recorder[2])) + ")")
67 | elif is_final:
68 | population_recorder = population_recorders[-1]
69 | ax.set_title(title + ": (" + str(generation + 1) +
70 | " generations to reach " + str(numpy.max(population_recorder[2])) + " finally)")
71 | else:
72 | population_recorder = population_recorders[generation]
73 | ax.set_title(title + ": (" + str(generation + 1) +
74 | " generations to reach " + str(numpy.max(population_recorder[2])) + ")")
75 |
76 | ax.set_xlabel("x")
77 | ax.set_ylabel("y")
78 | ax.set_zlabel("z")
79 |
80 | ax.plot_surface(x_axis, y_axis, z_axis, rstride=1, cstride=1, cmap='rainbow', alpha=0.3)
81 | ax.scatter(population_recorder[1], population_recorder[0], population_recorder[2], c='black')
82 | ax.contourf(x_axis, y_axis, z_axis, zdir='z', offset=-100, cmap='rainbow', alpha=0.3)
83 | ax.scatter(population_recorder[1], population_recorder[0],
84 | [-100 for _ in range(len(population_recorder[0]))], c='black')
85 |
86 | ax.view_init(35, -150)
87 | ax.set_zlim(-100, 60)
88 |
89 | if save_path is not None:
90 | plt.savefig(save_path + title + "." + str(generation + 1) + ".png", format='png', bbox_inches='tight',
91 | transparent=True, dpi=600)
92 | plt.show()
93 |
--------------------------------------------------------------------------------
/benchmark/results/note.md:
--------------------------------------------------------------------------------
1 | Generate the results yourself by running the benchmark scripts.
--------------------------------------------------------------------------------
/configures/example/cart-pole-v0:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the CartPole-v0 environment on OpenAI Gym
2 |
3 | [NEAT]
4 | pop_size = 5
5 | # Note: the fitness threshold will never be reached because
6 | # we are controlling the termination ourselves based on simulation performance.
7 | fitness_criterion = max
8 | fitness_threshold = 1.
9 | reset_on_extinction = 0
10 |
11 | [DefaultGenome]
12 | # node activation options
13 | activation_default = relu
14 | activation_mutate_rate = 0.0
15 | activation_options = relu
16 |
17 | # node aggregation options
18 | aggregation_default = sum
19 | aggregation_mutate_rate = 0.0
20 | aggregation_options = sum
21 |
22 | # node bias options
23 | bias_init_mean = 0.0
24 | bias_init_stdev = 1.0
25 | bias_max_value = 30.0
26 | bias_min_value = -30.0
27 | bias_mutate_power = 0.5
28 | bias_mutate_rate = 0.7
29 | bias_replace_rate = 0.1
30 |
31 | # genome compatibility options
32 | compatibility_disjoint_coefficient = 1.0
33 | compatibility_weight_coefficient = 1.0
34 |
35 | # connection add/remove rates
36 | conn_add_prob = 0.9
37 | conn_delete_prob = 0.2
38 |
39 | # connection enable options
40 | enabled_default = True
41 | enabled_mutate_rate = 0.01
42 |
43 | feed_forward = True
44 | initial_connection = fs_neat
45 |
46 | # node add/remove rates
47 | node_add_prob = 0.9
48 | node_delete_prob = 0.2
49 |
50 | # network parameters
51 | num_hidden = 0
52 | num_inputs = 4
53 | num_outputs = 2
54 |
55 | # node response options
56 | response_init_mean = 1.0
57 | response_init_stdev = 0.0
58 | response_max_value = 30.0
59 | response_min_value = -30.0
60 | response_mutate_power = 0.0
61 | response_mutate_rate = 0.0
62 | response_replace_rate = 0.0
63 |
64 | # connection weight options
65 | weight_init_mean = 0.0
66 | weight_init_stdev = 1.0
67 | weight_max_value = 30.
68 | weight_min_value = -30.
69 | weight_mutate_power = 0.5
70 | weight_mutate_rate = 0.8
71 | weight_replace_rate = 0.1
72 |
73 | [DefaultSpeciesSet]
74 | compatibility_threshold = 3.0
75 |
76 | [DefaultStagnation]
77 | species_fitness_func = max
78 | max_stagnation = 20
79 | species_elitism = 4
80 |
81 | [DefaultReproduction]
82 | elitism = 2
83 | survival_threshold = 0.2
--------------------------------------------------------------------------------
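
The example configurations under configures/example keep the stock neat-python section names, so they load with the default classes. A minimal loading sketch (the config path is taken from the repository layout; the placeholder fitness function is only there to make the snippet self-contained, the real evaluations live under /example):

import neat


def evaluate(genomes, config):
    # placeholder: assign some fitness to every genome so population.run can proceed
    for genome_id, genome in genomes:
        genome.fitness = 0.0


config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     "configures/example/cart-pole-v0")
population = neat.Population(config)
population.add_reporter(neat.StdOutReporter(True))
winner = population.run(evaluate, 10)
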
/configures/example/lunar-lander-v2:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the LunarLander-v2 environment on OpenAI Gym
2 |
3 | [NEAT]
4 | pop_size = 100
5 | fitness_criterion = max
6 | fitness_threshold = -0.1
7 | reset_on_extinction = 0
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 |
41 | feed_forward = True
42 | initial_connection = fs_neat
43 |
44 | # node add/remove rates
45 | node_add_prob = 0.9
46 | node_delete_prob = 0.2
47 |
48 | # network parameters
49 | num_hidden = 0
50 | num_inputs = 8
51 | num_outputs = 1
52 |
53 | # node response options
54 | response_init_mean = 1.0
55 | response_init_stdev = 0.0
56 | response_max_value = 30.0
57 | response_min_value = -30.0
58 | response_mutate_power = 0.0
59 | response_mutate_rate = 0.0
60 | response_replace_rate = 0.0
61 |
62 | # connection weight options
63 | weight_init_mean = 0.0
64 | weight_init_stdev = 1.0
65 | weight_max_value = 30.
66 | weight_min_value = -30.
67 | weight_mutate_power = 0.5
68 | weight_mutate_rate = 0.8
69 | weight_replace_rate = 0.1
70 |
71 | [DefaultSpeciesSet]
72 | compatibility_threshold = 3.0
73 |
74 | [DefaultStagnation]
75 | species_fitness_func = max
76 | max_stagnation = 20
77 | species_elitism = 4
78 |
79 | [DefaultReproduction]
80 | elitism = 2
81 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/configures/example/xor:
--------------------------------------------------------------------------------
1 | #--- parameters for the XOR-2 experiment ---#
2 |
3 | [NEAT]
4 | fitness_criterion = max
5 | fitness_threshold = 3.9999
6 | pop_size = 135
7 | reset_on_extinction = False
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = sigmoid
12 | activation_mutate_rate = 0.0
13 | activation_options = sigmoid
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 0.5
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.5
35 | conn_delete_prob = 0.5
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 |
41 | feed_forward = True
42 | initial_connection = fs_neat
43 |
44 | # node add/remove rates
45 | node_add_prob = 0.2
46 | node_delete_prob = 0.2
47 |
48 | # network parameters
49 | num_hidden = 0
50 | num_inputs = 2
51 | num_outputs = 1
52 |
53 | # node response options
54 | response_init_mean = 1.0
55 | response_init_stdev = 0.0
56 | response_max_value = 30.0
57 | response_min_value = -30.0
58 | response_mutate_power = 0.0
59 | response_mutate_rate = 0.0
60 | response_replace_rate = 0.0
61 |
62 | # connection weight options
63 | weight_init_mean = 0.0
64 | weight_init_stdev = 1.0
65 | weight_max_value = 30
66 | weight_min_value = -30
67 | weight_mutate_power = 0.5
68 | weight_mutate_rate = 0.8
69 | weight_replace_rate = 0.1
70 |
71 | [DefaultSpeciesSet]
72 | compatibility_threshold = 3.0
73 |
74 | [DefaultStagnation]
75 | species_fitness_func = max
76 | max_stagnation = 20
77 | species_elitism = 2
78 |
79 | [DefaultReproduction]
80 | elitism = 2
81 | survival_threshold = 0.2
--------------------------------------------------------------------------------
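
The threshold fitness_threshold = 3.9999 follows the usual neat-python XOR convention: fitness starts at 4.0 and the squared error over the four input patterns is subtracted, so only a near-perfect network terminates the run. A hedged sketch of that evaluation (the repository's own version lives in example/xor.py and may differ in detail):

import neat

XOR_INPUTS = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
XOR_OUTPUTS = [(0.0,), (1.0,), (1.0,), (0.0,)]


def evaluate(genomes, config):
    # fitness = 4.0 minus the accumulated squared error, so 3.9999 means "practically perfect"
    for genome_id, genome in genomes:
        network = neat.nn.FeedForwardNetwork.create(genome, config)
        genome.fitness = 4.0
        for xor_input, xor_output in zip(XOR_INPUTS, XOR_OUTPUTS):
            prediction = network.activate(xor_input)
            genome.fitness -= (prediction[0] - xor_output[0]) ** 2
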
/configures/task/cart-pole-v0.bi:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the CartPole-v0 environment on OpenAI Gym #
2 |
3 | [NEAT]
4 | pop_size = 3
5 | fitness_criterion = max
6 | fitness_threshold = 0.999
7 | reset_on_extinction = 0
8 |
9 | [GlobalGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 |
42 | # node add/remove rates
43 | node_add_prob = 0.9
44 | node_delete_prob = 0.2
45 |
46 | # network parameters
47 | num_hidden = 0
48 | num_inputs = 4
49 | num_outputs = 2
50 | max_node_num = 7
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30.
65 | weight_min_value = -30.
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [StrongSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 4
77 |
78 | [Reproduction]
79 | init_distance = 2
80 | min_distance = 0.1
81 | correlation_rate = -0.5
82 | search_count = 30
83 | cluster_method = kmeans++
--------------------------------------------------------------------------------
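
The task configurations with the .bi and .gs suffixes rename the sections to [GlobalGenome], [StrongSpeciesSet] and [Reproduction]; neat-python resolves each section by the __name__ of the class passed to neat.Config, so those classes have to be supplied explicitly. A loading sketch under the assumption that evolution/methods/bi.py exposes a class named Reproduction matching the [Reproduction] section (check this against the repository before relying on it):

import neat

from evolution.bean.genome import GlobalGenome
from evolution.bean.species_set import StrongSpeciesSet
from evolution.methods.bi import Reproduction  # assumed class name, matching the [Reproduction] section

# section names are looked up by class name:
# [GlobalGenome], [Reproduction], [StrongSpeciesSet], [DefaultStagnation]
config = neat.Config(GlobalGenome, Reproduction, StrongSpeciesSet, neat.DefaultStagnation,
                     "configures/task/cart-pole-v0.bi")
population = neat.Population(config)
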
/configures/task/cart-pole-v0.fs:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the CartPole-v0 environment on OpenAI Gym
2 |
3 | [NEAT]
4 | pop_size = 6
5 | fitness_criterion = max
6 | fitness_threshold = 0.999
7 | reset_on_extinction = 0
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 | initial_connection = fs_neat
42 |
43 | # node add/remove rates
44 | node_add_prob = 0.9
45 | node_delete_prob = 0.2
46 |
47 | # network parameters
48 | num_hidden = 0
49 | num_inputs = 4
50 | num_outputs = 2
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30.
65 | weight_min_value = -30.
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [DefaultSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 4
77 |
78 | [DefaultReproduction]
79 | elitism = 2
80 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/configures/task/cart-pole-v0.gs:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the CartPole-v0 environment on OpenAI Gym #
2 |
3 | [NEAT]
4 | pop_size = 3
5 | fitness_criterion = max
6 | fitness_threshold = 0.999
7 | reset_on_extinction = 0
8 |
9 | [GlobalGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 |
42 | # node add/remove rates
43 | node_add_prob = 0.9
44 | node_delete_prob = 0.2
45 |
46 | # network parameters
47 | num_hidden = 0
48 | num_inputs = 4
49 | num_outputs = 2
50 | max_node_num = 7
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30.
65 | weight_min_value = -30.
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [StrongSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 4
77 |
78 | [Reproduction]
79 | init_distance = 2
80 | min_distance = 0.1
81 | correlation_rate = -0.5
82 | search_count = 30
83 | cluster_method = kmeans++
--------------------------------------------------------------------------------
/configures/task/cart-pole-v0.n:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the CartPole-v0 environment on OpenAI Gym
2 |
3 | [NEAT]
4 | pop_size = 6
5 | fitness_criterion = max
6 | fitness_threshold = 0.999
7 | reset_on_extinction = 0
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 | initial_connection = unconnected
42 |
43 | # node add/remove rates
44 | node_add_prob = 0.9
45 | node_delete_prob = 0.2
46 |
47 | # network parameters
48 | num_hidden = 0
49 | num_inputs = 4
50 | num_outputs = 2
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30.
65 | weight_min_value = -30.
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [DefaultSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 4
77 |
78 | [DefaultReproduction]
79 | elitism = 2
80 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/configures/task/logic.bi:
--------------------------------------------------------------------------------
1 | #--- parameters for the XOR-2 experiment ---#
2 |
3 | [NEAT]
4 | fitness_criterion = max
5 | fitness_threshold = 3.999
6 | pop_size = 12
7 | reset_on_extinction = False
8 |
9 | [GlobalGenome]
10 | # node activation options
11 | activation_default = sigmoid
12 | activation_mutate_rate = 0.0
13 | activation_options = sigmoid
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 0.5
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.5
35 | conn_delete_prob = 0.5
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 |
42 | # node add/remove rates
43 | node_add_prob = 0.2
44 | node_delete_prob = 0.2
45 |
46 | # network parameters
47 | num_hidden = 1
48 | num_inputs = 2
49 | num_outputs = 1
50 | max_node_num = 5
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30
65 | weight_min_value = -30
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [StrongSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 2
77 |
78 | [Reproduction]
79 | init_distance = 2.5
80 | min_distance = 0.1
81 | correlation_rate = -0.5
82 | search_count = 30
83 | cluster_method = kmeans++
--------------------------------------------------------------------------------
/configures/task/logic.fs:
--------------------------------------------------------------------------------
1 | #--- parameters for the XOR-2 experiment ---#
2 |
3 | [NEAT]
4 | fitness_criterion = max
5 | fitness_threshold = 3.999
6 | pop_size = 132
7 | reset_on_extinction = False
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = sigmoid
12 | activation_mutate_rate = 0.0
13 | activation_options = sigmoid
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 0.5
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.5
35 | conn_delete_prob = 0.5
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 | initial_connection = fs_neat
42 |
43 | # node add/remove rates
44 | node_add_prob = 0.2
45 | node_delete_prob = 0.2
46 |
47 | # network parameters
48 | num_hidden = 0
49 | num_inputs = 2
50 | num_outputs = 1
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30
65 | weight_min_value = -30
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [DefaultSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 2
77 |
78 | [DefaultReproduction]
79 | elitism = 2
80 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/configures/task/logic.gs:
--------------------------------------------------------------------------------
1 | #--- parameters for the XOR-2 experiment ---#
2 |
3 | [NEAT]
4 | fitness_criterion = max
5 | fitness_threshold = 3.999
6 | pop_size = 12
7 | reset_on_extinction = False
8 |
9 | [GlobalGenome]
10 | # node activation options
11 | activation_default = sigmoid
12 | activation_mutate_rate = 0.0
13 | activation_options = sigmoid
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 0.5
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.5
35 | conn_delete_prob = 0.5
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 |
42 | # node add/remove rates
43 | node_add_prob = 0.2
44 | node_delete_prob = 0.2
45 |
46 | # network parameters
47 | num_hidden = 1
48 | num_inputs = 2
49 | num_outputs = 1
50 | max_node_num = 6
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30
65 | weight_min_value = -30
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [StrongSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 2
77 |
78 | [Reproduction]
79 | init_distance = 2.5
80 | min_distance = 0.1
81 | correlation_rate = -0.5
82 | search_count = 30
83 | cluster_method = kmeans++
--------------------------------------------------------------------------------
/configures/task/logic.n:
--------------------------------------------------------------------------------
1 | #--- parameters for the XOR-2 experiment ---#
2 |
3 | [NEAT]
4 | fitness_criterion = max
5 | fitness_threshold = 3.999
6 | pop_size = 132
7 | reset_on_extinction = False
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = sigmoid
12 | activation_mutate_rate = 0.0
13 | activation_options = sigmoid
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 0.5
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.5
35 | conn_delete_prob = 0.5
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 | feed_forward = True
41 | initial_connection = unconnected
42 |
43 | # node add/remove rates
44 | node_add_prob = 0.2
45 | node_delete_prob = 0.2
46 |
47 | # network parameters
48 | num_hidden = 0
49 | num_inputs = 2
50 | num_outputs = 1
51 |
52 | # node response options
53 | response_init_mean = 1.0
54 | response_init_stdev = 0.0
55 | response_max_value = 30.0
56 | response_min_value = -30.0
57 | response_mutate_power = 0.0
58 | response_mutate_rate = 0.0
59 | response_replace_rate = 0.0
60 |
61 | # connection weight options
62 | weight_init_mean = 0.0
63 | weight_init_stdev = 1.0
64 | weight_max_value = 30
65 | weight_min_value = -30
66 | weight_mutate_power = 0.5
67 | weight_mutate_rate = 0.8
68 | weight_replace_rate = 0.1
69 |
70 | [DefaultSpeciesSet]
71 | compatibility_threshold = 3.0
72 |
73 | [DefaultStagnation]
74 | species_fitness_func = max
75 | max_stagnation = 20
76 | species_elitism = 2
77 |
78 | [DefaultReproduction]
79 | elitism = 2
80 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/configures/task/lunar-lander-v2.bi:
--------------------------------------------------------------------------------
1 | [NEAT]
2 | pop_size = 5
3 | fitness_criterion = max
4 | fitness_threshold = -0.2
5 | reset_on_extinction = 0
6 |
7 | [GlobalGenome]
8 | # node activation options
9 | activation_default = relu
10 | activation_mutate_rate = 0.0
11 | activation_options = relu
12 |
13 | # node aggregation options
14 | aggregation_default = sum
15 | aggregation_mutate_rate = 0.0
16 | aggregation_options = sum
17 |
18 | # node bias options
19 | bias_init_mean = 0.0
20 | bias_init_stdev = 1.0
21 | bias_max_value = 30.0
22 | bias_min_value = -30.0
23 | bias_mutate_power = 0.5
24 | bias_mutate_rate = 0.7
25 | bias_replace_rate = 0.1
26 |
27 | # genome compatibility options
28 | compatibility_disjoint_coefficient = 1.0
29 | compatibility_weight_coefficient = 1.0
30 |
31 | # connection add/remove rates
32 | conn_add_prob = 0.9
33 | conn_delete_prob = 0.2
34 |
35 | # connection enable options
36 | enabled_default = True
37 | enabled_mutate_rate = 0.01
38 | feed_forward = True
39 |
40 | # node add/remove rates
41 | node_add_prob = 0.9
42 | node_delete_prob = 0.2
43 |
44 | # network parameters
45 | num_hidden = 0
46 | num_inputs = 8
47 | num_outputs = 1
48 | max_node_num = 15
49 |
50 | # node response options
51 | response_init_mean = 1.0
52 | response_init_stdev = 3.0
53 | response_max_value = 30.0
54 | response_min_value = -30.0
55 | response_mutate_power = 0.0
56 | response_mutate_rate = 0.0
57 | response_replace_rate = 0.0
58 |
59 | # connection weight options
60 | weight_init_mean = 0.0
61 | weight_init_stdev = 3.0
62 | weight_max_value = 30.
63 | weight_min_value = -30.
64 | weight_mutate_power = 0.5
65 | weight_mutate_rate = 0.8
66 | weight_replace_rate = 0.1
67 |
68 | [StrongSpeciesSet]
69 | compatibility_threshold = 3.0
70 |
71 | [DefaultStagnation]
72 | species_fitness_func = max
73 | max_stagnation = 20
74 | species_elitism = 4
75 |
76 | [Reproduction]
77 | init_distance = 7
78 | min_distance = 0.1
79 | correlation_rate = -0.5
80 | search_count = 30
81 | cluster_method = kmeans++
--------------------------------------------------------------------------------
/configures/task/lunar-lander-v2.fs:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the LunarLander-v2 environment on OpenAI Gym
2 |
3 | [NEAT]
4 | pop_size = 20
5 | fitness_criterion = max
6 | fitness_threshold = -0.2
7 | reset_on_extinction = 0
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 |
41 | feed_forward = True
42 | initial_connection = fs_neat
43 |
44 | # node add/remove rates
45 | node_add_prob = 0.9
46 | node_delete_prob = 0.2
47 |
48 | # network parameters
49 | num_hidden = 0
50 | num_inputs = 8
51 | num_outputs = 1
52 |
53 | # node response options
54 | response_init_mean = 1.0
55 | response_init_stdev = 3.0
56 | response_max_value = 30.0
57 | response_min_value = -30.0
58 | response_mutate_power = 0.0
59 | response_mutate_rate = 0.0
60 | response_replace_rate = 0.0
61 |
62 | # connection weight options
63 | weight_init_mean = 0.0
64 | weight_init_stdev = 3.0
65 | weight_max_value = 30.
66 | weight_min_value = -30.
67 | weight_mutate_power = 0.5
68 | weight_mutate_rate = 0.8
69 | weight_replace_rate = 0.1
70 |
71 | [DefaultSpeciesSet]
72 | compatibility_threshold = 3.0
73 |
74 | [DefaultStagnation]
75 | species_fitness_func = max
76 | max_stagnation = 20
77 | species_elitism = 4
78 |
79 | [DefaultReproduction]
80 | elitism = 2
81 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/configures/task/lunar-lander-v2.gs:
--------------------------------------------------------------------------------
1 | [NEAT]
2 | pop_size = 5
3 | fitness_criterion = max
4 | fitness_threshold = -0.2
5 | reset_on_extinction = 0
6 |
7 | [GlobalGenome]
8 | # node activation options
9 | activation_default = relu
10 | activation_mutate_rate = 0.0
11 | activation_options = relu
12 |
13 | # node aggregation options
14 | aggregation_default = sum
15 | aggregation_mutate_rate = 0.0
16 | aggregation_options = sum
17 |
18 | # node bias options
19 | bias_init_mean = 0.0
20 | bias_init_stdev = 1.0
21 | bias_max_value = 30.0
22 | bias_min_value = -30.0
23 | bias_mutate_power = 0.5
24 | bias_mutate_rate = 0.7
25 | bias_replace_rate = 0.1
26 |
27 | # genome compatibility options
28 | compatibility_disjoint_coefficient = 1.0
29 | compatibility_weight_coefficient = 1.0
30 |
31 | # connection add/remove rates
32 | conn_add_prob = 0.9
33 | conn_delete_prob = 0.2
34 |
35 | # connection enable options
36 | enabled_default = True
37 | enabled_mutate_rate = 0.01
38 | feed_forward = True
39 |
40 | # node add/remove rates
41 | node_add_prob = 0.9
42 | node_delete_prob = 0.2
43 |
44 | # network parameters
45 | num_hidden = 0
46 | num_inputs = 8
47 | num_outputs = 1
48 | max_node_num = 15
49 |
50 | # node response options
51 | response_init_mean = 1.0
52 | response_init_stdev = 3.0
53 | response_max_value = 30.0
54 | response_min_value = -30.0
55 | response_mutate_power = 0.0
56 | response_mutate_rate = 0.0
57 | response_replace_rate = 0.0
58 |
59 | # connection weight options
60 | weight_init_mean = 0.0
61 | weight_init_stdev = 3.0
62 | weight_max_value = 30.
63 | weight_min_value = -30.
64 | weight_mutate_power = 0.5
65 | weight_mutate_rate = 0.8
66 | weight_replace_rate = 0.1
67 |
68 | [StrongSpeciesSet]
69 | compatibility_threshold = 3.0
70 |
71 | [DefaultStagnation]
72 | species_fitness_func = max
73 | max_stagnation = 20
74 | species_elitism = 4
75 |
76 | [Reproduction]
77 | init_distance = 7
78 | min_distance = 0.1
79 | correlation_rate = -0.5
80 | search_count = 30
81 | cluster_method = kmeans++
--------------------------------------------------------------------------------
/configures/task/lunar-lander-v2.n:
--------------------------------------------------------------------------------
1 | # neat-python configuration for the LunarLander-v2 environment on OpenAI Gym
2 |
3 | [NEAT]
4 | pop_size = 20
5 | fitness_criterion = max
6 | fitness_threshold = -0.2
7 | reset_on_extinction = 0
8 |
9 | [DefaultGenome]
10 | # node activation options
11 | activation_default = relu
12 | activation_mutate_rate = 0.0
13 | activation_options = relu
14 |
15 | # node aggregation options
16 | aggregation_default = sum
17 | aggregation_mutate_rate = 0.0
18 | aggregation_options = sum
19 |
20 | # node bias options
21 | bias_init_mean = 0.0
22 | bias_init_stdev = 1.0
23 | bias_max_value = 30.0
24 | bias_min_value = -30.0
25 | bias_mutate_power = 0.5
26 | bias_mutate_rate = 0.7
27 | bias_replace_rate = 0.1
28 |
29 | # genome compatibility options
30 | compatibility_disjoint_coefficient = 1.0
31 | compatibility_weight_coefficient = 1.0
32 |
33 | # connection add/remove rates
34 | conn_add_prob = 0.9
35 | conn_delete_prob = 0.2
36 |
37 | # connection enable options
38 | enabled_default = True
39 | enabled_mutate_rate = 0.01
40 |
41 | feed_forward = True
42 | initial_connection = unconnected
43 |
44 | # node add/remove rates
45 | node_add_prob = 0.9
46 | node_delete_prob = 0.2
47 |
48 | # network parameters
49 | num_hidden = 0
50 | num_inputs = 8
51 | num_outputs = 1
52 |
53 | # node response options
54 | response_init_mean = 1.0
55 | response_init_stdev = 3.0
56 | response_max_value = 30.0
57 | response_min_value = -30.0
58 | response_mutate_power = 0.0
59 | response_mutate_rate = 0.0
60 | response_replace_rate = 0.0
61 |
62 | # connection weight options
63 | weight_init_mean = 0.0
64 | weight_init_stdev = 3.0
65 | weight_max_value = 30.
66 | weight_min_value = -30.
67 | weight_mutate_power = 0.5
68 | weight_mutate_rate = 0.8
69 | weight_replace_rate = 0.1
70 |
71 | [DefaultSpeciesSet]
72 | compatibility_threshold = 3.0
73 |
74 | [DefaultStagnation]
75 | species_fitness_func = max
76 | max_stagnation = 20
77 | species_elitism = 4
78 |
79 | [DefaultReproduction]
80 | elitism = 2
81 | survival_threshold = 0.2
--------------------------------------------------------------------------------
/evolution/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/evolution/__init__.py
--------------------------------------------------------------------------------
/evolution/bean/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/evolution/bean/__init__.py
--------------------------------------------------------------------------------
/evolution/bean/attacker.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 | from enum import Enum
4 |
5 | import numpy
6 |
7 |
8 | class AttackType(Enum):
9 | Normal = 0
10 | Reverse = 1
11 | Gaussian = 2
12 | Zerout = 3
13 |
14 |
15 | # noinspection PyPep8Naming
16 | class CartPole_v0_Attacker(object):
17 |
18 | def __init__(self, current_state=False, beta=0.25, epsilon=0.3, attack_type=AttackType.Normal,
19 | normal_max=0.1, normal_min=0.05, gaussian_peak=0.2):
20 | """
21 |         initialize the attacker for the CartPole-v0 game.
22 |
23 |         :param current_state: current state of the attack, i.e. whether the attack is active or not.
24 |         :param beta: rate for the situation of the observed networks.
25 |         :param epsilon: rate of adversarial (confrontation) samples.
26 |         :param attack_type: the type of attack operation.
27 |         :param normal_max: mean of the Gaussian noise added by the normal attack type.
28 |         :param normal_min: standard deviation of the Gaussian noise added by the normal attack type.
29 |         :param gaussian_peak: standard deviation of the noise used by the gaussian attack type.
30 | """
31 | self.current_state = current_state
32 | self.beta = beta
33 | self.epsilon = epsilon
34 | self.attack_type = attack_type
35 |
36 | # initialize time step (for updating every UPDATE_EVERY steps)
37 | self.t_step = 0
38 | # discount factor for DQN agent
39 | self.gamma = 0.99
40 |         # reverse flag: the reverse attack is applied only on every other call, which halves the effective noise level
41 | self.reverse_flag = True
42 |
43 | self.normal_max = normal_max
44 | self.normal_min = normal_min
45 | self.gaussian_max = gaussian_peak
46 |
47 | def attack(self, original_observation, need_attack=True):
48 | """
49 |         attack the observation with the requested attack type.
50 |
51 |         :param original_observation: the original observation from the Reinforcement Learning environment.
52 | :param need_attack: whether the current situation requires attack.
53 |
54 | :return: observation under attack.
55 | """
56 | attack_observation = copy.deepcopy(original_observation)
57 | if need_attack:
58 | size = len(attack_observation)
59 | if self.attack_type == AttackType.Normal:
60 | for index, value in enumerate(numpy.random.normal(self.normal_max, self.normal_min, size)):
61 | if random.randint(0, 1) == 1:
62 | attack_observation[index] += value
63 | else:
64 | attack_observation[index] -= value
65 | elif self.attack_type == AttackType.Reverse:
66 | if self.reverse_flag:
67 |                     # reverse the ray tracer, but keep the 2-dim velocity
68 | attack_observation = attack_observation[::-1][: size]
69 | self.reverse_flag = False
70 | else:
71 | self.reverse_flag = True
72 | elif self.attack_type == AttackType.Gaussian:
73 | attack_observation = numpy.random.normal(numpy.mean(attack_observation[: size]),
74 | self.gaussian_max, size)
75 | elif self.attack_type == AttackType.Zerout:
76 | attack_observation[0: size - 1] = 0
77 |
78 | return numpy.array(attack_observation)
79 |
--------------------------------------------------------------------------------
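
A small usage sketch for the attacker above, with a fabricated CartPole-v0 observation (the import path follows the file location shown here):

import numpy

from evolution.bean.attacker import AttackType, CartPole_v0_Attacker

# fabricated CartPole-v0 observation: [cart position, cart velocity, pole angle, pole angular velocity]
observation = numpy.array([0.02, -0.15, 0.03, 0.21])

# additive noise drawn from a normal distribution with mean normal_max and spread normal_min
attacker = CartPole_v0_Attacker(attack_type=AttackType.Normal, normal_max=0.1, normal_min=0.05)
noisy_observation = attacker.attack(observation)

# with need_attack=False the observation is returned unchanged (as a copy)
clean_observation = attacker.attack(observation, need_attack=False)
print(noisy_observation, clean_observation)
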
/evolution/bean/genome.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 |
4 | import math
5 | from neat.activations import ActivationFunctionSet
6 | from neat.aggregations import AggregationFunctionSet
7 | from neat.config import ConfigParameter
8 | from neat.genes import DefaultNodeGene, DefaultConnectionGene
9 | from neat.genome import DefaultGenome
10 | from neat.genome import DefaultGenomeConfig
11 | from neat.six_util import iteritems, itervalues
12 |
13 |
14 | def create_center_new(feature_matrix_1, feature_matrix_2, config, key):
15 | """
16 | create a new genome at the midpoint of two genomes.
17 |
18 | :param feature_matrix_1: feature_matrix in one genome.
19 | :param feature_matrix_2: feature_matrix in another genome.
20 | :param config: genome config.
21 | :param key: key of the new genome.
22 |
23 | :return: center genome.
24 | """
25 | new_feature_matrix = []
26 | for row_1, row_2 in zip(feature_matrix_1, feature_matrix_2):
27 | new_row = []
28 | for value_1, value_2 in zip(row_1, row_2):
29 | new_row.append((value_1 + value_2) / 2.0)
30 |
31 | new_feature_matrix.append(new_row)
32 |
33 | new_genome = GlobalGenome(key)
34 | new_genome.feature_matrix_new(new_feature_matrix, config)
35 |
36 | return new_genome
37 |
38 |
39 | def create_golden_section_new(feature_matrix_1, feature_matrix_2, config, key):
40 | """
41 | create a new genome at the golden-section of two genomes, near genome 1.
42 |
43 | :param feature_matrix_1: feature_matrix in one genome.
44 | :param feature_matrix_2: feature_matrix in another genome.
45 | :param config: genome config.
46 | :param key: key of the new genome.
47 |
48 |     :return: golden-section genome.
49 | """
50 |
51 | new_feature_matrix = []
52 | for row_1, row_2 in zip(feature_matrix_1, feature_matrix_2):
53 | new_row = []
54 | for value_1, value_2 in zip(row_1, row_2):
55 | new_row.append(value_1 + (value_2 - value_1) * ((3 - math.sqrt(5)) / 2))
56 |
57 | new_feature_matrix.append(new_row)
58 |
59 | new_genome = GlobalGenome(key)
60 | new_genome.feature_matrix_new(new_feature_matrix, config)
61 |
62 | return new_genome
63 |
64 |
65 | def create_near_new(genome, config, key):
66 | """
67 | create a new genome near the old genome.
68 |
69 | :param genome: original genome.
70 | :param config: genome config.
71 | :param key: key of new genome.
72 |
73 | :return: the new genome.
74 | """
75 | new_genome = copy.deepcopy(genome)
76 | new_genome.key = key
77 | new_genome.fitness = None
78 | new_genome.mutate(config)
79 | new_genome.set_feature_matrix(config)
80 | return new_genome
81 | # while True:
82 | # new_genome = copy.deepcopy(genome)
83 | # new_genome.key = key
84 | # new_genome.fitness = None
85 | # new_genome.mutate(config)
86 | # new_genome.set_feature_matrix(config)
87 | # if create_check(genome):
88 | # return new_genome
89 |
90 |
91 | def create_check(genome):
92 | """
93 | check whether the genome can be constructed into a network.
94 |
95 | :param genome: the information of genome.
96 | """
97 | for connection_gene in itervalues(genome.connections):
98 | if connection_gene.enabled:
99 | for node_id in connection_gene.key:
100 | if node_id >= 0 and node_id not in list(genome.nodes.keys()):
101 | return False
102 |
103 | return True
104 |
105 |
106 | def distance_between_two_matrices(matrix_1, matrix_2):
107 | """
108 | obtain the distance between two matrices.
109 |
110 | :param matrix_1: one matrix.
111 | :param matrix_2: another matrix.
112 |
113 | :return: the distance.
114 | """
115 | distance = 0
116 | for row_1, row_2 in zip(matrix_1, matrix_2):
117 | for value_1, value_2 in zip(row_1, row_2):
118 | distance += math.pow(value_1 - value_2, 2)
119 |
120 | return math.sqrt(distance)
121 |
122 |
123 | # noinspection PyMissingConstructor
124 | class GlobalGenomeConfig(DefaultGenomeConfig):
125 |
126 | def __init__(self, params):
127 | """
128 |         initialize the config from params, adding ConfigParameter('max_node_num', int).
129 | """
130 | # create full set of available activation functions.
131 | self.num_inputs = 0
132 | self.num_outputs = 0
133 | self.single_structural_mutation = None
134 | self.activation_defs = ActivationFunctionSet()
135 | # ditto for aggregation functions - name difference for backward compatibility
136 | self.aggregation_function_defs = AggregationFunctionSet()
137 | self.aggregation_defs = self.aggregation_function_defs
138 |
139 | self._params = [ConfigParameter('num_inputs', int),
140 | ConfigParameter('num_outputs', int),
141 | ConfigParameter('num_hidden', int),
142 | ConfigParameter('max_node_num', int),
143 | ConfigParameter('feed_forward', bool),
144 | ConfigParameter('compatibility_disjoint_coefficient', float),
145 | ConfigParameter('compatibility_weight_coefficient', float),
146 | ConfigParameter('conn_add_prob', float),
147 | ConfigParameter('conn_delete_prob', float),
148 | ConfigParameter('node_add_prob', float),
149 | ConfigParameter('node_delete_prob', float),
150 | ConfigParameter('single_structural_mutation', bool, 'false'),
151 | ConfigParameter('structural_mutation_surer', str, 'default'),
152 | ConfigParameter('initial_connection', str, 'unconnected')]
153 |
154 | # Gather configuration data from the gene classes.
155 | self.node_gene_type = params['node_gene_type']
156 | self._params += self.node_gene_type.get_config_params()
157 | self.connection_gene_type = params['connection_gene_type']
158 | self._params += self.connection_gene_type.get_config_params()
159 |
160 | # Use the configuration data to interpret the supplied parameters.
161 | for p in self._params:
162 | setattr(self, p.name, p.interpret(params))
163 |
164 | # By convention, input pins have negative keys, and the output
165 | # pins have keys 0,1,...
166 | self.input_keys = [-i - 1 for i in range(self.num_inputs)]
167 | self.output_keys = [i for i in range(self.num_outputs)]
168 |
169 | self.connection_fraction = None
170 |
171 | # Verify that initial connection type is valid.
172 | # pylint: disable=access-member-before-definition
173 | if 'partial' in self.initial_connection:
174 | c, p = self.initial_connection.split()
175 | self.initial_connection = c
176 | self.connection_fraction = float(p)
177 | if not (0 <= self.connection_fraction <= 1):
178 | raise RuntimeError(
179 | "'partial' connection value must be between 0.0 and 1.0, inclusive.")
180 |
181 | assert self.initial_connection in DefaultGenomeConfig.allowed_connectivity
182 |
183 | # Verify structural_mutation_surer is valid.
184 | if self.structural_mutation_surer.lower() in ['1', 'yes', 'true', 'on']:
185 | self.structural_mutation_surer = 'true'
186 | elif self.structural_mutation_surer.lower() in ['0', 'no', 'false', 'off']:
187 | self.structural_mutation_surer = 'false'
188 | elif self.structural_mutation_surer.lower() == 'default':
189 | self.structural_mutation_surer = 'default'
190 | else:
191 | error_string = "Invalid structural_mutation_surer {!r}".format(
192 | self.structural_mutation_surer)
193 | raise RuntimeError(error_string)
194 |
195 | self.node_indexer = None
196 |
197 |
198 | class GlobalGenome(DefaultGenome):
199 |
200 | @classmethod
201 | def parse_config(cls, param_dict):
202 | super().parse_config(param_dict)
203 | return GlobalGenomeConfig(param_dict)
204 |
205 | def __init__(self, key):
206 | super().__init__(key)
207 | self.feature_matrix = None
208 |
209 | def configure_new(self, config):
210 | """
211 |         create a new genome from the config, and then create its feature matrix.
212 |
213 | :param config: genome config.
214 | """
215 | # create node genes for the output pins.
216 | for node_key in config.output_keys:
217 | self.nodes[node_key] = self.create_node(config, node_key)
218 |
219 | # add hidden nodes if requested.
220 | if config.num_hidden > 0:
221 | for node_key in range(len(config.output_keys), config.num_hidden + len(config.output_keys)):
222 | node = self.create_node(config, node_key)
223 | self.nodes[node_key] = node
224 |
225 | # add connections with global random.
226 | for input_id, output_id in self.compute_connections(config):
227 | connection = self.create_connection(config, input_id, output_id)
228 | self.connections[connection.key] = connection
229 |
230 | # add feature matrix
231 | self.set_feature_matrix(config)
232 |
233 | @staticmethod
234 | def compute_connections(config):
235 | start = len(config.output_keys)
236 | stop = config.num_hidden + len(config.output_keys)
237 | hidden_keys = random.sample([index for index in range(start, stop)], random.randint(0, config.num_hidden))
238 |
239 | connections = []
240 |
241 | if len(hidden_keys) == 0:
242 | for input_id in config.input_keys:
243 | for output_id in config.output_keys:
244 | if random.randint(0, 1):
245 | connections.append((input_id, output_id))
246 |
247 | if len(connections) == 0:
248 | input_id = random.sample(config.input_keys, 1)
249 | for output_id in config.output_keys:
250 | connections.append((input_id[0], output_id))
251 | else:
252 | chosen_keys = set()
253 |
254 | # from input and hidden nodes to hidden nodes.
255 | for index in range(len(hidden_keys)):
256 | before_keys = config.input_keys + hidden_keys[:index]
257 | for in_degree in random.sample(before_keys, random.randint(1, len(before_keys))):
258 | connections.append((in_degree, hidden_keys[index]))
259 | chosen_keys.add(in_degree)
260 | chosen_keys.add(hidden_keys[index])
261 |
262 | # from input and hidden nodes to output nodes.
263 | chosen_keys = list(chosen_keys)
264 | for output_id in config.output_keys:
265 | for in_degree in random.sample(chosen_keys, random.randint(1, len(chosen_keys))):
266 | connections.append((in_degree, output_id))
267 |
268 | connections = sorted(connections)
269 |
270 | return connections
271 |
272 | def feature_matrix_new(self, feature_matrix, config):
273 | """
274 | create new genome by feature matrix.
275 |
276 | :param feature_matrix: obtained feature matrix.
277 | :param config: genome config
278 | """
279 | self.feature_matrix = feature_matrix
280 |
281 | # create node genes for the output pins.
282 | for node_key in config.output_keys:
283 | self.nodes[node_key] = self.create_node(config, node_key)
284 |
285 | # add hidden nodes by feature matrix if requested.
286 | for node_key in range(config.num_hidden):
287 | node = DefaultNodeGene(node_key)
288 | node.bias = feature_matrix[node_key + config.num_inputs][0]
289 | node.response = config.response_init_mean
290 | node.activation = config.activation_default
291 | node.aggregation = config.aggregation_default
292 | self.nodes[node_key] = node
293 |
294 | # set connections by feature matrix.
295 | for in_index in range(len(feature_matrix)):
296 | for out_index in range(1, len(feature_matrix[in_index])):
297 | if feature_matrix[in_index][out_index] > 0:
298 | connection = DefaultConnectionGene((in_index, out_index - 1))
299 | connection.weight = feature_matrix[in_index][out_index]
300 | connection.enabled = config.enabled_default
301 | self.connections[connection.key] = connection
302 |
303 | def set_feature_matrix(self, config):
304 | """
305 | set the feature matrix for this genome.
306 |
307 | :param config: genome config.
308 | """
309 |         # bias + weight
310 | self.feature_matrix = [[0 for _ in range(config.max_node_num + 1)] for _ in range(config.max_node_num)]
311 |
312 | # position mapping of feature matrix
313 | mapping = {}
314 | for index in range(config.num_inputs):
315 | mapping[index - config.num_inputs] = index
316 |
317 | # add node bias
318 | index = config.num_inputs
319 | for node_key, node_gene in iteritems(self.nodes):
320 | self.feature_matrix[index][0] = node_gene.bias
321 | mapping[node_key] = index
322 | index += 1
323 |
324 | # add connect weight
325 | for connect_gene in itervalues(self.connections):
326 | if mapping.get(connect_gene.key[0]) is not None and mapping.get(connect_gene.key[1]) is not None:
327 | row = mapping.get(connect_gene.key[0])
328 | col = mapping.get(connect_gene.key[1]) + 1
329 | self.feature_matrix[row][col] = connect_gene.weight
330 |
331 | def distance(self, other, config):
332 | """
333 | obtain distance by two feature matrix.
334 |
335 | :param other: another genome.
336 | :param config: genome config.
337 |
338 | :return: distance of two genomes.
339 | """
340 | if other.feature_matrix is None:
341 | other.set_feature_matrix(config)
342 |
343 | return distance_between_two_matrices(self.feature_matrix, other.feature_matrix)
344 |
345 | def mutate_add_node(self, config):
346 | """
347 |         add a node by mutation only while the node count is below the allowed maximum (max_node_num).
348 |
349 |         :param config: genome config.
350 | """
351 | if config.max_node_num - config.num_inputs - config.num_outputs > len(self.nodes):
352 | super().mutate_add_node(config)
353 |
354 | def __str__(self):
355 | s = super().__str__()
356 | s += "\nFeature Matrix:"
357 | for row in self.feature_matrix:
358 | s += "\n\t" + str(row)
359 |
360 | return s
361 |
--------------------------------------------------------------------------------
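
The feature matrix is what makes the helpers above work without topology matching: every genome is flattened into a max_node_num x (max_node_num + 1) matrix whose first column holds node biases and whose remaining columns hold connection weights, so genomes can be compared or blended element-wise. A small numeric sketch using only the pure helpers (no NEAT config required; the matrices are made up):

import math

from evolution.bean.genome import distance_between_two_matrices

# two toy 2 x 3 feature matrices: column 0 holds node biases, the other columns hold connection weights
matrix_1 = [[0.5, 1.0, 0.0],
            [0.2, 0.0, -1.0]]
matrix_2 = [[0.1, 1.0, 0.0],
            [0.2, 0.0, 1.0]]

# Euclidean distance over all entries: sqrt(0.4 ** 2 + 2.0 ** 2)
print(distance_between_two_matrices(matrix_1, matrix_2))

# the golden-section blend used by create_golden_section_new keeps the new value closer to matrix_1:
# value_1 + (value_2 - value_1) * (3 - sqrt(5)) / 2
ratio = (3 - math.sqrt(5)) / 2
blended = [[value_1 + (value_2 - value_1) * ratio for value_1, value_2 in zip(row_1, row_2)]
           for row_1, row_2 in zip(matrix_1, matrix_2)]
print(blended)
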
/evolution/bean/species_set.py:
--------------------------------------------------------------------------------
1 | from neat.species import DefaultSpeciesSet
2 | from neat.math_util import mean, stdev
3 | from neat.species import Species, GenomeDistanceCache
4 | from six import iteritems, itervalues, iterkeys
5 |
6 |
7 | class StrongSpeciesSet(DefaultSpeciesSet):
8 |
9 | def speciate(self, config, population, generation):
10 | """
11 | Place genomes into species by genetic similarity.
12 |
13 | Note that this method assumes the current representatives of the species are from the old
14 | generation, and that after speciation has been performed, the old representatives should be
15 | dropped and replaced with representatives from the new generation. If you violate this
16 | assumption, you should make sure other necessary parts of the code are updated to reflect
17 | the new behavior.
18 | """
19 | assert isinstance(population, dict)
20 |
21 | compatibility_threshold = self.species_set_config.compatibility_threshold
22 |
23 | # Find the best representatives for each existing species.
24 | unspeciated = set(iterkeys(population))
25 | distances = GenomeDistanceCache(config.genome_config)
26 | new_representatives = {}
27 | new_members = {}
28 | for sid, s in iteritems(self.species):
29 | candidates = []
30 | for gid in unspeciated:
31 | g = population[gid]
32 | d = distances(s.representative, g)
33 | candidates.append((d, g))
34 |
35 | # The new representative is the genome closest to the current representative.
36 | if len(candidates) > 0:
37 | ignored_rdist, new_rep = min(candidates, key=lambda x: x[0])
38 | new_rid = new_rep.key
39 | new_representatives[sid] = new_rid
40 | new_members[sid] = [new_rid]
41 | unspeciated.remove(new_rid)
42 |
43 | # Partition population into species based on genetic similarity.
44 | while unspeciated:
45 | gid = unspeciated.pop()
46 | g = population[gid]
47 |
48 | # Find the species with the most similar representative.
49 | candidates = []
50 | for sid, rid in iteritems(new_representatives):
51 | rep = population[rid]
52 | d = distances(rep, g)
53 | if d < compatibility_threshold:
54 | candidates.append((d, sid))
55 |
56 | if len(candidates) > 0:
57 | ignored_sdist, sid = min(candidates, key=lambda x: x[0])
58 | new_members[sid].append(gid)
59 | else:
60 | # No species is similar enough, create a new species, using
61 | # this genome as its representative.
62 | sid = next(self.indexer)
63 | new_representatives[sid] = gid
64 | new_members[sid] = [gid]
65 |
66 | # Update species collection based on new speciation.
67 | self.genome_to_species = {}
68 | for sid, rid in iteritems(new_representatives):
69 | s = self.species.get(sid)
70 | if s is None:
71 | s = Species(sid, generation)
72 | self.species[sid] = s
73 |
74 | members = new_members[sid]
75 | for gid in members:
76 | self.genome_to_species[gid] = sid
77 |
78 | member_dict = dict((gid, population[gid]) for gid in members)
79 | s.update(population[rid], member_dict)
80 |
81 | gdmean = mean(itervalues(distances.distances))
82 | gdstdev = stdev(itervalues(distances.distances))
83 | self.reporters.info(
84 | 'Mean genetic distance {0:.3f}, standard deviation {1:.3f}'.format(gdmean, gdstdev))
85 |
--------------------------------------------------------------------------------
/evolution/evolutor.py:
--------------------------------------------------------------------------------
1 | import random
2 | from enum import Enum
3 | import math
4 | import numpy
5 | import logging
6 |
7 | from neat.nn import feed_forward, recurrent
8 |
9 |
10 | class LearnType(Enum):
11 | Supervised = 1
12 | Reinforced = 2
13 |
14 |
15 | class NetType(Enum):
16 | FeedForward = 1
17 | Recurrent = 2
18 |
19 |
20 | class EvalType(Enum):
21 | EulerDistance = 1
22 | HammingDistance = 2
23 | ManhattanDistance = 3
24 |
25 |
26 | class TypeCorrect(Enum):
27 | List = 1
28 | Value = 2
29 |
30 |
31 | class FitDevice(object):
32 |
33 | def __init__(self, method, network_type=NetType.FeedForward):
34 | """
35 | Initialize the evolution calculation and type of network.
36 |
37 | :param method: evolution process, see /evolution/methods/
38 | :param network_type: type of network created by genome.
39 | """
40 | logging.info("Initialize the evolution process calculation.")
41 | self.method = method
42 | self.network_type = network_type
43 |
44 | self.learn_type = None
45 |
46 | self.dataset = None
47 |
48 | self.environment = None
49 | self.episode_steps = None
50 | self.episode_generation = None
51 | self.input_type = None
52 | self.output_type = None
53 | self.attacker = None
54 | self.noise_level = None
55 |
56 | def set_environment(self, environment, episode_steps, episode_generation,
57 | input_type, output_type,
58 | attacker=None, noise_level=None):
59 | """
60 | Set the environment of Reinforcement Learning in gym library.
61 |
62 | :param environment: environment of Reinforcement Learning in gym library.
63 | :param episode_steps: maximum episode steps.
64 |         :param episode_generation: number of episodes per evaluation; fitness is aggregated from the episode rewards.
65 |         :param input_type: type of input, TypeCorrect.List or TypeCorrect.Value.
66 |         :param output_type: type of output, TypeCorrect.List or TypeCorrect.Value.
67 | :param attacker: noise attacker for observation.
68 | :param noise_level: noise level of attacker.
69 | """
70 | logging.info("Obtain the environment.")
71 | if self.dataset is None:
72 | self.environment = environment
73 | self.episode_steps = episode_steps
74 | self.episode_generation = episode_generation
75 | self.learn_type = LearnType.Reinforced
76 | self.input_type = input_type
77 | self.output_type = output_type
78 | self.attacker = attacker
79 | self.noise_level = noise_level
80 | elif self.learn_type is None:
81 | logging.warning("Do not enter data repeatedly!")
82 | else:
83 |             logging.warning("You have already set a dataset for Supervised Learning!")
84 |
85 | def set_dataset(self, dataset):
86 | """
87 | Set the dataset of Supervised Learning.
88 |
89 | :param dataset: dataset, including inputs and expected outputs, type is {"i": data, "o": data}.
90 | """
91 | if self.environment is None:
92 | self.dataset = dataset
93 | self.learn_type = LearnType.Supervised
94 | elif self.learn_type is None:
95 | logging.warning("Do not enter data repeatedly!")
96 | else:
97 |             logging.warning("You have already set an environment for Reinforcement Learning!")
98 |
99 | def genomes_fitness(self, genomes, config):
100 | """
101 |         Calculate the fitness of the given genomes (only those without a fitness yet).
102 |
103 | :param genomes: genomes of NEAT.
104 | :param config: configure of genome.
105 | """
106 | for genome_id, genome in genomes:
107 | if genome.fitness is None:
108 | self.genome_fitness(genome, config)
109 |
110 | def genome_fitness(self, genome, config):
111 | """
112 |         Calculate the fitness of a single genome.
113 |
114 | :param genome: genome of NEAT.
115 | :param config: configure of genome.
116 | """
117 |         if self.learn_type == LearnType.Supervised:
118 |             self._genome_in_supervised(genome, config)
119 |         else:
120 |             self._genome_in_reinforced(genome, config)
121 |
122 | def _genome_in_supervised(self, genome, config):
123 | """
124 |         Calculate the fitness of a genome in Supervised Learning.
125 |
126 |         :param genome: one genome in the current generation.
127 |         :param config: configuration used to generate the network from the genome.
128 | """
129 | network = self.generated_network(genome, config)
130 |
131 | obtain_outputs = []
132 | for current_input in self.dataset.get("i"):
133 | obtain_outputs.append(network.activate(current_input))
134 |
135 | genome.fitness = self.method.calculate(learn_type=self.learn_type,
136 | obtain_outputs=obtain_outputs,
137 | expected_outputs=self.dataset.get("o"))
138 |
139 | def _genome_in_reinforced(self, genome, config):
140 | """
141 |         Calculate the fitness of a genome in Reinforcement Learning.
142 |
143 |         :param genome: one genome in the current generation.
144 |         :param config: configuration used to generate the network from the genome.
145 | """
146 | network = self.generated_network(genome, config)
147 |
148 | has_attack = self.attacker is not None and self.noise_level is not None
149 |
150 | episode_recorder = []
151 |         # run multiple episodes for the genome, in case a single run is merely lucky.
152 | for episode in range(self.episode_generation):
153 | accumulative_recorder = 0
154 | attack_count = 0
155 | observation = self.environment.reset()
156 | for step in range(self.episode_steps):
157 | # set attack if has attack.
158 | if has_attack and random.randint(0, 100) < self.noise_level * 100:
159 | if type(observation) is not numpy.ndarray:
160 | observation = numpy.array([observation])
161 | actual_observation = self.attacker.attack(observation)
162 | attack_count += 1
163 | else:
164 | actual_observation = observation
165 |
166 | # check input type
167 | if self.input_type == TypeCorrect.List and type(actual_observation) is not numpy.ndarray:
168 | actual_observation = numpy.array([actual_observation])
169 | if self.input_type == TypeCorrect.Value and type(actual_observation) is numpy.ndarray:
170 | actual_observation = actual_observation[0]
171 |
172 | action_values = network.activate(actual_observation)
173 | action = numpy.argmax(action_values)
174 |
175 | # check output type
176 | if self.output_type == TypeCorrect.List and type(action) is not numpy.ndarray:
177 | action = numpy.array([action])
178 | if self.output_type == TypeCorrect.Value and type(action) is numpy.ndarray:
179 | action = action[0]
180 |
181 | current_observation, reward, done, _ = self.environment.step(action)
182 | accumulative_recorder += reward
183 |
184 | if done:
185 | # if has_attack:
186 | # print("with: ", round(attack_count / float(step + 1), 2), "% attack.")
187 | break
188 | else:
189 | observation = current_observation
190 | episode_recorder.append(accumulative_recorder)
191 |
192 | genome.fitness = self.method.calculate(learn_type=self.learn_type,
193 | episode_recorder=episode_recorder,
194 | episode_steps=self.episode_steps)
195 |
196 | def generated_network(self, genome, config):
197 | """
198 | Obtain a network from genome and its configure.
199 |
200 |         :param genome: one genome in the current generation.
201 |         :param config: configuration used to generate the network from the genome.
202 |
203 | :return: generated network.
204 | """
205 | if self.network_type == NetType.FeedForward:
206 | return feed_forward.FeedForwardNetwork.create(genome, config)
207 | elif self.network_type == NetType.Recurrent:
208 | return recurrent.RecurrentNetwork.create(genome, config)
209 |
210 | return None
211 |
212 |
213 | class FitProcess(object):
214 |
215 | def __init__(self, init_fitness=None, eval_type=EvalType.EulerDistance):
216 | """
217 | Initialize the hyper-parameters.
218 |
219 | :param init_fitness: initialize fitness in Distance.
220 | :param eval_type: distance type for evaluation.
221 | """
222 | self.init_fitness = init_fitness
223 | self.eval_type = eval_type
224 |
225 | def _update(self, previous_fitness, output, expected_output):
226 | """
227 | Update the current fitness.
228 |
229 | :param previous_fitness: previous fitness.
230 | :param output: actual outputs in Supervised Learning.
231 | :param expected_output: expected outputs in Supervised Learning.
232 |
233 | :return: current fitness.
234 | """
235 | record = 0
236 | for value, expected_value in zip(output, expected_output):
237 | if self.eval_type == EvalType.EulerDistance:
238 | record += math.sqrt(math.pow(value - expected_value, 2))
239 | elif self.eval_type == EvalType.HammingDistance:
240 | record += abs(1 - int(value == expected_value))
241 | elif self.eval_type == EvalType.ManhattanDistance:
242 | record += math.pow(value - expected_value, 2)
243 |
244 | return previous_fitness - record
245 |
246 | def calculate(self, learn_type,
247 | obtain_outputs=None, expected_outputs=None,
248 | episode_recorder=None, episode_steps=None):
249 | """
250 | Calculate the current fitness.
251 |
252 | :param learn_type: learning type of tasks, Supervised or Reinforced.
253 | :param obtain_outputs: actual outputs in Supervised Learning.
254 | :param expected_outputs: expected outputs in Supervised Learning.
255 | :param episode_recorder: episode recorder in Reinforcement Learning.
256 | :param episode_steps: episode steps in Reinforcement Learning.
257 |
258 | :return: current fitness.
259 | """
260 | if learn_type == LearnType.Supervised:
261 | if self.init_fitness is None:
262 | raise Exception("No init fitness value!")
263 | current_fitness = self.init_fitness
264 | for output, expected_output in zip(obtain_outputs, expected_outputs):
265 | current_fitness = self._update(current_fitness, output, expected_output)
266 | return current_fitness
267 | else:
268 | return numpy.min(episode_recorder) / float(episode_steps)
269 |
--------------------------------------------------------------------------------
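As a quick illustration of the supervised path above, the following minimal sketch feeds hand-written outputs through FitProcess.calculate. It assumes the package is importable as ReverseEncodingTree (see setup.py); the output values are invented purely for illustration.

# Minimal sketch: FitProcess on a supervised toy case.
from ReverseEncodingTree.evolution.evolutor import EvalType, FitProcess, LearnType

expected_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]   # XOR targets
obtain_outputs = [(0.1,), (0.8,), (0.9,), (0.2,)]     # hypothetical network outputs

process = FitProcess(init_fitness=4, eval_type=EvalType.ManhattanDistance)
fitness = process.calculate(learn_type=LearnType.Supervised,
                            obtain_outputs=obtain_outputs,
                            expected_outputs=expected_outputs)
# _update accumulates squared differences for ManhattanDistance, so the result
# is 4 - (0.01 + 0.04 + 0.01 + 0.04), about 3.90.
print(fitness)
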
/evolution/methods/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/evolution/methods/__init__.py
--------------------------------------------------------------------------------
/evolution/methods/bi.py:
--------------------------------------------------------------------------------
1 | """
2 | Name: NEAT evolved by Binary Search
3 |
4 | Function(s):
5 | Reproduction by Binary Search and Random Near Search.
6 | """
7 | import copy
8 |
9 | import math
10 | import pandas
11 | from sklearn.cluster import KMeans, SpectralClustering, Birch
12 | from neat.reproduction import DefaultReproduction
13 | from neat.config import DefaultClassConfig, ConfigParameter
14 |
15 | from ReverseEncodingTree.evolution.bean.genome import create_near_new, create_center_new, distance_between_two_matrices
16 |
17 |
18 | class Reproduction(DefaultReproduction):
19 |
20 | def __init__(self, config, reporters, stagnation):
21 | super().__init__(config, reporters, stagnation)
22 | self.best_genome = None
23 | self.genome_config = None
24 | self.genome_type = None
25 | self.global_rate = None
26 |
27 | @classmethod
28 | def parse_config(cls, param_dict):
29 | """
30 |         add the search-related parameters (init/min distance, correlation rate, search count, cluster method) to the config.
31 |
32 | :param param_dict: parameter dictionary.
33 |
34 | :return: config.
35 | """
36 | return DefaultClassConfig(param_dict,
37 | [ConfigParameter('init_distance', float, 5),
38 | ConfigParameter('min_distance', float, 0.2),
39 | ConfigParameter('correlation_rate', float, -0.5),
40 | ConfigParameter('search_count', int, 1),
41 | ConfigParameter('cluster_method', str, "kmeans++")])
42 |
43 | def create_new(self, genome_type, genome_config, num_genomes):
44 | """
45 | create new genomes by type, config, and number.
46 |
47 | :param genome_type: genome type.
48 | :param genome_config: genome config.
49 | :param num_genomes: number of new genomes.
50 |
51 | :return: new genomes.
52 | """
53 |
54 | if genome_config.num_hidden + genome_config.num_inputs + genome_config.num_outputs > genome_config.max_node_num:
55 |             raise Exception("config: max_node_num must be no less than num_inputs + num_outputs + num_hidden")
56 |
57 | self.genome_config = genome_config
58 | self.genome_type = genome_type
59 |
60 | new_genomes = {}
61 | distance_matrix = [[float("inf") for _ in range(num_genomes - 1)] for _ in range(num_genomes - 1)]
62 |
63 | for created_index in range(num_genomes):
64 | key = next(self.genome_indexer)
65 | genome = genome_type(key)
66 | count = 0
67 | while True:
68 | genome.configure_new(genome_config)
69 | min_distance = float("inf")
70 | for check_index, new_genome in new_genomes.items():
71 | current_distance = genome.distance(new_genome, genome_config)
72 | distance_matrix[created_index - 1][check_index - 1] = current_distance
73 | distance_matrix[check_index - 1][created_index - 1] = current_distance
74 | if min_distance > current_distance:
75 | min_distance = current_distance
76 | if min_distance >= self.reproduction_config.init_distance:
77 | break
78 |
79 | count += 1
80 |
81 | if count > self.reproduction_config.search_count:
82 |                     raise Exception("init_distance is too large for the whole landscape, " +
83 |                                     "please reduce init_distance or try again!")
84 |
85 | new_genomes[key] = genome
86 | self.ancestors[key] = tuple()
87 |
88 | return new_genomes
89 |
90 | def reproduce(self, config, species, pop_size, generation):
91 | """
92 | handles creation of genomes, either from scratch or by sexual or asexual reproduction from parents.
93 |
94 | :param config: genome config.
95 | :param species: genome species.
96 | :param pop_size: population size.
97 | :param generation: generation of population.
98 |
99 | :return: new population.
100 | """
101 |
102 | # obtain current genomes and current evolution speed.
103 | genome_clusters, cluster_centers = self.obtain_clusters(species, pop_size)
104 |
105 | # obtain topological genomes and near genomes.
106 | new_genomes = self.obtain_phenotypic_network(pop_size, genome_clusters, cluster_centers)
107 |
108 | # aggregate final population
109 | new_population = {}
110 | for index, genome in enumerate(new_genomes):
111 | genome.key = index
112 | new_population[index] = genome
113 |
114 | return new_population
115 |
116 | def obtain_clusters(self, species, pop_size):
117 | """
118 |         obtain the current genotypical network (genome clusters) and the cluster centers.
119 |
120 | :param species: genome species.
121 | :param pop_size: population size.
122 |
123 |         :return: genome clusters and cluster centers (centers are None when no clustering is performed).
124 | """
125 | # obtain all genomes from species.
126 | current_genomes = []
127 | for i, value in species.species.items():
128 | members = value.members
129 | for key, individual in members.items():
130 | current_genomes.append(individual)
131 |
132 | # sort members in order of descending fitness.
133 | current_genomes.sort(reverse=True, key=lambda g: g.fitness)
134 |
135 |         # calculate the average adjusted fitness.
136 | avg_adjusted_fitness = 0
137 |
138 | if len(current_genomes) > pop_size:
139 | feature_matrices = []
140 | for genome in current_genomes:
141 | feature_matrices.append([])
142 | for feature_slice in genome.feature_matrix:
143 | feature_matrices[-1] += copy.deepcopy(feature_slice)
144 |
145 | # cluster the current network based on the size of population.
146 | labels, centers = self.cluster(feature_matrices, pop_size, len(current_genomes))
147 |
148 | genome_clusters = [[] for _ in range(pop_size)]
149 | for index, cluster_index in enumerate(labels):
150 | genome_clusters[cluster_index].append(current_genomes[index])
151 |
152 | for genome_cluster in genome_clusters:
153 | avg_adjusted_fitness += genome_cluster[0].fitness / pop_size
154 |
155 | self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
156 |
157 | return genome_clusters, centers
158 | else:
159 | genome_clusters = []
160 | for genome in current_genomes:
161 | genome_clusters.append([genome])
162 | avg_adjusted_fitness += genome.fitness / pop_size
163 |
164 | self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
165 |
166 | return genome_clusters, None
167 |
168 | def obtain_phenotypic_network(self, pop_size, genome_clusters, cluster_centers):
169 | """
170 | obtain new phenotypic network from population size and current phenotypic network.
171 |
172 | :param pop_size: population size.
173 | :param genome_clusters: current phenotypic network.
174 | :param cluster_centers: centers of cluster.
175 |
176 | :return: center genomes and near genomes.
177 | """
178 | if cluster_centers is not None:
179 | saved_genomes = []
180 | correlations = []
181 |
182 |             # analyze the correlation between fitness and spatial distance (normally negative).
183 | for genome_cluster in genome_clusters:
184 | distances = [0]
185 | fitnesses = [genome_cluster[0].fitness]
186 | saved_genomes.append(genome_cluster[0])
187 | for index in range(1, len(genome_cluster)):
188 | distances.append(genome_cluster[0].distance(genome_cluster[index], self.genome_config))
189 | fitnesses.append(genome_cluster[index].fitness)
190 |
191 | if len(fitnesses) > 1:
192 | correlations.append(round(pandas.Series(distances).corr(pandas.Series(fitnesses)), 2))
193 | else:
194 | correlations.append(-1.00)
195 |
196 | for index in range(len(correlations)):
197 | if math.isnan(correlations[index]):
198 | correlations[index] = 0
199 |
200 | print("Correlations: " + str(correlations))
201 |
202 | new_genomes = []
203 | # construct the topology of the phenotypical network
204 | for index_1 in range(pop_size):
205 | cluster_1 = genome_clusters[index_1]
206 | for index_2 in range(index_1 + 1, pop_size):
207 | cluster_2 = genome_clusters[index_2]
208 |
209 | if distance_between_two_matrices(cluster_1[0].feature_matrix, cluster_2[0].feature_matrix) \
210 | > self.reproduction_config.min_distance:
211 |
212 |                     # If both clusters have high correlations,
213 |                     # the current network of these two clusters describes the phenotype well,
214 |                     # and evolution is carried out according to the original method.
215 | if correlations[index_1] >= self.reproduction_config.correlation_rate \
216 | and correlations[index_2] >= self.reproduction_config.correlation_rate:
217 | topo_genome = self.obtain_global_genome(cluster_centers[index_1],
218 | cluster_centers[index_2],
219 | saved_genomes + new_genomes, -1)
220 | if cluster_1[0].fitness > cluster_2[0].fitness:
221 | near_genome = self.obtain_near_genome(cluster_1[0],
222 | saved_genomes + new_genomes + cluster_1, -1)
223 | else:
224 | near_genome = self.obtain_near_genome(cluster_2[0],
225 | saved_genomes + new_genomes + cluster_2, -1)
226 |
227 | if near_genome is not None:
228 | new_genomes.append(near_genome)
229 | if topo_genome is not None:
230 | new_genomes.append(topo_genome)
231 |
232 | elif correlations[index_1] >= self.reproduction_config.correlation_rate > correlations[index_2]:
233 | if cluster_1[0].fitness > cluster_2[0].fitness:
234 | near_genome_1 = self.obtain_near_genome(cluster_1[0],
235 | saved_genomes + new_genomes + cluster_1, -1)
236 | near_genome_2 = self.obtain_near_genome(cluster_2[0],
237 | saved_genomes + new_genomes + cluster_2, -1)
238 | else:
239 | near_genome_1 = self.obtain_near_genome(cluster_2[0],
240 | saved_genomes + new_genomes + cluster_2, -1)
241 | near_genome_2 = self.obtain_near_genome(cluster_2[0],
242 | saved_genomes + new_genomes + cluster_2, -1)
243 |
244 | if near_genome_1 is not None:
245 | new_genomes.append(near_genome_1)
246 | if near_genome_2 is not None:
247 | new_genomes.append(near_genome_2)
248 |
249 | elif correlations[index_2] >= self.reproduction_config.correlation_rate > correlations[index_1]:
250 | if cluster_1[0].fitness > cluster_2[0].fitness:
251 | near_genome_1 = self.obtain_near_genome(cluster_1[0],
252 | saved_genomes + new_genomes + cluster_1, -1)
253 | near_genome_2 = self.obtain_near_genome(cluster_1[0],
254 | saved_genomes + new_genomes + cluster_1, -1)
255 |
256 | else:
257 | near_genome_1 = self.obtain_near_genome(cluster_1[0],
258 | saved_genomes + new_genomes + cluster_1, -1)
259 | near_genome_2 = self.obtain_near_genome(cluster_2[0],
260 | saved_genomes + new_genomes + cluster_2, -1)
261 |
262 | if near_genome_1 is not None:
263 | new_genomes.append(near_genome_1)
264 | if near_genome_2 is not None:
265 | new_genomes.append(near_genome_2)
266 | else:
267 | near_genome_1 = self.obtain_near_genome(cluster_1[0],
268 | saved_genomes + new_genomes + cluster_1, -1)
269 | near_genome_2 = self.obtain_near_genome(cluster_2[0],
270 | saved_genomes + new_genomes + cluster_2, -1)
271 | if near_genome_1 is not None:
272 | new_genomes.append(near_genome_1)
273 | if near_genome_2 is not None:
274 | new_genomes.append(near_genome_2)
275 |
276 | new_genomes += saved_genomes
277 | else:
278 | # create the initial topology network (binary search & near search).
279 | new_genomes = []
280 | for genome_cluster in genome_clusters:
281 | new_genomes.append(genome_cluster[0])
282 |
283 | for index_1 in range(pop_size):
284 | genome_1 = new_genomes[index_1]
285 | for index_2 in range(index_1 + 1, pop_size):
286 | genome_2 = new_genomes[index_2]
287 |
288 | if genome_1.distance(genome_2, self.genome_config) > self.reproduction_config.min_distance:
289 | # add near genome (limit search count)
290 | near_genome = self.obtain_near_genome(genome_1, new_genomes, -1)
291 | # add center genome
292 | topo_genome = self.obtain_global_genome(genome_1.feature_matrix, genome_2.feature_matrix,
293 | new_genomes, -1)
294 | if near_genome is not None:
295 | new_genomes.append(near_genome)
296 | if topo_genome is not None:
297 | new_genomes.append(topo_genome)
298 |
299 | return new_genomes
300 |
301 | def cluster(self, feature_matrices, pop_size, iteration):
302 | """
303 | cluster the current network based on the size of population using Cluster Method.
304 |
305 |         :param feature_matrices: set of feature matrices (each flattened to one dimension).
306 | :param pop_size: population size.
307 | :param iteration: maximum iteration.
308 |
309 | :return: labels and cluster centers.
310 | """
311 | centers = []
312 | if self.reproduction_config.cluster_method == "kmeans++":
313 | method = KMeans(n_clusters=pop_size, max_iter=iteration)
314 | elif self.reproduction_config.cluster_method == "spectral":
315 | method = SpectralClustering(n_clusters=pop_size)
316 | elif self.reproduction_config.cluster_method == "birch":
317 | method = Birch(n_clusters=pop_size)
318 | else:
319 | method = KMeans(n_clusters=pop_size, max_iter=iteration, init="random")
320 |
321 | method.fit(feature_matrices)
322 | for cluster_center in method.cluster_centers_:
323 | feature_matrix = []
324 | for index in range(self.genome_config.max_node_num):
325 | feature_matrix.append(list(cluster_center[index * self.genome_config.max_node_num:
326 | (index + 1) * self.genome_config.max_node_num + 1]))
327 | centers.append(feature_matrix)
328 |
329 | return method.labels_, centers
330 |
331 | def obtain_global_genome(self, matrix_1, matrix_2, saved_genomes, index):
332 | """
333 |         obtain global genome based on the feature matrices of two genomes.
334 |
335 |         :param matrix_1: one feature matrix.
336 |         :param matrix_2: another feature matrix.
337 |         :param saved_genomes: genomes already saved in this population.
338 | :param index: genome index.
339 |
340 |         :return: novel global (binary search) genome, or None if it cannot be created due to min_distance.
341 | """
342 | center_genome = create_center_new(matrix_1, matrix_2, self.genome_config, index)
343 | is_input = True
344 | for check_genome in saved_genomes:
345 | if center_genome.distance(check_genome, self.genome_config) < self.reproduction_config.min_distance:
346 | is_input = False
347 |
348 | if is_input:
349 | return center_genome
350 |
351 | return None
352 |
353 | def obtain_near_genome(self, parent_genome, saved_genomes, index):
354 | """
355 | obtain near genome by NEAT.
356 |
357 | :param parent_genome: parent genome.
358 |         :param saved_genomes: genomes already saved in this population.
359 |         :param index: genome index.
360 |
361 |         :return: novel near genome, or None if it cannot be created due to min_distance.
362 | """
363 | count = 0
364 | while count < self.reproduction_config.search_count:
365 | near_genome = create_near_new(parent_genome, self.genome_config, index)
366 | is_input = True
367 | for check_genome in saved_genomes:
368 | if near_genome.distance(check_genome, self.genome_config) < self.reproduction_config.min_distance:
369 | is_input = False
370 | if is_input:
371 | return near_genome
372 |
373 | count += 1
374 |
375 | return None
376 |
--------------------------------------------------------------------------------
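The Reproduction class above plugs into neat-python through its Config object. A minimal wiring sketch, mirroring the setup used later in tasks/task_inform.py (the relative configuration path assumes the script runs next to the configures directory):

# Sketch: using the binary-search Reproduction with neat-python.
from neat import config, stagnation

import ReverseEncodingTree.evolution.bean.genome as autogenome
import ReverseEncodingTree.evolution.bean.species_set as autospecies
from ReverseEncodingTree.evolution.methods import bi

task_config = config.Config(autogenome.GlobalGenome, bi.Reproduction,
                            autospecies.StrongSpeciesSet, stagnation.DefaultStagnation,
                            "../configures/task/logic.bi")
# task_config is then handed to utils.operator.Operator together with a FitDevice.
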
/evolution/methods/gs.py:
--------------------------------------------------------------------------------
1 | """
2 | Name: NEAT evolved by Golden-Section Search
3 |
4 | Function(s):
5 | Reproduction by Golden-Section Search and Random Near Search.
6 | """
7 |
8 | from ReverseEncodingTree.evolution.bean.genome import create_golden_section_new
9 | from ReverseEncodingTree.evolution.methods import bi
10 |
11 |
12 | class Reproduction(bi.Reproduction):
13 |
14 | def obtain_global_genome(self, matrix_1, matrix_2, saved_genomes, index):
15 | """
16 |         obtain global genome based on the feature matrices of two genomes.
17 |
18 |         :param matrix_1: one feature matrix.
19 |         :param matrix_2: another feature matrix.
20 |         :param saved_genomes: genomes already saved in this population.
21 |         :param index: genome index.
22 |
23 |         :return: novel global (golden-section search) genome, or None if it cannot be created due to min_distance.
24 | """
25 | center_genome = create_golden_section_new(matrix_1, matrix_2, self.genome_config, index)
26 | is_input = True
27 | for check_genome in saved_genomes:
28 | if center_genome.distance(check_genome, self.genome_config) < self.reproduction_config.min_distance:
29 | is_input = False
30 |
31 | if is_input:
32 | return center_genome
33 |
34 | return None
35 |
--------------------------------------------------------------------------------
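For intuition only: a golden-section point between two feature matrices divides each element-wise segment by the golden ratio instead of halving it. The hypothetical helper below is not the repository's create_golden_section_new (which additionally rebuilds a genome from the result); it merely illustrates the ratio.

# Hypothetical illustration of a golden-ratio split between two feature matrices.
GOLDEN_RATIO = (5 ** 0.5 - 1) / 2  # about 0.618


def golden_section_point(matrix_1, matrix_2, ratio=GOLDEN_RATIO):
    # move from matrix_1 toward matrix_2 by the golden ratio, element by element.
    return [[v1 + ratio * (v2 - v1) for v1, v2 in zip(row_1, row_2)]
            for row_1, row_2 in zip(matrix_1, matrix_2)]


print(golden_section_point([[0.0, 0.0]], [[1.0, 2.0]]))  # about [[0.618, 1.236]]
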
/example/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/example/__init__.py
--------------------------------------------------------------------------------
/example/cart_pole_v0.py:
--------------------------------------------------------------------------------
1 | import gym
2 | from neat import config, genome, reproduction, species, stagnation
3 |
4 | from ReverseEncodingTree.evolution.evolutor import FitDevice, FitProcess, TypeCorrect
5 | from ReverseEncodingTree.utils.operator import Operator
6 |
7 | has_environment = False
8 | for environment_space in gym.envs.registry.all():
9 | if "CartPole-v0" in environment_space.id:
10 | has_environment = True
11 | break
12 |
13 | if not has_environment:
14 | raise Exception("no environment named CartPole-v0.")
15 |
16 | environment = gym.make("CartPole-v0").unwrapped
17 |
18 | if __name__ == '__main__':
19 | # load configuration.
20 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
21 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
22 | "../configures/example/cart-pole-v0")
23 |
24 | # load evolution process.
25 | fitter = FitDevice(FitProcess())
26 | fitter.set_environment(environment=environment, episode_steps=300, episode_generation=10,
27 | input_type=TypeCorrect.List, output_type=TypeCorrect.Value)
28 |
29 | # initialize the NeuroEvolution
30 | operator = Operator(config=task_config, fitter=fitter,
31 | node_names={-1: 'In0', -2: 'In1', -3: 'In3', -4: 'In4', 0: 'act1', 1: 'act2'},
32 | max_generation=500, output_path="../output/")
33 |
34 | # obtain the winning genome.
35 | operator.obtain_winner()
36 |
37 | # display the winning genome.
38 | operator.display_genome(filename="example.CartPole-v0.fs")
39 |
40 | # evaluate the NeuroEvolution.
41 | operator.evaluation(environment=environment,
42 | input_type=TypeCorrect.List, output_type=TypeCorrect.Value)
43 |
--------------------------------------------------------------------------------
/example/cart_pole_v0_with_attacker.py:
--------------------------------------------------------------------------------
1 | import gym
2 |
3 | from neat import config, genome, reproduction, species, stagnation
4 |
5 | from ReverseEncodingTree.evolution.bean.attacker import CartPole_v0_Attacker, AttackType
6 | from ReverseEncodingTree.evolution.evolutor import FitDevice, FitProcess, TypeCorrect
7 | from ReverseEncodingTree.utils.operator import Operator
8 |
9 | has_environment = False
10 | for environment_space in gym.envs.registry.all():
11 | if "CartPole-v0" in environment_space.id:
12 | has_environment = True
13 | break
14 |
15 | if not has_environment:
16 | raise Exception("no environment named CartPole-v0.")
17 |
18 | environment = gym.make("CartPole-v0").unwrapped
19 | environment.reset()
20 |
21 | if __name__ == '__main__':
22 | # load configuration.
23 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
24 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
25 | "../configures/example/cart-pole-v0")
26 |
27 | # load evolution process.
28 | fitter = FitDevice(FitProcess())
29 | attacker = CartPole_v0_Attacker(attack_type=AttackType.Normal)
30 | fitter.set_environment(environment=environment, episode_steps=300, episode_generation=10,
31 | input_type=TypeCorrect.List, output_type=TypeCorrect.Value,
32 | attacker=attacker, noise_level=0.5)
33 |
34 | # initialize the NeuroEvolution
35 | operator = Operator(config=task_config, fitter=fitter,
36 | node_names={-1: 'In0', -2: 'In1', -3: 'In3', -4: 'In4', 0: 'act1', 1: 'act2'},
37 | max_generation=500, output_path="../output/")
38 |
39 | # obtain the winning genome.
40 | operator.obtain_winner()
41 |
42 | # display the winning genome.
43 | operator.display_genome(filename="example.CartPole-v0.fs")
44 |
45 | # evaluate the NeuroEvolution.
46 | operator.evaluation(environment=environment,
47 | input_type=TypeCorrect.List, output_type=TypeCorrect.Value)
48 |
--------------------------------------------------------------------------------
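FitDevice only calls attacker.attack(observation) with a numpy array and uses the returned, perturbed observation, so attackers are duck-typed. A hypothetical custom attacker (not part of the repository) could therefore be as small as:

import numpy


class UniformNoiseAttacker(object):
    # Hypothetical attacker: only an attack(observation) method is required.

    def __init__(self, scale=0.1):
        self.scale = scale

    def attack(self, observation):
        # add bounded uniform noise to the observation array.
        noise = numpy.random.uniform(-self.scale, self.scale, size=observation.shape)
        return observation + noise

# usage: fitter.set_environment(..., attacker=UniformNoiseAttacker(0.1), noise_level=0.5)
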
/example/lunar_lander_v2.py:
--------------------------------------------------------------------------------
1 | import gym
2 |
3 | from neat import config, genome, reproduction, species, stagnation
4 |
5 | from ReverseEncodingTree.evolution.evolutor import FitDevice, FitProcess, TypeCorrect
6 | from ReverseEncodingTree.utils.operator import Operator
7 |
8 | has_environment = False
9 | for environment_space in gym.envs.registry.all():
10 | if "LunarLander-v2" in environment_space.id:
11 | has_environment = True
12 | break
13 |
14 | if not has_environment:
15 | raise Exception("no environment named LunarLander-v2.")
16 |
17 | environment = gym.make("LunarLander-v2")
18 |
19 | if __name__ == '__main__':
20 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
21 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
22 | "../configures/example/lunar-lander-v2")
23 |
24 | # load evolution process.
25 | fitter = FitDevice(FitProcess())
26 |
27 | fitter.set_environment(environment=environment, episode_steps=300, episode_generation=5,
28 | input_type=TypeCorrect.List, output_type=TypeCorrect.Value)
29 |
30 | # initialize the NeuroEvolution
31 | operator = Operator(config=task_config, fitter=fitter,
32 | node_names={-1: '1', -2: '2', -3: '3', -4: '4',
33 | -5: '5', -6: '6', -7: '7', -8: '8',
34 | 0: 'fire engine'},
35 | max_generation=1000, output_path="../output/")
36 |
37 | # obtain the winning genome.
38 | operator.obtain_winner()
39 |
40 | # display the winning genome.
41 | operator.display_genome(filename="example.LunarLander-v2.fs")
42 |
43 | # evaluate the NeuroEvolution.
44 | operator.evaluation(environment=environment)
45 |
--------------------------------------------------------------------------------
/example/xor.py:
--------------------------------------------------------------------------------
1 | from neat import config, genome, reproduction, species, stagnation
2 |
3 | from ReverseEncodingTree.evolution.evolutor import EvalType, FitDevice, FitProcess
4 | from ReverseEncodingTree.utils.operator import Operator
5 |
6 | # 2-input XOR inputs and expected outputs.
7 | xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
8 | xor_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]
9 |
10 | if __name__ == '__main__':
11 | # load configuration.
12 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
13 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
14 | "../configures/example/xor")
15 |
16 | # load evolution process.
17 | fitter = FitDevice(FitProcess(init_fitness=4, eval_type=EvalType.ManhattanDistance))
18 | fitter.set_dataset({"i": xor_inputs, "o": xor_outputs})
19 |
20 | # initialize the NeuroEvolution
21 | operator = Operator(config=task_config, fitter=fitter, node_names={-1: 'A', -2: 'B', 0: 'A XOR B'},
22 | max_generation=500, output_path="../output/")
23 |
24 | # obtain the winning genome.
25 | operator.obtain_winner()
26 |
27 | # display the winning genome.
28 | operator.display_genome(filename="example.xor.fs")
29 |
--------------------------------------------------------------------------------
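Beyond drawing the winning genome, the evolved network can also be queried directly. A short sketch that would follow operator.obtain_winner() in the script above (it reuses operator, xor_inputs, and xor_outputs from that script):

# Query the evolved XOR network on the training cases.
network = operator.get_best_network()
for xor_input, xor_output in zip(xor_inputs, xor_outputs):
    prediction = network.activate(xor_input)
    print("input {}, expected {}, got {}".format(xor_input, xor_output, prediction))
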
/figures/cartpole.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/figures/cartpole.gif
--------------------------------------------------------------------------------
/figures/demo_RET2020.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/figures/demo_RET2020.png
--------------------------------------------------------------------------------
/figures/lunar_lander_success_example.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/figures/lunar_lander_success_example.gif
--------------------------------------------------------------------------------
/output/note.md:
--------------------------------------------------------------------------------
1 | do it by yourself.
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup
3 |
4 | setup(
5 | name="ReverseEncodingTree",
6 | version="1.2.2",
7 | description="library for the Reverse Encoding Tree",
8 | long_description="NeuroEvolution is one of the most competitive evolutionary learning strategies for "
9 | "designing novel neural networks for use in specific tasks. "
10 | "This library implemented an evolutionary strategy named Reverse Encoding Tree (RET), "
11 | "and expanded this strategy to evolve neural networks (Bi-NEAT and GS-NEAT). "
12 | "The experiments of RET contain the landscapes of Mount Everest and Rastrigin Function, and "
13 | "those of RET-based NEAT include logic gates, Cartpole V0, and Lunar Lander V2.",
14 | author="Haoling Zhang, Chao-Han Huck Yang",
15 | author_email="zhanghaoling@genomics.cn",
16 | url="https://github.com/HaolingZHANG/ReverseEncodingTree",
17 | packages=[
18 | "ReverseEncodingTree",
19 | "ReverseEncodingTree.benchmark",
20 | "ReverseEncodingTree.benchmark.dataset",
21 | "ReverseEncodingTree.benchmark.methods",
22 | "ReverseEncodingTree.benchmark.results",
23 | "ReverseEncodingTree.configures",
24 | "ReverseEncodingTree.configures.example",
25 | "ReverseEncodingTree.configures.task",
26 | "ReverseEncodingTree.evolution",
27 | "ReverseEncodingTree.evolution.bean",
28 | "ReverseEncodingTree.evolution.methods",
29 | "ReverseEncodingTree.example",
30 | "ReverseEncodingTree.output",
31 | "ReverseEncodingTree.tasks",
32 | "ReverseEncodingTree.utils",
33 | ],
34 | package_data={
35 | "ReverseEncodingTree": [
36 | "benchmark/dataset/mount_everest.csv",
37 | "benchmark/dataset/rastrigin.csv",
38 | "configures/example/cart-pole-v0",
39 | "configures/example/lunar-lander-v2",
40 | "configures/example/xor",
41 | "configures/task/cart-pole-v0.bi",
42 | "configures/task/cart-pole-v0.fs",
43 | "configures/task/cart-pole-v0.gs",
44 | "configures/task/cart-pole-v0.n",
45 | "configures/task/logic.bi",
46 | "configures/task/logic.fs",
47 | "configures/task/logic.gs",
48 | "configures/task/logic.n",
49 | "configures/task/lunar-lander-v2.bi",
50 | "configures/task/lunar-lander-v2.fs",
51 | "configures/task/lunar-lander-v2.gs",
52 | "configures/task/lunar-lander-v2.n",
53 | ]
54 | },
55 | package_dir={"ReverseEncodingTree": "."},
56 | install_requires=[
57 |         "numpy", "matplotlib", "graphviz", "neat-python", "scikit-learn", "gym", "six", "pandas"
58 | ],
59 | license="Apache",
60 | classifiers=[
61 | "License :: OSI Approved :: Apache Software License",
62 | "Programming Language :: Python :: 3",
63 | "Operating System :: OS Independent",
64 | ],
65 | keywords="Evolutionary Strategy, NeuroEvolution",
66 | )
67 |
--------------------------------------------------------------------------------
/tasks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/tasks/__init__.py
--------------------------------------------------------------------------------
/tasks/task_handler.py:
--------------------------------------------------------------------------------
1 | from ReverseEncodingTree.evolution.bean.attacker import *
2 | from ReverseEncodingTree.tasks.task_inform import *
3 |
4 |
5 | def run_imply():
6 | task = Logic(method_type=MethodType.N, logic_type=LogicType.IMPLY, max_generation=500, display_results=False)
7 | generations, counts = task.run(1000)
8 | save_distribution(counts, "../output/", "IMPLY", MethodType.N)
9 |
10 | task = Logic(method_type=MethodType.FS, logic_type=LogicType.IMPLY, max_generation=500, display_results=False)
11 | generations, counts = task.run(1000)
12 | save_distribution(counts, "../output/", "IMPLY", MethodType.FS)
13 |
14 | task = Logic(method_type=MethodType.BI, logic_type=LogicType.IMPLY, max_generation=500, display_results=False)
15 | generations, counts = task.run(1000)
16 | save_distribution(counts, "../output/", "IMPLY", MethodType.BI)
17 |
18 | task = Logic(method_type=MethodType.GS, logic_type=LogicType.IMPLY, max_generation=500, display_results=False)
19 | generations, counts = task.run(1000)
20 | save_distribution(counts, "../output/", "IMPLY", MethodType.GS)
21 |
22 |
23 | def run_nand():
24 | task = Logic(method_type=MethodType.N, logic_type=LogicType.NAND, max_generation=500, display_results=False)
25 | generations, counts = task.run(1000)
26 | save_distribution(counts, "../output/", "NAND", MethodType.N)
27 |
28 | task = Logic(method_type=MethodType.FS, logic_type=LogicType.NAND, max_generation=500, display_results=False)
29 | generations, counts = task.run(1000)
30 | save_distribution(counts, "../output/", "NAND", MethodType.FS)
31 |
32 | task = Logic(method_type=MethodType.BI, logic_type=LogicType.NAND, max_generation=500, display_results=False)
33 | generations, counts = task.run(1000)
34 | save_distribution(counts, "../output/", "NAND", MethodType.BI)
35 |
36 | task = Logic(method_type=MethodType.GS, logic_type=LogicType.NAND, max_generation=500, display_results=False)
37 | generations, counts = task.run(1000)
38 | save_distribution(counts, "../output/", "NAND", MethodType.GS)
39 |
40 |
41 | def run_nor():
42 | task = Logic(method_type=MethodType.N, logic_type=LogicType.NOR, max_generation=500, display_results=False)
43 | generations, counts = task.run(1000)
44 | save_distribution(counts, "../output/", "NOR", MethodType.N)
45 |
46 | task = Logic(method_type=MethodType.FS, logic_type=LogicType.NOR, max_generation=500, display_results=False)
47 | generations, counts = task.run(1000)
48 | save_distribution(counts, "../output/", "NOR", MethodType.FS)
49 |
50 | task = Logic(method_type=MethodType.BI, logic_type=LogicType.NOR, max_generation=500, display_results=False)
51 | generations, counts = task.run(1000)
52 | save_distribution(counts, "../output/", "NOR", MethodType.BI)
53 |
54 | task = Logic(method_type=MethodType.GS, logic_type=LogicType.NOR, max_generation=500, display_results=False)
55 | generations, counts = task.run(1000)
56 | save_distribution(counts, "../output/", "NOR", MethodType.GS)
57 |
58 |
59 | def run_xor():
60 | task = Logic(method_type=MethodType.N, logic_type=LogicType.XOR, max_generation=500, display_results=False)
61 | generations, counts = task.run(1000)
62 | save_distribution(counts, "../output/", "XOR", MethodType.N)
63 |
64 | task = Logic(method_type=MethodType.FS, logic_type=LogicType.XOR, max_generation=500, display_results=False)
65 | generations, counts = task.run(1000)
66 | save_distribution(counts, "../output/", "XOR", MethodType.FS)
67 |
68 | task = Logic(method_type=MethodType.BI, logic_type=LogicType.XOR, max_generation=500, display_results=False)
69 | generations, counts = task.run(1000)
70 | save_distribution(counts, "../output/", "XOR", MethodType.BI)
71 |
72 | task = Logic(method_type=MethodType.GS, logic_type=LogicType.XOR, max_generation=500, display_results=False)
73 | generations, counts = task.run(1000)
74 | save_distribution(counts, "../output/", "XOR", MethodType.GS)
75 |
76 |
77 | def run_cart_pole_v0():
78 | task = Game(method_type=MethodType.N, game_type=GameType.CartPole_v0, episode_steps=300, episode_generation=10,
79 | max_generation=500, display_results=False)
80 | generations, counts = task.run(1000)
81 | save_distribution(counts, "../output/", "CartPole_v0", MethodType.N)
82 |
83 | task = Game(method_type=MethodType.FS, game_type=GameType.CartPole_v0, episode_steps=300, episode_generation=10,
84 | max_generation=500, display_results=False)
85 | generations, counts = task.run(1000)
86 | save_distribution(counts, "../output/", "CartPole_v0", MethodType.FS)
87 |
88 | task = Game(method_type=MethodType.BI, game_type=GameType.CartPole_v0, episode_steps=300, episode_generation=10,
89 | max_generation=500, display_results=False)
90 | generations, counts = task.run(1000)
91 | save_distribution(counts, "../output/", "CartPole_v0", MethodType.BI)
92 |
93 | task = Game(method_type=MethodType.GS, game_type=GameType.CartPole_v0, episode_steps=300, episode_generation=10,
94 | max_generation=500, display_results=False)
95 | generations, counts = task.run(1000)
96 | save_distribution(counts, "../output/", "CartPole_v0", MethodType.GS)
97 |
98 |
99 | def run_cart_pole_v0_with_attack():
100 | attacker = CartPole_v0_Attacker(attack_type=AttackType.Gaussian, gaussian_peak=1000)
101 | noise_level = 1
102 |
103 | task = Game(method_type=MethodType.FS, game_type=GameType.CartPole_v0, episode_steps=500, episode_generation=20,
104 | attacker=attacker, noise_level=noise_level, max_generation=500, display_results=False)
105 | generations, counts = task.run(1000)
106 | save_distribution(counts, "../output/", "CartPole_v0", MethodType.FS)
107 |
108 | task = Game(method_type=MethodType.BI, game_type=GameType.CartPole_v0, episode_steps=300, episode_generation=10,
109 | attacker=attacker, noise_level=noise_level, max_generation=500, display_results=False)
110 | generations, counts = task.run(1000)
111 | save_distribution(counts, "../output/", "CartPole_v0", MethodType.BI)
112 |
113 | task = Game(method_type=MethodType.GS, game_type=GameType.CartPole_v0, episode_steps=300, episode_generation=10,
114 | attacker=attacker, noise_level=noise_level, max_generation=500, display_results=False)
115 | generations, counts = task.run(1000)
116 |     save_distribution(counts, "../output/", "CartPole_v0", MethodType.GS)
117 |
118 |
119 | def run_lunar_lander_v2():
120 | task = Game(method_type=MethodType.N, game_type=GameType.LunarLander_v2, episode_steps=100, episode_generation=2,
121 | max_generation=500, display_results=False)
122 | generations, counts = task.run(1000)
123 | save_distribution(counts, "../output/", "LunarLander_v2", MethodType.N)
124 |
125 | task = Game(method_type=MethodType.FS, game_type=GameType.LunarLander_v2, episode_steps=100, episode_generation=2,
126 | max_generation=500, display_results=False)
127 | generations, counts = task.run(1000)
128 | save_distribution(counts, "../output/", "LunarLander_v2", MethodType.FS)
129 |
130 | task = Game(method_type=MethodType.BI, game_type=GameType.LunarLander_v2, episode_steps=100, episode_generation=2,
131 | max_generation=500, display_results=False)
132 | generations, counts = task.run(1000)
133 | save_distribution(counts, "../output/", "LunarLander_v2", MethodType.BI)
134 |
135 | task = Game(method_type=MethodType.GS, game_type=GameType.LunarLander_v2, episode_steps=100, episode_generation=2,
136 | max_generation=500, display_results=False)
137 | generations, counts = task.run(1000)
138 | save_distribution(counts, "../output/", "LunarLander_v2", MethodType.GS)
139 |
--------------------------------------------------------------------------------
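These batch runners are plain functions without a __main__ guard. A minimal driver sketch, assuming the package is installed as ReverseEncodingTree; note that run_xor alone trains all four method types 1000 times each and writes the generation distributions to ../output/, so a full run is slow:

from ReverseEncodingTree.tasks.task_handler import run_xor

if __name__ == '__main__':
    run_xor()
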
/tasks/task_inform.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | import gym
4 | from neat import config, genome, reproduction, species, stagnation
5 |
6 | import ReverseEncodingTree.evolution.bean.genome as autogenome
7 | import ReverseEncodingTree.evolution.bean.species_set as autospecies
8 |
9 | from ReverseEncodingTree.evolution.evolutor import TypeCorrect, EvalType
10 | from ReverseEncodingTree.evolution.evolutor import FitDevice, FitProcess
11 | from ReverseEncodingTree.evolution.methods import bi, gs
12 | from ReverseEncodingTree.utils.operator import Operator
13 |
14 |
15 | class MethodType(Enum):
16 | N = 0
17 | FS = 1
18 | BI = 2
19 | GS = 3
20 |
21 |
22 | class LogicType(Enum):
23 | NAND = 1
24 | NOR = 2
25 | IMPLY = 3
26 | XOR = 4
27 |
28 |
29 | class GameType(Enum):
30 | CartPole_v0 = 0
31 | LunarLander_v2 = 1
32 |
33 |
34 | class Logic(object):
35 |
36 | def __init__(self, method_type, logic_type,
37 | max_generation, display_results=False, checkpoint=-1, stdout=False):
38 | """
39 | initialize the logical task.
40 |
41 | :param method_type: the evolution strategy, FS-NEAT, Bi-NEAT, or GS-NEAT.
42 | :param logic_type: the task type, IMPLY, NAND, NOR, or XOR.
43 | :param max_generation: maximum generation of the evolution strategy,
44 | if the generation exceeds the maximum, it will be terminated.
45 | :param display_results: whether result visualization is required.
46 | :param checkpoint: check the statistics point.
47 |         :param stdout: whether the genome information should be printed during the process.
48 | """
49 |
50 | data_inputs = None
51 | data_outputs = None
52 |
53 | if logic_type == LogicType.NAND:
54 | data_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
55 | data_outputs = [(1.0,), (1.0,), (1.0,), (0.0,)]
56 | self.filename = "nand."
57 | elif logic_type == LogicType.NOR:
58 | data_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
59 | data_outputs = [(0.0,), (0.0,), (0.0,), (1.0,)]
60 | self.filename = "nor."
61 | elif logic_type == LogicType.IMPLY:
62 | data_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
63 | data_outputs = [(1.0,), (1.0,), (0.0,), (1.0,)]
64 | self.filename = "imply."
65 | elif logic_type == LogicType.XOR:
66 | data_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
67 | data_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]
68 | self.filename = "xor."
69 |
70 | # load evolution process.
71 | fitter = FitDevice(FitProcess(init_fitness=4, eval_type=EvalType.ManhattanDistance))
72 | fitter.set_dataset({"i": data_inputs, "o": data_outputs})
73 |
74 | # load configuration.
75 | task_config = None
76 | if method_type == MethodType.N:
77 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
78 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
79 | "../configures/task/logic.n")
80 |             self.filename += "n"
81 | elif method_type == MethodType.FS:
82 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
83 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
84 | "../configures/task/logic.fs")
85 | self.filename += "fs"
86 | elif method_type == MethodType.BI:
87 | task_config = config.Config(autogenome.GlobalGenome, bi.Reproduction,
88 | autospecies.StrongSpeciesSet, stagnation.DefaultStagnation,
89 | "../configures/task/logic.bi")
90 | self.filename += "bi"
91 | elif method_type == MethodType.GS:
92 | task_config = config.Config(autogenome.GlobalGenome, gs.Reproduction,
93 | autospecies.StrongSpeciesSet, stagnation.DefaultStagnation,
94 | "../configures/task/logic.gs")
95 | self.filename += "gs"
96 |
97 | # initialize the NeuroEvolution
98 | self.operator = Operator(config=task_config, fitter=fitter,
99 | node_names={-1: 'A', -2: 'B', 0: 'A operate B'},
100 | max_generation=max_generation, checkpoint_value=checkpoint, stdout=stdout,
101 | output_path="../output/")
102 |
103 | # set whether display results
104 | self.display_results = display_results
105 |
106 | self.max_generation = max_generation
107 |
108 | def run(self, times):
109 | """
110 | multi-run task.
111 |
112 | :param times: running times.
113 |
114 | :return: results of generation and counts of each generation in the requested times.
115 | """
116 | generations = []
117 | time = 0
118 | while True:
119 | try:
120 | if times > 1:
121 | # print current times.
122 | print()
123 | print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
124 |                     print("processing round: " + str(time + 1))
125 | print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
126 | print()
127 |
128 | self.operator.obtain_winner()
129 | actual_generation, fit = self.operator.get_actual_generation()
130 |
131 | time += 1
132 | if time >= times:
133 | break
134 |
135 | # reset the hyper-parameters
136 | self.operator.reset()
137 |
138 | if not fit:
139 | continue
140 |
141 | generations.append(actual_generation)
142 |             except Exception:
143 |                 print("something went wrong.")
144 | self.operator.reset()
145 |
146 | counts = [0 for _ in range(self.max_generation + 1)]
147 | for generation in generations:
148 | counts[generation] += 1
149 |
150 | if self.display_results:
151 | self.operator.display_genome(filename=self.filename)
152 |
153 | return generations, counts
154 |
155 |
156 | class Game(object):
157 |
158 | def __init__(self, method_type, game_type,
159 | episode_steps, episode_generation, max_generation,
160 | attacker=None, noise_level=-1,
161 | display_results=False, checkpoint=-1, stdout=False):
162 | """
163 | initialize the game task.
164 |
165 | :param method_type: the evolution strategy, FS-NEAT, Bi-NEAT, or GS-NEAT.
166 | :param game_type: the task type, CartPole_v0 or LunarLander_v2.
167 | :param episode_steps: step parameter of game task
168 | :param episode_generation: generation parameter of game task.
169 | :param max_generation: maximum generation of the evolution strategy,
170 | if the generation exceeds the maximum, it will be terminated.
171 | :param attacker: noise attacker, see evolution/bean/attacker.py.
172 | :param noise_level: noise level.
173 | :param display_results: whether result visualization is required.
174 | :param checkpoint: check the statistics point.
175 |         :param stdout: whether the genome information should be printed during the process.
176 | """
177 |
178 | game_environment = None
179 | if game_type == GameType.CartPole_v0:
180 | game_environment = gym.make("CartPole-v0").unwrapped
181 | self.filename = "cart-pole-v0."
182 | self.node_name = {-1: 'In0', -2: 'In1', -3: 'In3', -4: 'In4', 0: 'act1', 1: 'act2'}
183 | elif game_type == GameType.LunarLander_v2:
184 | game_environment = gym.make("LunarLander-v2")
185 | self.filename = "lunar-lander-v2."
186 | self.node_name = {-1: '1', -2: '2', -3: '3', -4: '4', -5: '5', -6: '6', -7: '7', -8: '8', 0: 'fire engine'}
187 |
188 | fitter = FitDevice(FitProcess())
189 | fitter.set_environment(environment=game_environment,
190 | input_type=TypeCorrect.List, output_type=TypeCorrect.Value,
191 | episode_steps=episode_steps, episode_generation=episode_generation,
192 | attacker=attacker, noise_level=noise_level)
193 | # load configuration.
194 | task_config = None
195 | if method_type == MethodType.N:
196 | self.filename += "n"
197 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
198 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
199 | "../configures/task/" + self.filename)
200 | elif method_type == MethodType.FS:
201 | self.filename += "fs"
202 | task_config = config.Config(genome.DefaultGenome, reproduction.DefaultReproduction,
203 | species.DefaultSpeciesSet, stagnation.DefaultStagnation,
204 | "../configures/task/" + self.filename)
205 | elif method_type == MethodType.BI:
206 | self.filename += "bi"
207 | task_config = config.Config(autogenome.GlobalGenome, bi.Reproduction,
208 | autospecies.StrongSpeciesSet, stagnation.DefaultStagnation,
209 | "../configures/task/" + self.filename)
210 | elif method_type == MethodType.GS:
211 | self.filename += "gs"
212 | task_config = config.Config(autogenome.GlobalGenome, gs.Reproduction,
213 | autospecies.StrongSpeciesSet, stagnation.DefaultStagnation,
214 | "../configures/task/" + self.filename)
215 |
216 | # initialize the NeuroEvolution
217 | self.operator = Operator(config=task_config, fitter=fitter, node_names=self.node_name,
218 | max_generation=max_generation, checkpoint_value=checkpoint, stdout=stdout,
219 | output_path="../output/")
220 |
221 | # set whether display results
222 | self.display_results = display_results
223 |
224 | self.max_generation = max_generation
225 |
226 | def run(self, times):
227 | """
228 | multi-run task.
229 |
230 | :param times: running times.
231 |
232 | :return: results of generation and counts of each generation in the requested times.
233 | """
234 | generations = []
235 | time = 0
236 | while True:
237 | try:
238 | if times > 1:
239 | # print current times.
240 | print()
241 | print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
242 |                     print("processing round: " + str(time + 1))
243 | print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
244 | print()
245 |
246 | self.operator.obtain_winner()
247 | actual_generation, fit = self.operator.get_actual_generation()
248 |
249 | time += 1
250 | if time >= times:
251 | break
252 |
253 | # reset the hyper-parameters
254 | self.operator.reset()
255 |
256 | if not fit:
257 | continue
258 |
259 | generations.append(actual_generation)
260 |             except Exception:
261 |                 print("something went wrong.")
262 | self.operator.reset()
263 |
264 | counts = [0 for _ in range(self.max_generation + 1)]
265 | for generation in generations:
266 | counts[generation] += 1
267 |
268 | if self.display_results:
269 | self.operator.display_genome(filename=self.filename)
270 |
271 | return generations, counts
272 |
273 |
274 | def save_distribution(counts, parent_path, task_name, method_type):
275 | """
276 | save the distribution of the generations.
277 |
278 | :param counts: counts of each generation in the requested times.
279 | :param parent_path: parent path for saving file.
280 | :param task_name: task name, like XOR.
281 | :param method_type: type of method in evolution process.
282 | """
283 | path = parent_path + task_name + "."
284 | if method_type == MethodType.N:
285 | path += "n.csv"
286 | elif method_type == MethodType.FS:
287 | path += "fs.csv"
288 | elif method_type == MethodType.BI:
289 | path += "bi.csv"
290 | elif method_type == MethodType.GS:
291 | path += "gs.csv"
292 |
293 | with open(path, "w", encoding="utf-8") as save_file:
294 | for index, value in enumerate(counts):
295 | save_file.write(str(index) + ", " + str(value) + "\n")
296 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HaolingZHANG/ReverseEncodingTree/16558ae18d71e7b1f089bfc6f4d819ad017d0f25/utils/__init__.py
--------------------------------------------------------------------------------
/utils/operator.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import logging
3 | import pickle
4 | import random
5 | import numpy
6 | from enum import Enum
7 |
8 | from neat import population, checkpoint, statistics, reporting
9 |
10 | from ReverseEncodingTree.evolution.evolutor import LearnType, TypeCorrect
11 | from ReverseEncodingTree.utils import visualize
12 |
13 |
14 | class ReporterType(Enum):
15 | Statistic = 1
16 | Stdout = 2
17 | Checkpoint = 3
18 |
19 |
20 | class Operator(object):
21 |
22 | def __init__(self, config, fitter, node_names,
23 | max_generation=None, checkpoint_value=-1, stdout=False, output_path=None):
24 | """
25 | Initialize the operator of NeuroEvolution.
26 |
27 | :param config: configures of NEAT.
28 | :param fitter: fitter of NEAT.
29 |         :param node_names: node information for displaying the obtained network.
30 | :param max_generation: generations (iteration times).
31 | :param checkpoint_value: the point to save the current state.
32 | :param output_path: parent path for save file of displaying the genome and checkpoint.
33 | :param stdout: whether output the log.
34 | """
35 | # load configuration.
36 | self._config = config
37 |
38 | self._fitter = fitter
39 | self._node_names = node_names
40 | self._max_generation = max_generation
41 | self._output_path = output_path
42 |
43 |         # create the population by configuration, which is the top-level object for a NEAT task.
44 | self._population = population.Population(self._config)
45 |
46 | self._checkpoint_value = checkpoint_value
47 | if self._checkpoint_value >= 0:
48 | # create the check point reporter.
49 | self._checkpoint_reporter = checkpoint.Checkpointer(generation_interval=checkpoint_value,
50 | filename_prefix=output_path + "neat-checkpoint-")
51 | self._population.add_reporter(self._checkpoint_reporter)
52 |
53 | # create the stdout reporter.
54 | self._stdout = stdout
55 | self._stdout_reporter = reporting.StdOutReporter(stdout)
56 | self._population.add_reporter(self._stdout_reporter)
57 |
58 | # create the statistics reporter.
59 | self._statistics_reporter = statistics.StatisticsReporter()
60 | self._population.add_reporter(self._statistics_reporter)
61 |
62 | # best genome after training.
63 | self._winner = None
64 | self._obtain_success = False
65 |
66 | def obtain_winner(self):
67 | """
68 | Obtain the winning genome (network).
69 | """
70 | self._winner = self._population.run(self._fitter.genomes_fitness, self._max_generation)
71 | if self._winner.fitness >= self._config.fitness_threshold:
72 | self._obtain_success = True
73 |
74 | def get_best_genome(self):
75 | """
76 | get best genome.
77 |
78 | :return: best network.
79 | """
80 | return self._winner
81 |
82 | def get_final_genomes(self):
83 | """
84 | get final genomes (population)
85 |
86 | :return: final genomes
87 | """
88 | genomes = []
89 | for genome_index, genome in self._population.population.items():
90 | genomes.append(genome)
91 |
92 | genomes.sort(reverse=True, key=lambda g: g.fitness)
93 |
94 | return genomes
95 |
96 | def get_best_network(self):
97 | """
98 | get the winning network.
99 |
100 | :return: generated network.
101 | """
102 | if self._winner is None:
103 | logging.error("Please obtain winner first!")
104 | return self._fitter.generated_network(self._winner, self._config)
105 |
106 | def get_reporter(self, reporter_type=None):
107 | """
108 |         get the chosen reporter.
109 |
110 | :return: reporter.
111 | """
112 | if reporter_type == ReporterType.Statistic:
113 | return self._statistics_reporter
114 | elif reporter_type == ReporterType.Stdout:
115 | return self._stdout_reporter
116 | elif reporter_type == ReporterType.Checkpoint:
117 | return self._checkpoint_reporter
118 |
119 | return {"statistics": self._statistics_reporter,
120 | "stdout": self._stdout_reporter,
121 | "checkpoint": self._checkpoint_reporter}
122 |
123 | def display_genome(self, filename, node_names=None, genome=None, config=None, reporter=None):
124 | """
125 | display the genome.
126 |
127 | :param filename: file name of the output.
128 | :param node_names: node information for displaying the obtained network.
129 | :param genome: genome of network.
130 | :param config: configures of NEAT.
131 | :param reporter: statistics reporter.
132 | """
133 | if node_names is None:
134 | node_names = self._node_names
135 | if genome is None:
136 | genome = self._winner
137 | if config is None:
138 | config = self._config
139 | if reporter is None:
140 | reporter = self._statistics_reporter
141 |
142 | visualize.draw_network(config, genome, True, node_names=node_names,
143 | parent_path=self._output_path, filename=filename)
144 | visualize.plot_statistics(reporter, y_log=False, show=True,
145 | parent_path=self._output_path, filename=filename)
146 | visualize.plot_species(reporter, show=True,
147 | parent_path=self._output_path, filename=filename)
148 |
149 | def evaluation(self, dataset=None, environment=None,
150 | input_type=None, output_type=None, attacker=None, noise_level=None,
151 | run_minutes=1):
152 | """
153 | Evaluate the network on a testing dataset (Supervised Learning) or in an environment (Reinforcement Learning).
154 |
155 | :param dataset: testing dataset in Supervised Learning.
156 | :param environment: environment in Reinforcement Learning.
157 | :param input_type: type of input, TypeCorrect.List or TypeCorrect.Value.
158 | :param output_type: type of output, TypeCorrect.List or TypeCorrect.Value.
159 | :param attacker: noise attacker for observation.
160 | :param noise_level: noise level of attacker.
161 | :param run_minutes: running minutes in Reinforcement Learning.
162 | :return: (number of correct samples, total samples) in Supervised Learning; None in Reinforcement Learning.
163 | """
164 | if self._fitter.learn_type == LearnType.Supervised:
165 | network, obtain_outputs = self.get_best_network(), []
166 | for current_input in dataset.get("i"):
167 | obtain_outputs.append(network.activate(current_input))
168 |
169 | right = 0
170 | for obtain_output, expected_output in zip(obtain_outputs, dataset.get("o")):
171 | is_right = True
172 | for obtain_value, expected_value in zip(obtain_output, expected_output):
173 | if obtain_value != expected_value:
174 | is_right = False
175 | break
176 | right += int(is_right)
177 |
178 | return right, len(dataset.get("o"))
179 |
180 | elif self._fitter.learn_type == LearnType.Reinforced:
181 | has_attack = attacker is not None and noise_level is not None
182 |
183 | network = self.get_best_network()
184 | start_time = datetime.datetime.now()
185 | while True:
186 | observation = environment.reset()
187 | attack_count = 0
188 | total_count = 0
189 | while True:
190 | environment.render()
191 |
192 | # set attack if has attack.
193 | if has_attack and random.randint(0, 100) < noise_level * 100:
194 | if type(observation) is not numpy.ndarray:
195 | observation = numpy.array([observation])
196 | actual_observation = attacker.attack(observation)
197 | attack_count += 1
198 | else:
199 | actual_observation = observation
200 |
201 | # check input type
202 | if input_type == TypeCorrect.List and type(actual_observation) is not numpy.ndarray:
203 | actual_observation = numpy.array([actual_observation])
204 | if input_type == TypeCorrect.Value and type(actual_observation) is numpy.ndarray:
205 | actual_observation = actual_observation[0]
206 |
207 | action_values = network.activate(actual_observation)
208 | action = numpy.argmax(action_values)
209 |
210 | # check output type
211 | if output_type == TypeCorrect.List and type(action) is not numpy.ndarray:
212 | action = numpy.array([action])
213 | if output_type == TypeCorrect.Value and type(action) is numpy.ndarray:
214 | action = action[0]
215 |
216 | _, _, done, _ = environment.step(action)
217 |
218 | if done:
219 | if has_attack:
220 | print("with: ", round(attack_count / float(total_count + 1), 2), "% attack.")
221 | break
222 |
223 | total_count += 1
224 |
225 | if (datetime.datetime.now() - start_time).seconds > run_minutes * 60:
226 | environment.close()
227 | break
228 |
229 | return None
230 |
231 | def get_actual_generation(self):
232 | """
233 | Get the actual number of generations run, taken from the stdout reporter.
234 |
235 | :return: actual generation and whether the evolution succeeded.
236 | """
237 | return self._stdout_reporter.generation, self._obtain_success
238 |
239 | def save_best_genome(self, path):
240 | """
241 | Save the best genome to a file.
242 |
243 | :param path: file path.
244 | """
245 | with open(path, "wb") as file:
246 | pickle.dump(self._winner, file)
247 |
248 | def save_best_network(self, path):
249 | """
250 | Save the network created by the best genome to a file.
251 |
252 | :param path: file path.
253 | """
254 | with open(path, "wb") as file:
255 | pickle.dump(self.get_best_network(), file)
256 |
257 | def load_best_genome(self, path):
258 | """
259 | load best genome from file.
260 |
261 | :param path: file path.
262 | """
263 | with open(path, "rb") as file:
264 | self._winner = pickle.load(file)
265 |
266 | def reset(self):
267 | while True:
268 | # noinspection PyBroadException
269 | try:
270 | # re-create the population from the configuration; the population is the top-level object for a NEAT task.
271 | self._population = population.Population(self._config)
272 | break
273 | except Exception:
274 | print("re-create again!")
275 |
276 | if self._checkpoint_value >= 0:
277 | # create the check point reporter.
278 | self._checkpoint_reporter = checkpoint.Checkpointer(generation_interval=self._checkpoint_value,
279 | filename_prefix=self._output_path + "neat-checkpoint-")
280 | self._population.add_reporter(self._checkpoint_reporter)
281 |
282 | # create the stdout reporter.
283 | self._stdout_reporter = reporting.StdOutReporter(self._stdout)
284 | self._population.add_reporter(self._stdout_reporter)
285 |
286 | # create the statistics reporter.
287 | self._statistics_reporter = statistics.StatisticsReporter()
288 | self._population.add_reporter(self._statistics_reporter)
289 |
290 | # best genome after training.
291 | self._winner = None
292 | self._obtain_success = False
293 |
--------------------------------------------------------------------------------
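
A usage sketch for the Operator class above (illustrative only, not part of this repository): it assumes a NEAT configuration file named "xor_neat.cfg", an "output/" directory, and a stand-in XORFitter that implements just the two members Operator relies on during training, genomes_fitness(genomes, config) and generated_network(genome, config). A real run would instead use the fitters and configuration files shipped with the project.

import neat

from utils.operator import Operator


class XORFitter(object):
    # stand-in fitter: only the members used by Operator during training are implemented.
    inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
    outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]

    def genomes_fitness(self, genomes, config):
        # signature required by neat.Population.run: assign a fitness to every (id, genome) pair.
        for genome_id, genome in genomes:
            network = self.generated_network(genome, config)
            genome.fitness = 4.0
            for sample_input, sample_output in zip(self.inputs, self.outputs):
                genome.fitness -= (network.activate(sample_input)[0] - sample_output[0]) ** 2

    @staticmethod
    def generated_network(genome, config):
        # build the phenotype network from a genome, as Operator.get_best_network() expects.
        return neat.nn.FeedForwardNetwork.create(genome, config)


task_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                          neat.DefaultSpeciesSet, neat.DefaultStagnation,
                          "xor_neat.cfg")  # assumed path to a NEAT configuration file.

operator = Operator(config=task_config, fitter=XORFitter(),
                    node_names={-1: "A", -2: "B", 0: "A XOR B"},
                    max_generation=200, checkpoint_value=10,
                    stdout=True, output_path="output/")

operator.obtain_winner()                        # evolve until the fitness threshold or max_generation.
generation, success = operator.get_actual_generation()
operator.display_genome(filename="xor_winner")  # network drawing plus fitness / speciation plots.
operator.save_best_genome("output/xor_winner.genome")
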
/utils/visualize.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import copy
4 | import warnings
5 |
6 | import graphviz
7 | import matplotlib.pyplot as plt
8 | import numpy
9 |
10 |
11 | def plot_statistics(statistics, y_log=False, show=False, parent_path="", filename=None):
12 | """
13 | Plot the population's average and best fitness.
14 |
15 | :param statistics: statistics of NeuroEvolution.
16 | :param y_log: whether to use a logarithmic scale on the y-axis.
17 | :param show: whether to show the plot.
18 | :param parent_path: parent path of output files.
19 | :param filename: file name of the output.
20 | """
21 | generation = range(len(statistics.most_fit_genomes))
22 | best_fitness = [c.fitness for c in statistics.most_fit_genomes]
23 | avg_fitness = numpy.array(statistics.get_fitness_mean())
24 | stdev_fitness = numpy.array(statistics.get_fitness_stdev())
25 |
26 | plt.plot(generation, avg_fitness, 'b-', label="average")
27 | plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
28 | plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
29 | plt.plot(generation, best_fitness, 'r-', label="best")
30 |
31 | plt.title("Population's average and best fitness")
32 | plt.xlabel("Generations")
33 | plt.ylabel("Fitness")
34 | plt.grid()
35 | plt.legend(loc="best")
36 | if y_log:
37 | plt.gca().set_yscale('symlog')
38 |
39 | if filename is None:
40 | plt.savefig(parent_path + "fitness.svg")
41 | else:
42 | plt.savefig(parent_path + filename + "_fitness.svg")
43 |
44 | if show:
45 | plt.show()
46 |
47 | plt.close()
48 |
49 |
50 | def plot_species(statistics, show=False, parent_path="", filename=None):
51 | """
52 | Visualize speciation throughout evolution.
53 |
54 | :param statistics: statistics of NeuroEvolution.
55 | :param show: whether to show the plot.
56 | :param parent_path: parent path of output files.
57 | :param filename: file name of the output.
58 | """
59 | if plt is None:
60 | warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
61 | return
62 |
63 | species_sizes = statistics.get_species_sizes()
64 | num_generations = len(species_sizes)
65 | curves = numpy.array(species_sizes).T
66 |
67 | fig, ax = plt.subplots()
68 | ax.stackplot(range(num_generations), *curves)
69 |
70 | plt.title("Speciation")
71 | plt.ylabel("Size per Species")
72 | plt.xlabel("Generations")
73 |
74 | if filename is None:
75 | plt.savefig(parent_path + "speciation.svg")
76 | else:
77 | plt.savefig(parent_path + filename + "_speciation.svg")
78 |
79 | if show:
80 | plt.show()
81 |
82 | plt.close()
83 |
84 |
85 | def draw_network(config, genome, show=False, parent_path="", filename="best_network", node_names=None,
86 | show_disabled=True, prune_unused=False, node_colors=None, file_format='svg'):
87 | """
88 | Receive a genome and draw a neural network with arbitrary topology.
89 |
90 | :param config: configures of NEAT.
91 | :param genome: genome of network.
92 | :param show: whether to show the rendered network.
93 | :param parent_path: parent path of output files.
94 | :param filename: file name of the output.
95 | :param node_names: node information for displaying the obtained network.
96 | :param show_disabled: whether to show disabled connections.
97 | :param prune_unused: whether to prune nodes that are not connected to an output.
98 | :param node_colors: colors of the nodes.
99 | :param file_format: format of the saved file.
100 | """
101 | # bail out if the optional graphviz dependency is unavailable.
102 | if graphviz is None:
103 | warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
104 | return
105 |
106 | if node_names is None:
107 | node_names = {}
108 |
109 | assert type(node_names) is dict
110 |
111 | if node_colors is None:
112 | node_colors = {}
113 |
114 | assert type(node_colors) is dict
115 |
116 | node_attrs = {'shape': 'circle', 'fontsize': '9', 'width': '0.2', 'height': '0.2'}
117 |
118 | dot = graphviz.Digraph(format=file_format, node_attr=node_attrs)
119 |
120 | inputs = set()
121 | for k in config.genome_config.input_keys:
122 | inputs.add(k)
123 | name = node_names.get(k, str(k))
124 | input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
125 | dot.node(name, _attributes=input_attrs)
126 |
127 | outputs = set()
128 | for k in config.genome_config.output_keys:
129 | outputs.add(k)
130 | name = node_names.get(k, str(k))
131 | node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
132 |
133 | dot.node(name, _attributes=node_attrs)
134 |
135 | if prune_unused:
136 | connections = set()
137 | for cg in genome.connections.values():
138 | if cg.enabled or show_disabled:
139 | connections.add(cg.key)  # connection genes store the (input node, output node) pair in "key".
140 |
141 | used_nodes = copy.copy(outputs)
142 | pending = copy.copy(outputs)
143 | while pending:
144 | new_pending = set()
145 | for a, b in connections:
146 | if b in pending and a not in used_nodes:
147 | new_pending.add(a)
148 | used_nodes.add(a)
149 | pending = new_pending
150 | else:
151 | used_nodes = set(genome.nodes.keys())
152 |
153 | for n in used_nodes:
154 | if n in inputs or n in outputs:
155 | continue
156 |
157 | attrs = {'style': 'filled', 'fillcolor': node_colors.get(n, 'white')}
158 | dot.node(str(n), _attributes=attrs)
159 |
160 | for cg in genome.connections.values():
161 | if cg.enabled or show_disabled:
162 | input_value, output_value = cg.key
163 | a = node_names.get(input_value, str(input_value))
164 | b = node_names.get(output_value, str(output_value))
165 | style = 'solid' if cg.enabled else 'dotted'
166 | color = 'green' if cg.weight > 0 else 'red'
167 | width = str(0.1 + abs(cg.weight / 5.0))
168 | dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
169 |
170 | dot.render(parent_path + filename, view=show)
171 |
172 | return dot
173 |
--------------------------------------------------------------------------------
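
The helpers in visualize.py can also be called directly, outside the Operator wrapper, on any finished neat-python run: draw_network renders the winning topology with graphviz, while plot_statistics and plot_species chart fitness and speciation from a StatisticsReporter. A minimal sketch follows; the placeholder fitness function, the configuration path "xor_neat.cfg", and the "output/" directory are assumptions for illustration, not code from this repository.

import neat

from utils import visualize


def fitness_function(genomes, config):
    # placeholder scoring so the sketch is self-contained; a real task would evaluate genomes here.
    for genome_id, genome in genomes:
        genome.fitness = float(len(genome.connections))


neat_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                          neat.DefaultSpeciesSet, neat.DefaultStagnation,
                          "xor_neat.cfg")  # assumed path to a NEAT configuration file.

neat_population = neat.Population(neat_config)
stats = neat.StatisticsReporter()
neat_population.add_reporter(stats)
winner = neat_population.run(fitness_function, 10)

# render the winning topology and plot the training curves; files are written under "output/".
visualize.draw_network(neat_config, winner, show=False, parent_path="output/", filename="demo",
                       node_names={-1: "in_1", -2: "in_2", 0: "out"})
visualize.plot_statistics(stats, y_log=False, show=False, parent_path="output/", filename="demo")
visualize.plot_species(stats, show=False, parent_path="output/", filename="demo")
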