├── pyneurgen
├── __init__.py
├── test
│ ├── __init__.py
│ ├── test_sample_neural_genetic.py
│ ├── test_sample_neural_network.py
│ ├── test_sample_grammatical_evolution.py
│ ├── test_utilities.py
│ ├── test_recurrent.py
│ ├── test_layers.py
│ ├── test_neuralnet.py
│ ├── test_nodes.py
│ ├── test_genotypes.py
│ └── test_fitness.py
├── demo
│ ├── .directory
│ ├── sample_grammatical_evolution.py
│ ├── simple_network_with_graphs.py
│ └── sample_neural_genetic.py
├── utilities.py
├── Makefile
├── layers.py
├── recurrent.py
├── nodes.py
├── genotypes.py
└── fitness.py
├── setup.cfg
├── MANIFEST.in
├── .gitignore
├── PKG-INFO
├── setup.py
└── README.md
/pyneurgen/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/pyneurgen/test/__init__.py:
--------------------------------------------------------------------------------
# Package initializer for the test suite.
# The guard below previously referenced ``unittest`` without importing
# it, which raised NameError when this file was executed directly.
import unittest

if __name__ == '__main__':
    unittest.main()
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [egg_info]
2 | tag_build =
3 | tag_date = 0
4 | tag_svn_revision = 0
5 |
6 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include pyneurgen *
2 |
3 | recursive-exclude pyneurgen ?~ *coverage
4 |
--------------------------------------------------------------------------------
/pyneurgen/demo/.directory:
--------------------------------------------------------------------------------
1 | [Dolphin]
2 | Timestamp=2012,2,28,8,17,19
3 | Version=2
4 | ViewMode=1
5 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_sample_neural_genetic.py:
--------------------------------------------------------------------------------
import unittest

# ``from os import sys`` relied on ``os`` happening to re-export the
# ``sys`` module (an implementation detail); import it directly.
import sys
from datetime import datetime, timedelta

# Make the package under test importable when run from the test dir.
sys.path.append(r'../')
--------------------------------------------------------------------------------
/pyneurgen/test/test_sample_neural_network.py:
--------------------------------------------------------------------------------
import unittest

# ``from os import sys`` relied on ``os`` happening to re-export the
# ``sys`` module (an implementation detail); import it directly.
import sys
from datetime import datetime, timedelta

# Make the package under test importable when run from the test dir.
sys.path.append(r'../')
--------------------------------------------------------------------------------
/pyneurgen/test/test_sample_grammatical_evolution.py:
--------------------------------------------------------------------------------
import unittest

# ``from os import sys`` relied on ``os`` happening to re-export the
# ``sys`` module (an implementation detail); import it directly.
import sys
from datetime import datetime, timedelta

# Make the package under test importable when run from the test dir.
sys.path.append(r'../')
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 |
5 | # C extensions
6 | *.so
7 |
8 | # Distribution / packaging
9 | .Python
10 | env/
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | .idea/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 |
46 | # Translations
47 | *.mo
48 | *.pot
49 |
50 | # Django stuff:
51 | *.log
52 |
53 | # Sphinx documentation
54 | docs/_build/
55 |
56 | # PyBuilder
57 | target/
--------------------------------------------------------------------------------
/pyneurgen/test/test_utilities.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from pyneurgen.utilities import rand_weight, base10tobase2, base2tobase10
4 |
5 |
class TestUtilities(unittest.TestCase):
    """Tests for the pyneurgen.utilities helper functions."""

    def test_test_rand_weight(self):
        """rand_weight values must stay within +/- constraint."""

        constraint = 1.0
        sample = [rand_weight() for i in range(1000)]
        self.assertGreaterEqual(constraint, max(sample))
        self.assertLessEqual(-constraint, min(sample))

        constraint = 0.5
        sample = [rand_weight(constraint) for i in range(1000)]
        self.assertGreaterEqual(constraint, max(sample))
        self.assertLessEqual(-constraint, min(sample))

    def test_test_base10tobase2(self):
        """Conversion to binary strings, with and without zero-fill."""

        base10 = "3"
        self.assertEqual("11", base10tobase2(base10))

        base10 = "34"
        self.assertEqual("100010", base10tobase2(base10))

        base10 = "34"
        zfill = 8
        self.assertEqual("00100010", base10tobase2(base10, zfill))

        # A zfill shorter than the binary representation must raise.
        # (The original test repeated this exact case twice; once is
        # enough.)
        base10 = "34"
        zfill = 5
        self.assertRaises(ValueError, base10tobase2, base10, zfill)

        base10 = "0"
        self.assertEqual("0", base10tobase2(base10))

        # Negative values keep their sign in the output string.
        base10 = "-34"
        self.assertEqual("-100010", base10tobase2(base10))

    def test_test_base2tobase10(self):
        """Conversion from binary strings back to ints."""

        base2 = "100010"
        self.assertEqual(34, base2tobase10(base2))

        base2 = "1"
        self.assertEqual(1, base2tobase10(base2))

        base2 = "10"
        self.assertEqual(2, base2tobase10(base2))

60 |
# Allow running this test module directly: python test_utilities.py
if __name__ == '__main__':
    unittest.main()
63 |
--------------------------------------------------------------------------------
/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.1
2 | Name: pyneurgen
3 | Version: 0.3.1
4 | Summary: Python Neural Genetic Hybrids
5 | Home-page: http://pyneurgen.sourceforge.net
6 | Author: Don Smiley
7 | Author-email: ds@sidorof.com
8 | License: GPL
9 | Description:
10 | This package provides the Python "pyneurgen" module, which contains several
11 | classes for implementing grammatical evolution, a form of genetic
12 | programming, and classes for neural networks. These classes enable the
13 | creation of hybrid models that can embody the strengths of grammatical
14 | evolution and neural networks.
15 |
16 | While neural networks can be adept at solving non-linear problems, some
17 | problems remain beyond reach. For example, a difficult search space can
18 | cause suboptimal solutions to be reached. Also, multiobjective problems
19 | become extremely difficult, if not impossible. With genetic algorithms, a
20 | more thorough search can be made.
21 | Keywords: grammatical evolution programming neural networks genetic algorithms
22 | Platform: UNKNOWN
23 | Classifier: Development Status :: 3 - Alpha
24 | Classifier: Environment :: Console
25 | Classifier: Intended Audience :: Developers
26 | Classifier: Intended Audience :: Science/Research
27 | Classifier: License :: OSI Approved :: GNU General Public License (GPL)
28 | Classifier: Natural Language :: English
29 | Classifier: Operating System :: OS Independent
30 | Classifier: Programming Language :: Python :: 2.6
31 | Classifier: Programming Language :: Python :: 2.7
32 | Classifier: Topic :: Adaptive Technologies
33 | Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
34 | Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
35 | Classifier: Topic :: Scientific/Engineering :: Mathematics
36 | Classifier: Topic :: Software Development :: Libraries :: Python Modules
37 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Packaging script for pyneurgen.

Fixes over the previous version: the duplicate ``setup`` import and
the unused ``find_packages`` import were removed, and the project URL
previously read ``http://pyneurgen/sourceforge.net`` (slash instead of
a dot before ``sourceforge.net``).
"""
from setuptools import setup

setup(
    name='pyneurgen',
    version='0.3.1',
    description='Python Neural Genetic Hybrids',
    author='Don Smiley',
    author_email='ds@sidorof.com',
    url='http://pyneurgen.sourceforge.net',
    packages=['pyneurgen'],
    package_dir={'pyneurgen': 'pyneurgen'},
    long_description="""
This package provides the Python "pyneurgen" module, which contains several
classes for implementing grammatical evolution, a form of genetic
programming, and classes for neural networks. These classes enable the
creation of hybrid models that can embody the strengths of grammatical
evolution and neural networks.

While neural networks can be adept at solving non-linear problems, some
problems remain beyond reach. For example, a difficult search space can
cause suboptimal solutions to be reached. Also, multiobjective problems
become extremely difficult, if not impossible. With genetic algorithms, a
more thorough search can be made.""",
    keywords='grammatical evolution programming neural networks genetic algorithms',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Adaptive Technologies',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='GPL',
)
46 |
--------------------------------------------------------------------------------
/pyneurgen/utilities.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2012 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see .
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This module implements some basic utilities for use with Grammatical Evolution
23 |
24 | """
25 |
26 | from random import random
27 |
def rand_weight(constraint=1.0):
    """
    Return a random weight centered around 0, limited by *constraint*.

    For example, with a constraint of .5 the returned weight lies
    between -.5 and +.5; the default constraint of 1.0 gives a weight
    between -1 and +1.
    """
    spread = 2.0 * constraint
    return random() * spread - constraint
35 |
36 |
def base10tobase2(value, zfill=0):
    """
    Convert a base-10 value to a base-2 string.

    value:  anything acceptable to int(); typically a numeric string.
    zfill:  if non-zero, left-pad the binary digits with zeros out to
            this length.  Raises ValueError if the binary digits are
            already longer than the requested width.

    Note that the incoming value is converted to an int, and that for
    a negative number the sign is prepended after padding, so the
    result is one char longer than the zfill specified.
    """

    val = int(value)
    # format() yields the binary digits directly, without the '0b'
    # prefix that bin() would require slicing off.
    new_value_str = format(abs(val), 'b')

    if zfill:
        if zfill < len(new_value_str):
            raise ValueError("""
            Base 2 version of %s is longer, %s, than the zfill limit, %s
            """ % (value, new_value_str, zfill))
        new_value_str = new_value_str.zfill(zfill)

    if val < 0:
        new_value_str = "-" + new_value_str

    return new_value_str
64 |
65 |
def base2tobase10(value):
    """
    Convert a base-2 string (optionally signed, e.g. "-100010") to an
    int.  Unlike base10tobase2, there is no zfill option.

    Raises ValueError for input that is not a valid binary literal —
    including the empty string, which previously escaped as an
    IndexError from the hand-rolled sign handling.
    """

    # int() with an explicit base accepts a leading '-' natively, so
    # there is no need to splice a '0b' prefix in by hand.
    return int(str(value), 2)
82 |
--------------------------------------------------------------------------------
/pyneurgen/demo/sample_grammatical_evolution.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2008 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This sample program shows a simple use of grammatical evolution. The
23 | evolutionary process drives the fitness values towards zero.
24 |
25 | """
26 |
27 | from pyneurgen.grammatical_evolution import GrammaticalEvolution
28 | from pyneurgen.fitness import FitnessElites, FitnessTournament
29 | from pyneurgen.fitness import ReplacementTournament, MAX, MIN, CENTER
30 |
31 |
# NOTE(review): this grammar looks garbled — BNF non-terminals such as
# <expr> appear to have been stripped out (likely treated as markup at
# some point), leaving empty productions like "::= | | |".  Compare
# against the original pyneurgen sample before relying on it.
bnf = """
::= | | |
math.log(abs()) | | math.sin( )|
value | ()
::= + | - | * | /
::= + | -
::= pow(, )
::= +
::= -
::= .
::= | 1 | 2 | 3 | 4 | 5 | 6 |
7 | 8 | 9 | 0
::=
import math
total = 0.0
for i in range(100):
value = float(i) / float(100)
total += abs( - pow(value, 3))
fitness = total
self.set_bnf_variable('', fitness)
"""
53 |
54 |
# Configure and run the grammatical-evolution search.
ges = GrammaticalEvolution()

ges.set_bnf(bnf)
ges.set_genotype_length(start_gene_length=20,
                        max_gene_length=50)
ges.set_population_size(50)
ges.set_wrap(True)

# Stop after 1000 generations, or sooner once a fitness of .01 is hit.
ges.set_max_generations(1000)
ges.set_fitness_type(MIN, .01)

ges.set_max_program_length(500)
ges.set_timeouts(10, 120)
ges.set_fitness_fail(100.0)

ges.set_mutation_rate(.025)
ges.set_fitness_selections(
    FitnessElites(ges.fitness_list, .05),
    FitnessTournament(ges.fitness_list, tournament_size=2))

ges.set_crossover_rate(.2)
ges.set_children_per_crossover(2)
ges.set_mutation_type('m')
# The original script called set_max_fitness_rate twice (.5 and then
# .25); only the last call took effect, so the redundant first call
# has been dropped and the effective value kept.
ges.set_max_fitness_rate(.25)

ges.set_replacement_selections(
    ReplacementTournament(ges.fitness_list, tournament_size=3))

ges.set_maintain_history(True)
ges.create_genotypes()
print(ges.run())
print(ges.fitness_list.sorted())
print()
print()
gene = ges.population[ges.fitness_list.best_member()]
print(gene.get_program())
92 |
--------------------------------------------------------------------------------
/pyneurgen/demo/simple_network_with_graphs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2008 Don Smiley ds at sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | An example program showing the use of the neurons module. This program assumes
23 | that you have installed matplotlib as well, which can be obtained at
24 | matplotlib.sourceforge.net.
25 |
26 | There are two inputs, one of which is random. The neural network will need to
27 | learn to ignore that input. The target values are a modified sinusoidal.
28 |
29 | """
30 |
31 | import random
32 | import math
33 |
34 | import matplotlib
35 | from pylab import plot, legend, subplot, grid, xlabel, ylabel, show, title
36 |
37 | from pyneurgen.neuralnet import NeuralNet
38 | from pyneurgen.nodes import BiasNode, Connection
39 |
# all samples are drawn from this population: a sinusoid indexed by
# position, plus gaussian noise whose mean drifts upward with the index
pop_len = 200
factor = 1.0 / float(pop_len)
population = [[i, math.sin(float(i) * factor * 10.0) + \
        random.gauss(float(i) * factor, .2)]
        for i in range(pop_len)]

# Parallel lists of network inputs and target values, filled below.
all_inputs = []
all_targets = []
49 |
def population_gen(population):
    """
    Yield the members of *population* one at a time, in random order.

    The input sequence itself is left untouched: a shallow copy is
    shuffled and then iterated.
    """

    shuffled = list(population)
    random.shuffle(shuffled)

    for member in shuffled:
        yield member
62 |
# Build the inputs: pair each (randomly ordered) population member with
# a throw-away random input the network should learn to ignore.
for position, target in population_gen(population):
    pos = float(position)
    # input 0 is pure noise; input 1 is the scaled position
    all_inputs.append([random.random(), pos * factor])
    all_targets.append([target])

print("input statistics")
print(" random:", min([item[0] for item in all_inputs]), \
        max([item[0] for item in all_inputs]))
print(" useful:", min([item[1] for item in all_inputs]), \
        max([item[1] for item in all_inputs]))
print("target statistics:", min(all_targets), max(all_targets))
75 |
net = NeuralNet()
# 2 inputs, one hidden layer of 10 nodes, 1 output
net.init_layers(2, [10], 1)

net.randomize_network()
net.set_halt_on_extremes(True)

# Set to constrain beginning weights to -.5 to .5
# Just to show we can
net.set_random_constraint(.5)
net.set_learnrate(.1)

net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

# First 80% of the (already shuffled) samples are used for learning,
# the remainder for testing.
length = len(all_inputs)
learn_end_point = int(length * .8)

net.set_learn_range(0, learn_end_point)
net.set_test_range(learn_end_point + 1, length - 1)

# Set the hidden layer activation type tanh
net.layers[1].set_activation_type('tanh')

net.learn(epochs=125, show_epoch_results=True,
    random_testing=False)

mse = net.test()
print("test mse = ", mse)

# Recover the test positions (scaled input 1) for plotting.
test_positions = [item[0][1] * 1000.0 for item in net.get_test_data()]

all_targets1 = [item[0][0] for item in net.test_targets_activations]
allactuals = [item[1][0] for item in net.test_targets_activations]

# This is quick and dirty, but it will show the results
subplot(3, 1, 1)
plot([i[1] for i in population])
title("Population")
grid(True)

subplot(3, 1, 2)
plot(test_positions, all_targets1, 'bo', label='targets')
plot(test_positions, allactuals, 'ro', label='actuals')
grid(True)
legend(loc='lower left', numpoints=1)
title("Test Target Points vs Actual Points")

subplot(3, 1, 3)
plot(range(1, len(net.accum_mse) + 1, 1), net.accum_mse)
xlabel('epochs')
ylabel('mean squared error')
grid(True)
title("Mean Squared Error by Epoch")

show()
131 |
132 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_recurrent.py:
--------------------------------------------------------------------------------
import unittest

# ``from os import sys`` relied on ``os`` happening to re-export the
# ``sys`` module (an implementation detail); import it directly.
import sys

# Make the package under test importable when run from the test dir.
sys.path.append(r'../')

from pyneurgen.recurrent import RecurrentConfig, ElmanSimpleRecurrent
from pyneurgen.recurrent import JordanRecurrent, NARXRecurrent

from pyneurgen.neuralnet import NeuralNet
from pyneurgen.nodes import Node, CopyNode, Connection
from pyneurgen.nodes import NODE_OUTPUT, NODE_HIDDEN, NODE_INPUT, NODE_COPY, NODE_BIAS
11 |
12 |
class RecurrentConfigTest(unittest.TestCase):
    """
    Tests RecurrentConfig

    """

    def setUp(self):
        """Build a minimal 2-1-1 network and a default config."""
        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)
        self.rec_config = RecurrentConfig()

    def test_apply_config(self):
        """Anything other than a NeuralNet must be rejected."""
        self.assertRaises(
            ValueError,
            self.rec_config.apply_config, 'not neural net')

    def test__apply_config(self):

        print('test__apply_config not yet implemented')

    def test_fully_connect(self):
        """_fully_connect wires the lower node into every upper node."""
        lower = Node()
        upper_a = Node()
        upper_b = Node()

        self.rec_config._fully_connect(lower, [upper_a, upper_b])

        for upper in (upper_a, upper_b):
            conn = upper.input_connections[0]
            self.assertEqual(lower, conn.lower_node)
            self.assertEqual(upper, conn.upper_node)

    def test_get_source_nodes(self):

        self.assertEqual(True, isinstance(
            self.rec_config.get_source_nodes(self.net),
            NeuralNet))

    def test_get_upper_nodes(self):

        self.assertEqual(1, len(self.rec_config.get_upper_nodes(self.net)))

61 |
class ElmanSimpleRecurrentTest(unittest.TestCase):
    """
    Tests ElmanSimpleRecurrent

    """

    def setUp(self):
        """A 2-1-1 network with the Elman recurrent configuration."""
        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)
        self.rec_config = ElmanSimpleRecurrent()

    def test_class_init_(self):
        """Constructor defaults."""
        config = self.rec_config
        self.assertEqual('a', config.source_type)
        self.assertEqual(1.0, config.incoming_weight)
        self.assertEqual(0.0, config.existing_weight)
        self.assertEqual('m', config.connection_type)
        self.assertEqual(1, config.copy_levels)
        self.assertEqual(0, config.copy_nodes_layer)

    def test_get_source_nodes(self):
        """Source nodes are the hidden-layer nodes."""
        hidden_nodes = self.net.layers[1].get_nodes(NODE_HIDDEN)
        source_nodes = self.rec_config.get_source_nodes(self.net)

        # Should be the same
        self.assertEqual(len(hidden_nodes), len(source_nodes))
        self.assertEqual(
            self.net.layers[1].get_nodes(NODE_HIDDEN),
            self.rec_config.get_source_nodes(self.net))

95 |
class JordanRecurrentTest(unittest.TestCase):
    """
    Tests JordanRecurrent

    """

    def setUp(self):
        """A 2-1-1 network; Jordan config keeps .8 of existing input."""
        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)
        self.rec_config = JordanRecurrent(existing_weight=.8)

    def test_class_init_(self):
        """Constructor stores defaults plus the explicit existing_weight."""
        config = self.rec_config
        self.assertEqual('a', config.source_type)
        self.assertEqual(1.0, config.incoming_weight)
        self.assertEqual(0.8, config.existing_weight)
        self.assertEqual('m', config.connection_type)
        self.assertEqual(1, config.copy_levels)
        self.assertEqual(0, config.copy_nodes_layer)

    def test_get_source_nodes(self):
        """Source nodes are the output-layer nodes."""
        self.assertEqual(
            self.net.layers[2].nodes,
            self.rec_config.get_source_nodes(self.net))

124 |
class NARXRecurrentTest(unittest.TestCase):
    """
    Tests NARXRecurrent

    """

    def setUp(self):
        """A 2-1-1 network with a first-order NARX configuration."""
        self.net = NeuralNet()
        self.net.init_layers(2, [1], 1)
        self.rec_config = NARXRecurrent(
            output_order=1,
            incoming_weight_from_output=.9,
            input_order=1,
            incoming_weight_from_input=.7)

    def test_class_init_(self):
        """Constructor stores order/weight pairs; node type starts unset."""
        config = self.rec_config
        self.assertEqual(0, config.existing_weight)
        self.assertEqual(None, config._node_type)
        self.assertEqual([1, .9], config.output_values)
        self.assertEqual([1, .7], config.input_values)

    def test_get_source_nodes(self):
        """Source nodes follow the currently selected _node_type."""
        self.rec_config._node_type = NODE_OUTPUT
        self.assertEqual(
            self.net.layers[-1].get_nodes(NODE_OUTPUT),
            self.rec_config.get_source_nodes(self.net))

        self.rec_config._node_type = NODE_INPUT
        self.assertEqual(
            self.net.layers[0].get_nodes(NODE_INPUT),
            self.rec_config.get_source_nodes(self.net))

161 |
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
164 |
--------------------------------------------------------------------------------
/pyneurgen/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
18 |
19 | help:
20 | @echo "Please use \`make ' where is one of"
21 | @echo " html to make standalone HTML files"
22 | @echo " dirhtml to make HTML files named index.html in directories"
23 | @echo " singlehtml to make a single large HTML file"
24 | @echo " pickle to make pickle files"
25 | @echo " json to make JSON files"
26 | @echo " htmlhelp to make HTML files and a HTML help project"
27 | @echo " qthelp to make HTML files and a qthelp project"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
32 | @echo " text to make text files"
33 | @echo " man to make manual pages"
34 | @echo " texinfo to make Texinfo files"
35 | @echo " info to make Texinfo files and run them through makeinfo"
36 | @echo " gettext to make PO message catalogs"
37 | @echo " changes to make an overview of all changed/added/deprecated items"
38 | @echo " linkcheck to check all external links for integrity"
39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
40 |
41 | clean:
42 | -rm -rf $(BUILDDIR)/*
43 |
44 | html:
45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | @echo
47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 |
49 | dirhtml:
50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | @echo
52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 |
54 | singlehtml:
55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | @echo
57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 |
59 | pickle:
60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | @echo
62 | @echo "Build finished; now you can process the pickle files."
63 |
64 | json:
65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
66 | @echo
67 | @echo "Build finished; now you can process the JSON files."
68 |
69 | htmlhelp:
70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
71 | @echo
72 | @echo "Build finished; now you can run HTML Help Workshop with the" \
73 | ".hhp project file in $(BUILDDIR)/htmlhelp."
74 |
75 | qthelp:
76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
77 | @echo
78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyNeurGen.qhcp"
81 | @echo "To view the help file:"
82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyNeurGen.qhc"
83 |
84 | devhelp:
85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
86 | @echo
87 | @echo "Build finished."
88 | @echo "To view the help file:"
89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PyNeurGen"
90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyNeurGen"
91 | @echo "# devhelp"
92 |
93 | epub:
94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
95 | @echo
96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
97 |
98 | latex:
99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
100 | @echo
101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
103 | "(use \`make latexpdf' here to do that automatically)."
104 |
105 | latexpdf:
106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
107 | @echo "Running LaTeX files through pdflatex..."
108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
110 |
111 | text:
112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
113 | @echo
114 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
115 |
116 | man:
117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
118 | @echo
119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
120 |
121 | texinfo:
122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
123 | @echo
124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
125 | @echo "Run \`make' in that directory to run these through makeinfo" \
126 | "(use \`make info' here to do that automatically)."
127 |
128 | info:
129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
130 | @echo "Running Texinfo files through makeinfo..."
131 | make -C $(BUILDDIR)/texinfo info
132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
133 |
134 | gettext:
135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
136 | @echo
137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
138 |
139 | changes:
140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
141 | @echo
142 | @echo "The overview file is in $(BUILDDIR)/changes."
143 |
144 | linkcheck:
145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
146 | @echo
147 | @echo "Link check complete; look for any errors in the above output " \
148 | "or in $(BUILDDIR)/linkcheck/output.txt."
149 |
150 | doctest:
151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
152 | @echo "Testing of doctests in the sources finished, look at the " \
153 | "results in $(BUILDDIR)/doctest/output.txt."
154 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_layers.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from copy import deepcopy
4 |
5 | from pyneurgen.layers import Layer
6 | from pyneurgen.nodes import Node, CopyNode, BiasNode, Connection
7 | from pyneurgen.nodes import sigmoid, sigmoid_derivative, tanh, tanh_derivative
8 | from pyneurgen.nodes import linear, linear_derivative
9 |
10 |
class TestLayer(unittest.TestCase):
    """
    Tests Layer.

    The deprecated failUnless*/failIf* assertion aliases have been
    replaced with their assert* equivalents; the aliases were removed
    in Python 3.12 and these tests would no longer run there.

    """

    def test__init__(self):
        """Layer type and default activation are fixed at construction."""

        self.assertEqual('input', Layer(0, 'input').layer_type)
        self.assertEqual('hidden', Layer(1, 'hidden').layer_type)
        self.assertEqual('output', Layer(2, 'output').layer_type)

        layer = Layer(0, 'input')
        self.assertEqual('linear', layer.default_activation_type)
        layer = Layer(0, 'hidden')
        self.assertEqual('sigmoid', layer.default_activation_type)
        layer = Layer(0, 'output')
        self.assertEqual('linear', layer.default_activation_type)

        # Invalid layer type, and an input layer at a nonzero position,
        # must both be rejected.
        self.assertRaises(ValueError, Layer, 0, 'test')
        self.assertRaises(ValueError, Layer, 1, 'input')

    def test_total_nodes(self):
        """Counts can be total or restricted to one node type."""

        layer = Layer(0, 'input')
        layer.add_nodes(2, 'input')
        layer.add_nodes(2, 'copy')
        layer.add_node(BiasNode())

        self.assertEqual(5, layer.total_nodes())
        self.assertEqual(2, layer.total_nodes('input'))
        self.assertEqual(2, layer.total_nodes('copy'))
        self.assertEqual(0, layer.total_nodes('hidden'))

    def test_unconnected_nodes(self):
        """Only nodes lacking input connections are reported."""

        layer = Layer(1, 'hidden')

        layer.add_nodes(2, 'hidden')

        layer.nodes[0].add_input_connection(
            Connection(Node(), layer.nodes[0]))
        input_side = layer.unconnected_nodes()

        self.assertEqual(1, input_side[0])
        self.assertNotEqual(0, input_side[0])

    def test_values(self):
        """values() returns the nodes' raw values in node order."""

        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')

        layer.nodes[0].set_value(.2)
        layer.nodes[1].set_value(.3)

        values = layer.values()

        self.assertEqual(True, isinstance(values, list))
        self.assertEqual(.2, values[0])
        self.assertEqual(.3, values[1])

    def test_activations(self):
        """With linear activation, activations equal the raw values."""

        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')
        layer.set_activation_type('linear')

        layer.nodes[0].set_value(.2)
        layer.nodes[1].set_value(.3)

        activations = layer.activations()

        self.assertEqual(True, isinstance(activations, list))
        self.assertEqual(.2, activations[0])
        self.assertEqual(.3, activations[1])

    def test_set_activation_type(self):
        """Setting the layer activation propagates to its nodes."""

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')

        self.assertEqual('sigmoid', layer.nodes[0].get_activation_type())

        layer.set_activation_type('linear')

        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        self.assertRaises(
            ValueError,
            layer.set_activation_type, 'fail')

    def test_add_nodes(self):
        """Bulk-added nodes carry the requested node type."""

        layer = Layer(0, 'input')

        layer.add_nodes(1, 'input')
        layer.add_nodes(1, 'copy')

        self.assertEqual(2, len(layer.nodes))
        self.assertEqual('copy', layer.nodes[1].node_type)
        self.assertNotEqual('copy', layer.nodes[0].node_type)

    def test_add_node(self):
        """Added nodes are numbered sequentially and inherit the layer
        default activation unless they already have one of their own."""

        layer = Layer(0, 'input')
        layer.default_activation_type = 'linear'
        node = Node()
        layer.add_node(node)

        self.assertEqual(1, layer.total_nodes())
        self.assertEqual(0, layer.nodes[0].node_no)
        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        layer.default_activation_type = 'sigmoid'
        node = Node()
        layer.add_node(node)

        self.assertEqual(2, layer.total_nodes())
        self.assertEqual(1, layer.nodes[1].node_no)
        self.assertEqual('sigmoid', layer.nodes[1].get_activation_type())

        node = BiasNode()
        layer.add_node(node)

        self.assertEqual(3, layer.total_nodes())
        self.assertEqual(2, layer.nodes[2].node_no)

        node = Node()
        node.set_activation_type('tanh')
        layer.add_node(node)

        self.assertEqual('tanh', layer.nodes[3].get_activation_type())

    def test_get_node(self):
        """Lookup is by node number, not by list position."""

        layer = Layer(0, 'input')
        layer.add_nodes(6, 'input')

        del layer.nodes[3]

        node = layer.get_node(4)
        self.assertEqual(node, layer.nodes[3])

    def test_get_nodes(self):

        pass

    def test_connect_layer(self):

        pass

    def test_load_inputs(self):

        pass

    def test_load_targets(self):

        pass

    def test_randomize(self):

        pass

    def test_feed_forward(self):
        """A single hidden node sums its weighted, activated inputs."""

        layer0 = Layer(0, 'input')
        layer0.add_nodes(2, 'input')
        layer0.set_activation_type('sigmoid')

        layer1 = Layer(1, 'hidden')
        layer1.add_nodes(1, 'hidden')

        inode1 = layer0.nodes[0]
        inode2 = layer0.nodes[1]

        inode1.set_value(.25)
        inode2.set_value(.5)

        node = layer1.nodes[0]
        node.add_input_connection(
            Connection(inode1, node, .25))
        node.add_input_connection(
            Connection(inode2, node, .5))

        layer1.feed_forward()

        self.assertAlmostEqual(
            sigmoid(.25) * .25 + sigmoid(.5) * .5, node.get_value())

    def test_update_error(self):

        pass
        # TODO: verify at this level that update_error visits every node
        # in the layer (e.g. by stubbing node.update_error and checking
        # each node's error is overwritten).

    def test_adjust_weights(self):

        pass

    def test_get_errors(self):

        pass

    def test_get_weights(self):

        pass
235 |
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
238 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Python Neural Genetic Algorithm Hybrids (PyNeurGen)
2 |
3 | This is a fork of the PyNeurGen based on release 0.3.1. The original site is http://pyneurgen.sourceforge.net/ with the source code at https://sourceforge.net/projects/pyneurgen/
4 |
5 | **NOTE** The [owner](jacksonpradolima) of this repository has no affiliation with official PyNeurGen project. This repo is periodically updated as a kindness to others who have shown interest in it. Therefore, this exists to provide an easy way to access and contribute with the project, nothing more.
6 |
7 | # About
8 |
9 | This package provides the Python "pyneurgen" module, which contains several
10 | classes for implementing grammatical evolution, a form of genetic programming,
11 | and classes for neural networks. These classes enable the creation of hybrid
models that can embody the strengths of both approaches.
13 |
14 | While neural networks can be adept at solving non-linear problems, some
15 | problems remain beyond reach. For example, a difficult search space can cause
16 | suboptimal solutions to be reached. Also, multiobjective problems become
17 | extremely difficult, if not impossible. With genetic algorithms, a more
18 | thorough search can be made.
19 |
This latest version has additional features added to make the construction
of recurrent neural networks easier. Recurrent neural networks are used
22 | with time series data, because the structure of the network enables a
23 | memory of past events to be included in the process.
24 |
25 | There is an additional helper class that configures such recurrent network
26 | types as Elman Simple Recurrent Network, Jordan style recurrent networks, and
27 | NARX (Non-Linear AutoRegressive with eXogenous inputs) recurrent networks. And,
28 | there are instructions on making your own structures for your unique
29 | requirements.
30 |
31 | # Documentation
32 |
33 | https://jacksonpradolima.github.io/PyNeurGen/
34 |
35 |
36 | # How to install this package
37 |
38 | ```shell
39 | python setup.py install
40 | ```
41 |
42 | or
43 |
44 | ```shell
45 | sudo pip install git+https://github.com/jacksonpradolima/pyneurgen.git@master
46 | ```
47 |
48 | # Change Log
49 |
50 | pyneurgen-0.3.1 Minor change made to the MANIFEST file that interfered with
51 | installation on Windows machines.
52 |
53 | pyneurgen-0.3 Extensive testing has been implemented, although there is
54 | room for more. Unit testing is now somewhere over 90%.
55 |
56 | An attempt is made to formalize constants to clean up the
57 | code.
58 |
59 | Validation testing was added to neural nets.
60 |
61 | The demo programs were updated slightly.
62 |
63 | A bug when loading saved neural nets with recurrent features
64 | was fixed.
65 |
66 | Some bugs in grammatical evolution were fixed.
                Queuing/threading and garbage collection was removed.  Queuing
68 | never worked very well, and it is not clear that garbage collection
69 | was much help either, so it is gone.
70 |
71 | In grammatical evolution, the completion process has been
72 | streamlined. In addition, there is a new feature for building
73 | families of solutions and using your custom built fitness
74 | landscape function to evaluate stopping.
75 |
76 | There was a bug in the cross-over function spotted by Franco in
77 | Argentina (blamaeda@gmail.com) whom I wish to thank.
78 |
79 | There were some bugs corrected in the fitness functions in the
80 | min and max functions in fitness lists. Unit testing coverage
81 | is essentially 100% on fitness functions. FitnessLinearRanking
82 | was streamlined for inputs.
83 |
84 | There is a first pass at logging, but unwieldy.
85 |
86 | With python 2.6, there is math.isnan, which replaces a
87 | homemade function. Other utility functions were removed
88 | as useless abstractions.
89 |
90 |
91 | pyneurgen-0.2 Updates primarily related to recurrent networks.
92 |
                In an attempt to enable easier use of recurrent features, a
94 | new set of classes has been implemented.
95 |
96 | NeuralNet.init_layers no longer takes copy_levels as a
97 | parameter. Rather, it accepts along with the number of nodes
98 | for input, hidden and output layers, classes derived from a
99 | class called RecurrentConfig, which modifies the network
100 | structure to embody recurrence. Using the feature, a class
101 | such as JordanRecurrent, ElmanSimpleRecurrent, or NARXRecurrent
102 | can be passed in and the appropriate structure configured.
103 |
104 | The RecurrentConfig class can easily be subclassed for other
                custom types.  As before, the copy nodes can easily be added
106 | manually to network structures to achieve other kinds of
107 | structures.
108 |
109 | CopyNode now has some additional features to enable the above
110 | classes. In the prior version, the Copy node value was always
111 | replaced by the source node value. Now, a weighting system of
112 | existing_weight and incoming weight is used to modify the
113 | incoming values and/or retain part of the existing value.
114 |
115 | For flexibility, the copy node can take the source node's
116 | value, or activation, and have its activation type, independent
117 | of the source node's activation.
118 |
119 | Test modules are slightly more complete.
120 |
121 | pyneurgen-0.1 Initial version
122 |
123 | # To Do
124 |
125 | Immediate / Near Future
126 |
127 | Samples:
128 | More samples would be helpful to express different applications of the
129 | modules. Also, additional samples could highlight various features
130 | currently available.
131 |
132 | Testing:
    Unit testing is much more complete than it was (90%?), but more can be done.
134 |
135 | Flesh out logging
136 |
137 | # Author
138 |
139 | Don Smiley
140 |
141 | # Author's message
142 |
143 | I have benefitted from a number of great resources. I should particularly note the book Biologically Inspired Algorithms for Financial Modelling by Anthony Brabazon and Michael O'Neill where I read about the technique 'grammatical evolution'. I particularly like the simplicity for mapping genotypes to programming applications, coupled with ease of controlling the
level of complexity necessary to accomplish the task. There are a number of other applications for grammatical evolution that would be great to implement as well.
145 |
146 | I should also note the book Adaptive Learning of Polynomial Networks by Nikolay Y. Nikolaev and Hitoshi Iba. While I would like to support polynomial networks in this package, it was not possible in the initial version. This book has a very lucid discussion of fitness landscapes. My fitness objects benefitted greatly from their discussions of fitness functions.
147 |
148 | Any mistakes in implementation are my own.
149 |
150 | Don Smiley
151 | May, 2008
152 |
153 |
154 |
--------------------------------------------------------------------------------
/pyneurgen/demo/sample_neural_genetic.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2008 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This program shows an example of a combination of genetic and neural network
23 | techniques. Each genotype creates a particular network structure subject to a
24 | maximum number of nodes, and then specifies node types, connections, and
25 | starting weights.
26 |
27 | """
import math
import random

import matplotlib
from pylab import figure, plot, legend, subplot, grid, xlabel, ylabel, \
    show, title

from pyneurgen.grammatical_evolution import GrammaticalEvolution
from pyneurgen.fitness import FitnessElites, FitnessTournament
from pyneurgen.fitness import ReplacementTournament
from pyneurgen.neuralnet import NeuralNet
37 |
38 |
39 |
# BNF grammar fed to the grammatical-evolution engine.  Each production is
# substituted into the program template near the end of the string; the
# generated program builds, trains, and evaluates a neural network and
# publishes its validation MSE as the genotype's fitness.
#
# NOTE(review): the angle-bracketed nonterminal tags (left-hand sides of the
# "::=" productions and their references inside the template) appear to have
# been stripped from this copy by a text-extraction step, and the template's
# indentation looks lost -- compare against the upstream
# sample_neural_genetic.py before editing.  Note also that
# 'print "mse", mse' inside the template is Python 2 syntax.
bnf = """
::= sample.nn
::= 40
::= sigmoid | linear | tanh
::= 0.
::= | 1 | 2 | 3 | 4 | 5 | 6 |
7 | 8 | 9 | 0
::= + | -
::= 1000
::= 0. | 1. |
2. | 3. |
4. | 5.
::= 0.
::= None
::=
import math
import random

from pyneurgen.neuralnet import NeuralNet
from pyneurgen.nodes import Node, BiasNode, CopyNode, Connection
from pyneurgen.layers import Layer
from pyneurgen.recurrent import JordanRecurrent

net = NeuralNet()
hidden_nodes = max(int(round( * float())), 1)

net.init_layers(len(self.all_inputs[0]),
[hidden_nodes],
len(self.all_targets[0]))

net.layers[1].set_activation_type('')
net.output_layer.set_activation_type('')

# Use the genotype to get starting weights
for layer in net.layers[1:]:
for node in layer.nodes:
for conn in node.input_connections:
# every time it is asked, another starting weight is given
conn.set_weight(self.runtime_resolve('', 'float'))

# Note the injection of data from the genotype
# In a real project, the genotype might pull the data from elsewhere.
net.set_all_inputs(self.all_inputs)
net.set_all_targets(self.all_targets)

length = len(self.all_inputs)
learn_end_point = int(length * .6)
validation_end_point = int(length * .8)

net.set_learn_range(0, learn_end_point)

net.set_validation_range(0, learn_end_point)
net.set_validation_range(learn_end_point + 1, validation_end_point)
net.set_test_range(validation_end_point + 1, length - 1)

net.set_learnrate()
epochs = int(round( * float()))

if epochs > 0:
# Use learning to further set the weights
net.learn(epochs=epochs, show_epoch_results=True,
random_testing=False)

# Use validation for generating the fitness value
mse = net.validate(show_sample_interval=0)

print "mse", mse
modelname = self.runtime_resolve('', 'str')

net.save(modelname)

self.set_bnf_variable('', modelname)

# This method can be used to look at all the particulars
# of what happened...uses disk space
self.net = net
fitness = mse
self.set_bnf_variable('', fitness)

"""
120 |
ges = GrammaticalEvolution()

# Attach the grammar and bound the genotype (gene) length.
ges.set_bnf(bnf)
ges.set_genotype_length(start_gene_length=100,
max_gene_length=200)

# Population sizing and the stopping criterion: fitness values within
# 0.01 of the best ('center') end the run.
ges.set_population_size(20)
ges.set_max_generations(50)
ges.set_fitness_type('center', 0.01)

# Generated programs longer than this are rejected.
ges.set_max_program_length(4000)

# Mutation/crossover behavior: wrap the genotype when mapping runs off
# the end, treat fitness 2.0 as failure, single-point ('m') mutation.
ges.set_wrap(True)
ges.set_fitness_fail(2.0)
ges.set_mutation_type('m')
ges.set_max_fitness_rate(.25)
ges.set_mutation_rate(.025)
ges.set_fitness_selections(
FitnessElites(ges.fitness_list, .05),
FitnessTournament(ges.fitness_list, tournament_size=2))

ges.set_crossover_rate(.2)
ges.set_children_per_crossover(2)

ges.set_replacement_selections(
ReplacementTournament(ges.fitness_list, tournament_size=3))

# Keep per-generation history and limit each program's runtime
# (per-program and overall timeouts, in seconds).
ges.set_maintain_history(True)
ges.set_timeouts(10, 360)
# All the parameters have been set.

ges.create_genotypes()
153 |
# All samples are drawn from this population: pop_len points along a
# sine wave with Gaussian noise added to each point.
pop_len = 200
factor = 1.0 / float(pop_len)

population = []
for i in range(pop_len):
    noisy_sine = math.sin(float(i) * factor * 10.0) + \
        random.gauss(factor, .2)
    population.append([i, noisy_sine])

# Filled in below from the shuffled population.
all_inputs = []
all_targets = []
163 |
164 |
def population_gen(population):
    """
    Yield the members of *population* one at a time in random order.

    The input sequence itself is left untouched: a shallow copy is
    shuffled and its items are yielded.

    """

    shuffled = list(population)
    random.shuffle(shuffled)

    for member in shuffled:
        yield member
177 |
# Build the inputs: each sample pairs the normalized position with a pure
# noise column; the target is the noisy sine value.
# NOTE(review): the bodies of these loops appear to have lost their
# indentation in this copy -- restore before running.
# Build the inputs
for position, target in population_gen(population):
all_inputs.append([float(position) / float(pop_len), random.random()])
all_targets.append([target])

# Hand every genotype the same dataset; the generated programs read
# self.all_inputs / self.all_targets (see the bnf template).
for g in ges.population:
g.all_inputs = all_inputs
g.all_targets = all_targets
186 |
# Run the evolution and report the final fitness ranking.
print(ges.run())
print("Final Fitness list sorted best to worst:")
print(ges.fitness_list.sorted())
print()
print()
# Pull the best genotype and the program it generated.
g = ges.population[ges.fitness_list.best_member()]
program = g.local_bnf['program']

# NOTE(review): the key below looks like a stripped grammar tag
# (presumably the saved-model-name variable) -- confirm against upstream.
saved_model = g.local_bnf[''][0]

# We will create a brand new model
net = NeuralNet()
net.load(saved_model)

net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)

# Evaluate on the held-out final 20% of the population.
test_start_point = int(pop_len * .8) + 1
net.set_test_range(test_start_point, pop_len - 1)
mse = net.test()

print("The selected model has the following characteristics")
print("Activation Type:", net.layers[1].nodes[1].get_activation_type())
print("Hidden Nodes:", len(net.layers[1].nodes), ' + 1 bias node')
print("Learn Rate:", net.get_learnrate())
print("Epochs:", net.get_epochs())

# De-normalize positions back to population indexes for plotting.
test_positions = [item[0][0] * pop_len for item in net.get_test_data()]

all_targets1 = [item[0][0] for item in net.test_targets_activations]
allactuals = [item[1][0] for item in net.test_targets_activations]

# This is quick and dirty, but it will show the results
# NOTE(review): figure() is not in the pylab import list at the top of
# this file -- it must be imported from pylab or this raises NameError.
fig = figure()
ax1 = subplot(211)
ax1.plot([i[1] for i in population])
ax1.set_title("Population")
grid(True)
a = [i for i in ax1.get_yticklabels()]
for i in a: i.set_fontsize(9)
a = [i for i in ax1.get_xticklabels()]
for i in a: i.set_fontsize(9)

# Targets vs. model outputs over the test range.
ax2 = subplot(2, 1, 2)
ax2.plot(test_positions, all_targets1, 'bo', label='targets')
ax2.plot(test_positions, allactuals, 'ro', label='actuals')
grid(True)
legend(loc='lower left', numpoints=1)
a = [i for i in ax2.get_yticklabels()]
for i in a: i.set_fontsize(9)
a = [i for i in ax2.get_xticklabels()]
for i in a: i.set_fontsize(9)

show()

# Best fitness per generation.
fig = figure()
ax1 = subplot(111)
ax1.plot(ges.get_fitness_history())
xlabel('generations')
ylabel('fitness (mse)')
grid(True)
title("Best Fitness Values by Generation")
a = [i for i in ax1.get_yticklabels()]
for i in a: i.set_fontsize(9)
a = [i for i in ax1.get_xticklabels()]
for i in a: i.set_fontsize(9)

show()
255 |
256 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_neuralnet.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from pyneurgen.neuralnet import NeuralNet
4 | from pyneurgen.layers import Layer
5 | from pyneurgen.nodes import Node, CopyNode, BiasNode, Connection
6 | from pyneurgen.nodes import sigmoid, sigmoid_derivative, tanh, tanh_derivative
7 | from pyneurgen.nodes import linear, linear_derivative
8 |
9 |
class TestNeuralNet(unittest.TestCase):
    """
    Tests NeuralNet.

    The deprecated failUnlessRaises/failIfEqual assertion aliases have
    been replaced with assertRaises/assertNotEqual; the aliases were
    removed in Python 3.12 and these tests would no longer run there.

    """

    def setUp(self):
        """Build a minimal 1-1-1 network with fixed connection weights."""

        self.net = NeuralNet()

        layer = Layer(0, 'input')
        layer.add_nodes(1, 'input')
        self.net.layers.append(layer)

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')
        self.net.layers.append(layer)

        layer = Layer(2, 'output')
        layer.add_nodes(1, 'output')
        self.net.layers.append(layer)

        # Specify connections
        self.net.layers[1].nodes[0].add_input_connection(
            Connection(
                self.net.layers[0].nodes[0],
                self.net.layers[1].nodes[0],
                1.00))

        self.net.layers[2].nodes[0].add_input_connection(
            Connection(
                self.net.layers[1].nodes[0],
                self.net.layers[2].nodes[0],
                .75))

        self.net._epochs = 1
        self.net.copy_levels = 0
        self.net._allinputs = [[.1], [.2], [.3], [.4], [.5]]
        self.net._alltargets = [[.2], [.4], [.6], [.8], [1.0]]

        self.net.input_layer = self.net.layers[0]
        self.net.output_layer = self.net.layers[-1]

    def test_set_halt_on_extremes(self):

        self.net._halt_on_extremes = 'fail'
        self.net.set_halt_on_extremes(True)
        self.assertEqual(True, self.net._halt_on_extremes)

        self.net._halt_on_extremes = 'fail'
        self.net.set_halt_on_extremes(False)
        self.assertEqual(False, self.net._halt_on_extremes)

        # Only actual booleans are accepted.
        self.net._halt_on_extremes = 'fail'
        self.assertRaises(ValueError, self.net.set_halt_on_extremes, 'a')

        self.net._halt_on_extremes = 'fail'
        self.assertRaises(ValueError, self.net.set_halt_on_extremes, 3)

    def test_get_halt_on_extremes(self):

        self.net.set_halt_on_extremes(True)
        self.assertEqual(True, self.net.get_halt_on_extremes())

        self.net.set_halt_on_extremes(False)
        self.assertEqual(False, self.net.get_halt_on_extremes())

    def test_set_random_constraint(self):

        self.net._random_constraint = 'fail'
        self.net.set_random_constraint(.1)
        self.assertEqual(.1, self.net._random_constraint)

        # Must be a float strictly between 0 and 1.
        self.assertRaises(ValueError, self.net.set_random_constraint, 3)
        self.assertRaises(ValueError, self.net.set_random_constraint, 1)
        self.assertRaises(ValueError, self.net.set_random_constraint, 0.0)
        self.assertRaises(ValueError, self.net.set_random_constraint, -.2)
        self.assertRaises(ValueError, self.net.set_random_constraint, 'a')

    def test_get_random_constraint(self):

        self.net.set_random_constraint(.2)
        self.assertEqual(.2, self.net.get_random_constraint())

        self.net.set_random_constraint(.8)
        self.assertEqual(.8, self.net.get_random_constraint())

    def test_set_epochs(self):

        self.net._epochs = 'fail'
        self.net.set_epochs(3)
        self.assertEqual(3, self.net._epochs)

        # Must be a positive integer.
        self.assertRaises(ValueError, self.net.set_epochs, .3)
        self.assertRaises(ValueError, self.net.set_epochs, 0)
        self.assertRaises(ValueError, self.net.set_epochs, -3)
        self.assertRaises(ValueError, self.net.set_epochs, -.2)
        self.assertRaises(ValueError, self.net.set_epochs, 'a')

    def test_get_epochs(self):

        self.net.set_epochs(3)
        self.assertEqual(3, self.net.get_epochs())

    def test_set_time_delay(self):

        self.net._time_delay = 'fail'
        self.net.set_time_delay(3)
        self.assertEqual(3, self.net._time_delay)

        # Must be a non-negative integer (0 is allowed).
        self.assertRaises(ValueError, self.net.set_time_delay, .3)
        self.assertRaises(ValueError, self.net.set_time_delay, -3)
        self.assertRaises(ValueError, self.net.set_time_delay, -.2)
        self.assertRaises(ValueError, self.net.set_time_delay, 'a')

    def test_get_time_delay(self):

        self.net.set_time_delay(3)
        self.assertEqual(3, self.net.get_time_delay())

    def test_set_all_inputs(self):

        pass

    def test_set_all_targets(self):

        pass

    def test_set_learnrate(self):

        pass

    def test_get_learnrate(self):

        pass

    def test__set_data_range(self):

        pass

    def test_set_learn_range(self):

        pass

    def test_get_learn_range(self):

        pass

    def test__check_time_delay(self):

        pass

    def test_get_learn_data(self):

        pass

    def test_get_validation_data(self):

        pass

    def test_get_test_data(self):

        pass

    def test__get_data(self):

        pass

    def test__get_randomized_position(self):

        pass

    def test__check_positions(self):

        pass

    def test_set_validation_range(self):

        pass

    def test_get_validation_range(self):

        pass

    def test_set_test_range(self):

        pass

    def test_get_test_range(self):

        pass

    def test_init_layers(self):

        pass

    def test__init_connections(self):

        pass

    def test__connect_layer(self):

        pass

    def test__build_output_conn(self):

        pass

    def test_randomize_network(self):

        pass

    def test_learn(self):

        pass

    def test_test(self):

        pass

    def test_calc_mse(self):

        self.assertAlmostEqual(10.0 / 2.0, self.net.calc_mse(100.0, 10))

    def test_process_sample(self):

        pass

    def test__feed_forward(self):
        """Forward pass replaces stale node values layer by layer."""

        # simplify activations
        self.net.layers[0].set_activation_type('sigmoid')
        self.net.layers[1].set_activation_type('sigmoid')
        self.net.layers[2].set_activation_type('sigmoid')

        # These values should be replaced
        self.net.layers[1].nodes[0].set_value(1000.0)
        self.net.layers[2].nodes[0].set_value(1000.0)

        self.assertEqual(1000.0, self.net.layers[1].nodes[0].get_value())
        self.assertEqual(1000.0, self.net.layers[2].nodes[0].get_value())

        self.net.layers[0].load_inputs([.2])

        self.net._feed_forward()

        self.assertEqual(.2, self.net.layers[0].nodes[0].get_value())
        self.assertEqual(
            sigmoid(.2) * 1.0,
            self.net.layers[1].nodes[0].get_value())

        self.assertEqual(
            sigmoid(sigmoid(.2) * 1.0) * .75,
            self.net.layers[2].nodes[0].get_value())

    def test__back_propagate(self):

        pass

    def test__update_error(self):

        pass

    def test__adjust_weights(self):
        """
        This function goes through layers starting with the top hidden layer
        and working its way down to the input layer.

        At each layer, the weights are adjusted based upon the errors.

        """
        halt_on_extremes = True

        for layer_no in range(len(self.net.layers) - 2, 0, -1):
            layer = self.net.layers[layer_no + 1]
            layer.adjust_weights(self.net._learnrate, halt_on_extremes)

    def test__zero_errors(self):
        """After zeroing, no node retains its sentinel error value."""

        for layer in self.net.layers[1:]:
            for node in layer.nodes:
                node.error = 1000

        self.net._zero_errors()

        for layer in self.net.layers[1:]:
            for node in layer.nodes:
                self.assertNotEqual(1000, node.error)

    def test_calc_output_error(self):

        pass

    def test_calc_sample_error(self):

        pass

    def test__copy_levels(self):

        pass

    def test__parse_inputfile_layer(self):

        pass

    def test__parse_inputfile_node(self):

        pass

    def test__parse_inputfile_conn(self):

        pass

    def test__parse_inputfile_copy(self):

        pass

    def test__parse_node_id(self):

        pass

    def test_load(self):

        pass

    def test_output_values(self):

        pass

    def test__node_id(self):

        pass

    def test_save(self):

        pass
346 |
347 |
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
350 |
--------------------------------------------------------------------------------
/pyneurgen/layers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2012 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see .
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This module implements a layer class for an artficial neural network.
23 |
24 | """
25 |
26 | from pyneurgen.nodes import Node, CopyNode, BiasNode, Connection
27 |
# Canonical layer_type identifiers accepted by Layer.__init__.
LAYER_TYPE_INPUT = 'input'
LAYER_TYPE_HIDDEN = 'hidden'
LAYER_TYPE_OUTPUT = 'output'
31 |
32 |
33 | class Layer(object):
34 | """
35 | A layer comprises a list of nodes and behaviors appropriate for their
36 | place in the hierarchy. A layer_type can be either 'input', 'hidden',
37 | or 'output'.
38 |
39 | """
40 |
41 | def __init__(self, layer_no, layer_type):
42 | """
43 | The layer class initializes with the layer number and the type of
44 | layer. Lower layer numbers are toward the input end of the network,
45 | with higher numbers toward the output end.
46 | """
47 | self.nodes = []
48 | self.layer_no = layer_no
49 |
50 | if layer_type in [LAYER_TYPE_INPUT, LAYER_TYPE_HIDDEN,
51 | LAYER_TYPE_OUTPUT]:
52 | self.layer_type = layer_type
53 | else:
54 | raise ValueError(
55 | "Layer type must be 'input', 'hidden', or 'output'")
56 |
57 | if layer_type == LAYER_TYPE_INPUT and layer_no != 0:
58 | raise ValueError("the input layer must always be layer_no 0")
59 |
60 | if self.layer_type == LAYER_TYPE_INPUT:
61 | self.default_activation_type = 'linear'
62 | elif self.layer_type == LAYER_TYPE_OUTPUT:
63 | self.default_activation_type = 'linear'
64 | else:
65 | self.default_activation_type = 'sigmoid'
66 |
67 | self.set_activation_type(self.default_activation_type)
68 |
69 | def total_nodes(self, node_type=None):
70 | """
71 | This function returns the total nodes. It can also return the total
72 | nodes of a particular type, such as 'copy'.
73 |
74 | """
75 |
76 | count = 0
77 | if node_type:
78 | for node in self.nodes:
79 | if node.node_type == node_type:
80 | count += 1
81 | return count
82 | else:
83 | return len(self.nodes)
84 |
85 | def unconnected_nodes(self):
86 | """
87 | This function looks for nodes that do not have an input
88 | connection.
89 |
90 | """
91 |
92 | return [node.node_no for node in self.nodes
93 | if not node.input_connections]
94 |
95 | def values(self):
96 | """
97 | This function returns the values for each node as a list.
98 |
99 | """
100 |
101 | return [node.get_value() for node in self.nodes]
102 |
103 | def activations(self):
104 | """
105 | This function returns the activation values for each node as a list.
106 |
107 | """
108 |
109 | return [node.activate() for node in self.nodes]
110 |
111 | def set_activation_type(self, activation_type):
112 | """
113 | This function is a mechanism for setting the activation type
114 | for an entire layer. If most nodes need to one specific type,
115 | this function can be used, then set whatever nodes individually
116 | after this use.
117 |
118 | """
119 |
120 | for node in self.nodes:
121 | if node.node_type != 'bias':
122 | node.set_activation_type(activation_type)
123 |
124 | def add_nodes(self, number_nodes, node_type, activation_type=None):
125 | """
126 | This function adds nodes in bulk for initialization.
127 |
128 | If an optional activation type is passed through, that will be set for
129 | the nodes. Otherwise, the default activation type for the layer will
130 | be used.
131 |
132 | """
133 |
134 | count = 0
135 | while count < number_nodes:
136 | if node_type == 'copy':
137 | node = CopyNode()
138 | else:
139 | node = Node(node_type)
140 |
141 | if activation_type:
142 | node.set_activation_type(activation_type)
143 |
144 | self.add_node(node)
145 | count += 1
146 |
147 | def add_node(self, node):
148 | """
149 | This function adds a node that has already been formed. Since it can
150 | originate outside of the initialization process, the activation type is
151 | assumed to be set appropriately already.
152 |
153 | """
154 |
155 | node.node_no = self.total_nodes()
156 | if node.node_type != 'bias':
157 | if not node.get_activation_type():
158 | node.set_activation_type(self.default_activation_type)
159 | node.layer = self
160 | self.nodes.append(node)
161 |
162 | def get_node(self, node_no):
163 | """
164 | This function returns the node associated with the node_no.
165 | Although it would seem to be reasonable to look it up by
166 | position within the node list, because sparse nodes are supported,
167 | there might be a mis-match between node_no and position within the
168 | list.
169 |
170 | """
171 |
172 | for node in self.nodes:
173 | if node.node_no == node_no:
174 | return node
175 |
176 | return False
177 |
178 | def get_nodes(self, node_type=None):
179 | """
180 | This function returns all the nodes of a layer. Optionally it can
181 | return all of the nodes of a particular type, such as 'copy'.
182 |
183 | """
184 |
185 | if node_type is None:
186 | return [node for node in self.nodes]
187 | else:
188 | return [node for node in self.nodes if node.node_type == node_type]
189 |
190 | def connect_layer(self, lower_layer):
191 | """
192 | This function accepts a lower layer within a network and for each node
193 | in that layer connects the node to nodes in the current layer.
194 |
195 | An exception is made for bias nodes. There is no reason to
196 | connect a bias node to a lower layer, since it always produces a 1.0
197 | for its value and activation.
198 |
199 | """
200 |
201 | for node in self.nodes:
202 | if node.node_type != 'bias':
203 | for lower_node in lower_layer.nodes:
204 | conn = Connection(lower_node, node)
205 | node.add_input_connection(conn)
206 |
207 | def load_inputs(self, inputs):
208 | """
209 | This takes a list of inputs that applied sequentially to
210 | each node in the input_layer
211 |
212 | """
213 |
214 | if self.layer_type != LAYER_TYPE_INPUT:
215 | raise ValueError("inputs are only entered into the input layer")
216 |
217 | for i in range(len(inputs)):
218 | node = self.nodes[i]
219 | if node.node_type != LAYER_TYPE_INPUT:
220 | raise ValueError(
221 | "Attempting to load an input value into a non-input node")
222 | if isinstance(inputs[i], float):
223 | node.set_value(inputs[i])
224 | else:
225 | raise ValueError(
226 | "Invalid value, most be float: %s" % (inputs[i]))
227 |
228 | def load_targets(self, targets):
229 | """
230 | This takes a list of targets that applied sequentially to
231 | each node in the output_layer
232 |
233 | """
234 |
235 | if self.layer_type != LAYER_TYPE_OUTPUT:
236 | raise ValueError(
237 | "target values are only loaded to the output layer")
238 |
239 | if len(targets) != len(self.nodes):
240 | raise ValueError(
241 | "Number of targets: %s, Number of nodes: %s""" % (
242 | (len(targets), len(self.nodes))))
243 | for i in range(self.total_nodes()):
244 | node = self.nodes[i]
245 | if isinstance(targets[i], float):
246 | node.set_value(targets[i])
247 | else:
248 | raise ValueError(
249 | "Invalid value, most be float: %s" % (targets[i]))
250 | node = self.nodes[i]
251 | node.target = targets[i]
252 |
253 | def randomize(self, random_constraint):
254 | """
255 | This function builds random weights for all the input connections in
256 | the layer.
257 |
258 | """
259 |
260 | for node in self.nodes:
261 | node.randomize(random_constraint)
262 |
263 | def feed_forward(self):
264 | """
265 | This function loops through the nodes on the layer and causes each
266 | node to feedforward values from nodes below that node.
267 |
268 | """
269 |
270 | for node in self.nodes:
271 | if not isinstance(node, BiasNode):
272 | node.feed_forward()
273 |
274 | def update_error(self, halt_on_extremes):
275 | """
276 | This function loops through the nodes on the layer and causes each
277 | node to update errors as part of the back propagation process.
278 |
279 | """
280 |
281 | for node in self.nodes:
282 | node.update_error(halt_on_extremes)
283 |
284 | def adjust_weights(self, learnrate, halt_on_extremes):
285 | """
286 | This function loops through the nodes causing each node to adjust the
287 | weights as a result of errors and the learning rate.
288 |
289 | """
290 |
291 | for node in self.nodes:
292 | if node.node_type != 'bias':
293 | node.adjust_weights(learnrate, halt_on_extremes)
294 |
295 | def get_errors(self):
296 | """
297 | This function returns a list of the error with each node.
298 |
299 | """
300 |
301 | return [node.error for node in self.nodes]
302 |
303 | def get_weights(self):
304 | """
305 | This function returns a list of the weights of input connections into
306 | each node in the layer.
307 |
308 | """
309 | weights = []
310 |
311 | for node in self.nodes:
312 | for conn in node.input_connections:
313 | weights.append(conn.get_weight())
314 |
315 | return weights
316 |
--------------------------------------------------------------------------------
/pyneurgen/recurrent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2012 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see .
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This module implements various approaches to recurrence.
23 |
24 | Elman Simple Recurrent Network:
25 |
26 | * Source nodes are hidden
27 | * One level of copy nodes
28 | * Source Value is activation value
29 | * Source value replaces existing copy node value
30 | * Copy node activation is linear
31 |
32 | Jordan
33 |
34 | * Source nodes are output nodes
35 | * One level of copy nodes
36 | * Source Value is activation value
37 | * Existing copy node value is discounted
38 | and combined with new source value
39 |
40 | NARX Non-Linear AutoRegressive with eXogenous inputs
41 |
42 | * Using the Narendra and Parthasarathy variation
43 | * Source nodes can come from outputs, inputs
44 |         Outputs -- multiple copies or orders
45 |         Inputs -- multiple copies
46 | * Order == Number of copies
47 | * Copy value can be discounted
48 |
49 |
50 | """
51 |
52 | from pyneurgen.neuralnet import NeuralNet
53 | from pyneurgen.nodes import CopyNode, Connection
54 | from pyneurgen.nodes import NODE_OUTPUT, NODE_HIDDEN, NODE_INPUT
55 | from pyneurgen.nodes import NODE_BIAS, ACTIVATION_LINEAR
56 |
57 |
class RecurrentConfig(object):
    """
    This is the base class for recurrent modifications.  It is not intended
    to be used directly.  Subclasses set the weights, source nodes, and
    connection styles appropriate to a particular recurrent architecture.

    """

    def __init__(self):
        """
        This function initializes the configuration class with defaults
        that subclasses override.

        """

        # 'a' copies the source node's activation; 'v' its raw value.
        self.source_type = 'a'
        self.activation_type = ACTIVATION_LINEAR
        # Weight applied to the value arriving from the source node.
        self.incoming_weight = 1.0
        # Weight applied to the copy node's prior value (0.0 discards it).
        self.existing_weight = 0.0
        # 'm' fully connects each copy node upward; 's' makes a single
        # connection back to the source node.
        self.connection_type = 'm'
        # Number of chained copy nodes per source node.
        self.copy_levels = 1
        # Layer index that receives the new copy nodes.
        self.copy_nodes_layer = 0
        # Layer index whose nodes receive connections from the copy nodes.
        self.connect_nodes_layer = 1

    def apply_config(self, neural_net):
        """
        This function modifies the neural net that is passed in by taking
        the parameters that have been set in this class.  By having
        _apply_config, subclassed versions of apply_config can take multiple
        passes with less code.

        """

        self._apply_config(neural_net)

    def _apply_config(self, neural_net):
        """
        This function actually does the work: for each source node it
        builds a chain of copy nodes, configures how they refresh from the
        source, and wires them into the network.

        Raises ValueError if neural_net is not a NeuralNet instance or the
        connection_type is unknown.

        """

        if not isinstance(neural_net, NeuralNet):
            raise ValueError("neural_net must be of the NeuralNet class.")
        for snode in self.get_source_nodes(neural_net):
            prev_copy_node = None
            for level in range(self.copy_levels):
                copy_node = CopyNode()
                # The first copy level tracks the source node itself;
                # deeper levels chain off the previous copy node.
                if level == 0:
                    copy_node.set_source_node(snode)
                else:
                    copy_node.set_source_node(prev_copy_node)

                copy_node.source_update_config(
                    self.source_type,
                    self.incoming_weight,
                    self.existing_weight)

                copy_node.set_activation_type(self.activation_type)

                if self.connection_type == 'm':
                    self._fully_connect(
                        copy_node,
                        self.get_upper_nodes(neural_net))
                elif self.connection_type == 's':
                    # Bug fix: the connection runs from the copy node (lower)
                    # up to the source node (upper), so it must be registered
                    # as an input connection on the receiving node.  It was
                    # previously added to the copy node itself, contradicting
                    # the Connection(lower, upper)/upper.add_input_connection
                    # convention used everywhere else in this package.
                    snode.add_input_connection(
                        Connection(copy_node, snode))
                else:
                    raise ValueError("Invalid connection_type")

                neural_net.layers[self.copy_nodes_layer].add_node(copy_node)
                prev_copy_node = copy_node

    @staticmethod
    def _fully_connect(lower_node, upper_nodes):
        """
        This function creates connections to each of the upper nodes.

        This is a separate function from the one in layers, because using
        this version does not require ALL of the nodes on a layer to be
        used.

        """

        for upper_node in upper_nodes:
            upper_node.add_input_connection(Connection(lower_node, upper_node))

    def get_source_nodes(self, neural_net):
        """
        This function is a stub for getting the appropriate source nodes;
        subclasses return the list of nodes to copy from.

        """

        return neural_net

    def get_upper_nodes(self, neural_net):
        """
        This function returns the nodes of the connection layer (bias nodes
        excluded) to which the copy nodes will connect.

        """

        layer = neural_net.layers[self.connect_nodes_layer]
        return [node for node in layer.nodes
                if node.node_type != NODE_BIAS]
158 |
159 |
class ElmanSimpleRecurrent(RecurrentConfig):
    """
    Converts a standard neural network into an Elman simple recurrent
    network:

    * Source nodes are nodes in the hidden layer.
    * One level of copy nodes (context units) is used.
    * The copied value is the hidden node's activation; the context
      node's activation is linear -- a plain copy.  Each new value
      replaces the previous one.
    * With multiple hidden layers, the lowest hidden layer is used.
    * Context nodes are fully connected to the hidden layer by default.

    """

    def __init__(self):
        """
        Installs the weights and connection defaults consistent with an
        Elman network: the full activation is copied in, any previous
        context value is discarded, and context nodes fully connect upward.

        """

        RecurrentConfig.__init__(self)
        self.copy_levels = 1
        self.copy_nodes_layer = 0
        self.connection_type = 'm'
        self.source_type = 'a'
        self.incoming_weight = 1.0
        self.existing_weight = 0.0

    def get_source_nodes(self, neural_net):
        """
        Returns the hidden nodes of the first hidden layer (layer 1).

        """

        return neural_net.layers[1].get_nodes(NODE_HIDDEN)
203 |
204 |
class JordanRecurrent(RecurrentConfig):
    """
    Converts a standard neural network into a Jordan style recurrent
    network:

    * Source nodes are nodes in the output layer.
    * One level of copy nodes (context units) is used.
    * The copied value is the output node's activation; the context
      node's activation is linear -- a plain copy.
    * The incoming value is added to a discounted version of the context
      node's previous value, so the existing weight should be between
      0.0 and 1.0.
    * Context nodes are fully connected to the layer above by default.

    """

    def __init__(self, existing_weight):
        """
        Takes the weight that is multiplied against the value already held
        in each context node before the new output value is added.

        """

        RecurrentConfig.__init__(self)
        self.copy_levels = 1
        self.copy_nodes_layer = 0
        self.connection_type = 'm'
        self.source_type = 'a'
        self.incoming_weight = 1.0
        self.existing_weight = existing_weight

    def get_source_nodes(self, neural_net):
        """
        Returns the nodes of the output (last) layer.

        """

        return neural_net.layers[-1].get_nodes(NODE_OUTPUT)
251 |
252 |
class NARXRecurrent(RecurrentConfig):
    """
    Converts a standard neural network into a NARX (Non-Linear
    AutoRegressive with eXogenous inputs) recurrent network, with
    modifications suggested by Narendra and Parthasarathy (1990).

    Source nodes can come from both outputs and inputs, each with its own
    number of copy levels (the "order") and its own incoming weight.  The
    output-node configuration is applied first, then the input-node
    configuration.

    """

    def __init__(self, output_order, incoming_weight_from_output,
                    input_order, incoming_weight_from_input):
        """
        Takes the output order (number of copy levels of output values),
        the weight applied to incoming values from output nodes, the input
        order (number of copy levels of input values), and the weight
        applied to incoming values from input nodes.

        """

        RecurrentConfig.__init__(self)
        self._node_type = None
        self.existing_weight = 0.0
        # Each pair is [order, incoming weight] for one pass.
        self.output_values = [output_order, incoming_weight_from_output]
        self.input_values = [input_order, incoming_weight_from_input]

    def get_source_nodes(self, neural_net):
        """
        Returns the output or input nodes, depending on which pass is in
        progress (self._node_type).

        """

        if self._node_type == NODE_OUTPUT:
            return neural_net.layers[-1].get_nodes(self._node_type)
        if self._node_type == NODE_INPUT:
            return neural_net.layers[0].get_nodes(self._node_type)

    def apply_config(self, neural_net):
        """
        Applies the output-node configuration first and then the
        input-node configuration, skipping any pass whose order is zero.

        """

        passes = (
            (NODE_OUTPUT, self.output_values),
            (NODE_INPUT, self.input_values))

        for node_type, (order, weight) in passes:
            if order > 0:
                self._node_type = node_type
                self.copy_levels = order
                self.incoming_weight = weight

                self._apply_config(neural_net)
324 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_nodes.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from pyneurgen.nodes import ProtoNode, Node, CopyNode, BiasNode, Connection
4 | from pyneurgen.nodes import sigmoid, sigmoid_derivative, tanh, tanh_derivative
5 | from pyneurgen.nodes import linear, linear_derivative
6 | from pyneurgen.nodes import NODE_OUTPUT, NODE_HIDDEN, NODE_INPUT, NODE_COPY, NODE_BIAS
7 | from pyneurgen.nodes import ACTIVATION_SIGMOID, ACTIVATION_TANH, ACTIVATION_LINEAR
8 |
9 |
class ProtoNodeTest(unittest.TestCase):
    """
    Unit tests for the ProtoNode base class.

    """

    def setUp(self):
        self.node = ProtoNode()
        self.node._activation_type = ACTIVATION_SIGMOID
        self.node._error_func = sigmoid_derivative

    def test_get_value(self):
        # get_value reports the raw, pre-activation value.
        self.node._value = .4
        self.assertEqual(.4, self.node.get_value())

    def test_randomize(self):
        pass

    def test_get_activation_type(self):
        """
        Verifies that the stored activation type is reported back.

        """
        self.assertEqual(ACTIVATION_SIGMOID, self.node.get_activation_type())
37 |
class NodeTest(unittest.TestCase):
    """
    Unit tests for the Node class.  setUp wires two lower nodes into
    self.node with weights .3 and .7.

    """

    def setUp(self):

        self.node = Node()

        node1 = Node()
        node2 = Node()

        node1._value = .2
        node2._value = .1
        node1.error = .8
        node2.error = .4

        node1.set_activation_type(ACTIVATION_SIGMOID)
        node2.set_activation_type(ACTIVATION_SIGMOID)

        self.node.add_input_connection(
            Connection(node1, self.node, .3))
        self.node.input_connections.append(
            Connection(node2, self.node, .7))

    def test_set_activation_type(self):
        """
        Each activation type must install the matching activation and
        derivative functions; an unknown type must raise ValueError.

        """

        self.node._activate = 'error'
        self.node._error_func = 'error'
        self.node._activation_type = 'error'

        self.node.set_activation_type(ACTIVATION_SIGMOID)

        self.assertEqual(sigmoid, self.node._activate)
        self.assertEqual(sigmoid_derivative, self.node._error_func)
        self.assertEqual(ACTIVATION_SIGMOID, self.node._activation_type)

        self.node._activate = 'error'
        self.node._error_func = 'error'
        self.node._activation_type = 'error'

        self.node.set_activation_type(ACTIVATION_TANH)

        self.assertEqual(tanh, self.node._activate)
        self.assertEqual(tanh_derivative, self.node._error_func)
        self.assertEqual(ACTIVATION_TANH, self.node._activation_type)

        self.node._activate = 'error'
        self.node._error_func = 'error'
        self.node._activation_type = 'error'

        self.node.set_activation_type(ACTIVATION_LINEAR)

        self.assertEqual(linear, self.node._activate)
        self.assertEqual(linear_derivative, self.node._error_func)
        self.assertEqual(ACTIVATION_LINEAR, self.node._activation_type)

        # assertRaises replaces failUnlessRaises, an alias deprecated in
        # Python 2.7 and removed in Python 3.12.
        self.assertRaises(
            ValueError,
            self.node.set_activation_type, 'error')

    def test_set_error_func(self):

        self.node._error_func = 'error'
        self.node._set_error_func(ACTIVATION_SIGMOID)
        self.assertEqual(sigmoid_derivative, self.node._error_func)

        self.node._error_func = 'error'
        self.node._set_error_func(ACTIVATION_TANH)
        self.assertEqual(tanh_derivative, self.node._error_func)

        self.node._error_func = 'error'
        self.node._set_error_func(ACTIVATION_LINEAR)
        self.assertEqual(linear_derivative, self.node._error_func)

    def test_set_value(self):

        self.node._value = .2
        self.node.set_value(.3)
        self.assertAlmostEqual(.3, self.node._value)

    def test_get_value(self):

        self.node._value = .2
        self.assertAlmostEqual(.2, self.node.get_value())

    def test_error_func(self):

        self.node.set_activation_type(ACTIVATION_SIGMOID)
        self.assertAlmostEqual(
            sigmoid_derivative(.2),
            self.node.error_func(.2))

    def test_feed_forward(self):

        # Seed the node with a junk value to prove feed_forward replaces
        # it.  (The original test assigned self.node_value -- a typo that
        # created an unused attribute on the test case instead.)
        self.node._value = 1000.0

        self.node.feed_forward()

        total = sigmoid(.2) * .3 + sigmoid(.1) * .7

        self.assertAlmostEqual(total, self.node._value)

    def test__init__(self):

        self.node = Node('test')
        self.assertEqual('test', self.node.node_type)

    def test_add_input_connection(self):

        connections = len(self.node.input_connections)
        self.node.add_input_connection(
            Connection(ProtoNode(), self.node))

        self.assertEqual(connections + 1, len(self.node.input_connections))

        # A connection whose upper node is not this node must be rejected.
        self.assertRaises(
            ValueError,
            self.node.add_input_connection, Connection(Node(), Node()))

    def test_update_error(self):

        # upper_node1.error = .8
        # upper_node2.error = .4
        # conn1 weight = .3
        # conn2 weight = .7

        self.node.node_type = NODE_OUTPUT
        self.node.set_activation_type(ACTIVATION_SIGMOID)
        halt_on_extremes = True
        self.node._value = .4
        self.node.target = .55
        self.node.error = 0.0

        self.node.update_error(halt_on_extremes)

        self.assertAlmostEqual(.55 - sigmoid(.4), self.node.error)

        # Hidden nodes scale accumulated error by the derivative instead.
        self.node.node_type = NODE_HIDDEN
        self.node.set_activation_type(ACTIVATION_SIGMOID)
        halt_on_extremes = True
        self.node._value = .4
        self.node.error = .55

        self.node.update_error(halt_on_extremes)

        self.assertAlmostEqual(
            .55 * sigmoid_derivative(sigmoid(.4)),
            self.node.error)

    def test__update_lower_node_errors(self):

        self.node.error = .55
        halt_on_extremes = True

        node1 = self.node.input_connections[0].lower_node
        node2 = self.node.input_connections[1].lower_node

        node1.error = 0.0
        node2.error = 0.0

        self.node._update_lower_node_errors(halt_on_extremes)

        self.assertAlmostEqual(
            .3 * .55,
            self.node.input_connections[0].lower_node.error)

        self.assertAlmostEqual(
            .7 * .55,
            self.node.input_connections[1].lower_node.error)

    def test_adjust_weights(self):

        learnrate = .35
        halt_on_extremes = True
        self.node.error = .9
        self.node.set_activation_type(ACTIVATION_SIGMOID)

        # adjusts incoming values
        conn1 = .3 + .35 * sigmoid(.2) * .9
        conn2 = .7 + .35 * sigmoid(.1) * .9

        self.node.adjust_weights(learnrate, halt_on_extremes)

        self.assertAlmostEqual(
            conn1,
            self.node.input_connections[0]._weight)

        self.assertAlmostEqual(
            conn2,
            self.node.input_connections[1]._weight)

    def test__adjust_weight(self):

        # learnrate = .20
        # activate_value = .25
        # error = .10

        self.assertAlmostEqual(
            .20 * .25 * .10,
            self.node._adjust_weight(.20, .25, .10))
243 |
244 |
class CopyNodeTest(unittest.TestCase):
    """
    Unit tests for the CopyNode class.

    """

    def setUp(self):
        self.node = CopyNode()

    def test__init__(self):
        self.assertEqual(NODE_COPY, self.node.node_type)

    def test_set_source_node(self):

        source_node = Node()
        self.node.set_source_node(source_node)

        self.assertEqual(source_node, self.node._source_node)

    def test_get_source_node(self):

        self.node._source_node = Node()
        self.assertEqual(self.node._source_node, self.node.get_source_node())

    def test_load_source_value(self):

        self.node._value = .25
        self.node._existing_weight = .25
        self.node._incoming_weight = .5

        source_node = Node()
        source_node.set_value(.3)
        source_node.set_activation_type(ACTIVATION_SIGMOID)
        self.node.set_source_node(source_node)

        # activate
        self.node._source_type = 'a'
        self.node.load_source_value()
        self.assertAlmostEqual(sigmoid(.3) * .5 + .25 * .25, self.node._value)

        # value
        self.node._value = .25
        self.node._source_type = 'v'
        self.node.load_source_value()
        self.assertAlmostEqual(.3 * .5 + .25 * .25, self.node._value)

        # invalid source type
        # assertRaises replaces failUnlessRaises, an alias deprecated in
        # Python 2.7 and removed in Python 3.12.
        self.node._source_type = 'f'
        self.assertRaises(ValueError, self.node.load_source_value)

    def test_get_source_type(self):
        self.node._source_type = 'a'
        self.assertEqual('a', self.node.get_source_type())

    def test_get_incoming_weight(self):
        self.node._incoming_weight = .3
        self.assertAlmostEqual(.3, self.node.get_incoming_weight())

    def test_get_existing_weight(self):
        self.node._existing_weight = .3
        self.assertAlmostEqual(.3, self.node.get_existing_weight())

    def test_source_update_config(self):

        self.node.source_update_config('a', .3, .2)
        self.assertEqual('a', self.node._source_type)
        self.assertAlmostEqual(.3, self.node._incoming_weight)
        self.assertAlmostEqual(.2, self.node._existing_weight)

        # Invalid source type, then out-of-range incoming and existing
        # weights, must each raise.
        self.assertRaises(
            ValueError, self.node.source_update_config, 'e', .3, .2)
        self.assertRaises(
            ValueError, self.node.source_update_config, 'a', 1.3, .2)
        self.assertRaises(
            ValueError, self.node.source_update_config, 'a', .3, 1.2)
320 |
321 |
class BiasNodeTest(unittest.TestCase):
    """
    Unit tests for the BiasNode class.

    """

    def setUp(self):
        self.node = BiasNode()

    def test__init__(self):
        # A bias node is pinned to 1.0 for both value and activation.
        self.assertEqual(NODE_BIAS, self.node.node_type)
        self.assertEqual(1.0, self.node._value)
        self.assertEqual(1.0, self.node._activated)

    def test_activate(self):
        self.assertEqual(1.0, self.node.activate())

    def test_error_func(self):
        # The error function is a constant 1.0 regardless of the input.
        self.assertEqual(1.0, self.node.error_func(.3))
346 |
347 |
class ConnectionTest(unittest.TestCase):
    """
    Unit tests for the Connection class.

    """

    def setUp(self):

        self.lower_node = Node()
        self.upper_node = Node()

        self.lower_node._value = .1
        self.upper_node._value = .2
        self.upper_node.error = .8

        self.connection = Connection(self.lower_node, self.upper_node)

    def test_set_weight(self):
        self.connection.set_weight(.3)
        self.assertAlmostEqual(.3, self.connection._weight)

    def test_add_weight(self):
        # add_weight accumulates on top of the current weight.
        self.connection.set_weight(.3)
        self.connection.add_weight(.3)
        self.assertAlmostEqual(.6, self.connection._weight)

    def test_get_weight(self):
        self.connection.set_weight(.3)
        self.assertAlmostEqual(.3, self.connection.get_weight())
377 |
378 |
379 | ## remaining tests
380 | #def test sigmoid(value):
381 |
382 | #pass
383 |
384 | #def sigmoid_derivative(value):
385 |
386 | #pass
387 |
388 | #def tanh(value):
389 |
390 | #pass
391 |
392 | #def tanh_derivative(value):
393 |
394 | #pass
395 |
396 | #def linear(value):
397 |
398 | #pass
399 |
400 | #def linear_derivative(value):
401 |
402 | #pass
403 |
404 | #nodesTestSuite = unittest.TestSuite()
405 | #nodesTestSuite.addTest(ProtoNodeTest('proto_node_test'))
406 | #nodesTestSuite.addTest(NodeTest('node_test'))
407 | #nodesTestSuite.addTest(BiasNodeTest('bias_node_test'))
408 | #nodesTestSuite.addTest(ConnectionTest('connection_test'))
409 |
410 |
411 |
412 |
# Running this module directly executes the node tests with the standard
# unittest runner.
if __name__ == '__main__':
    unittest.main()
415 |
--------------------------------------------------------------------------------
/pyneurgen/nodes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2012 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see .
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This module implements the nodes for an artificial neural network.
23 |
24 | """
25 | import math
26 | from pyneurgen.utilities import rand_weight
27 |
ACTIVATION_SIGMOID = 'sigmoid'
ACTIVATION_TANH = 'tanh'
ACTIVATION_LINEAR = 'linear'

RANDOM_CONSTRAINT = 1.0

NODE_OUTPUT = 'output'
NODE_HIDDEN = 'hidden'
NODE_INPUT = 'input'
NODE_COPY = 'copy'
NODE_BIAS = 'bias'


class ProtoNode(object):
    """
    Base class for network nodes.  A node holds a value, activates it, and
    maintains the connections arriving from other nodes.

    """

    def __init__(self):
        """
        Sets up the bare node state.  Since the class is a prototype,
        subclasses replace much of this with meaningful values.

        """

        self.node_no = None
        self.node_type = None
        self._value = 0.0
        self.input_connections = []
        self._activation_type = None
        self.error = 0.0
        self.target = None

    def get_value(self):
        """
        Returns the node's value prior to activation.

        """

        return self._value

    @staticmethod
    def _activate(value):
        """
        Stub activation; concrete nodes install a real activation function.

        """

        return value

    @staticmethod
    def _error_func(value):
        """
        Stub error function; concrete nodes install a real derivative.

        """

        return value

    def activate(self):
        """
        Applies the activation function to the node's value.

        """

        return self._activate(self._value)

    def error_func(self, value):
        """
        Applies the error function, typically the derivative of the
        activation.

        """

        return self._error_func(value)

    def randomize(self, random_constraint=RANDOM_CONSTRAINT):
        """
        Assigns a random weight, limited in scope by random_constraint, to
        every input connection.

        """

        for connection in self.input_connections:
            connection.set_weight(rand_weight(random_constraint))

    def get_activation_type(self):
        """
        Returns the activation type of the node.

        """

        return self._activation_type

    def update_error(self, halt_on_extremes):
        """
        Updates the node's error during back propagation.

        Output nodes take the difference between the target and the
        activation.  Other nodes scale their accumulated upstream error by
        the activation derivative and, when halt_on_extremes is set, raise
        if the error has degenerated to NaN.  The result is then pushed
        down to the lower nodes.

        """

        if self.node_type != NODE_OUTPUT:
            # Non-output nodes have accumulated errors from the layer above.
            self.error *= self.error_func(self.activate())
            if halt_on_extremes and math.isnan(self.error):
                raise ValueError("Error term has become Nan.")
        else:
            self.error = self.target - self.activate()

        self._update_lower_node_errors(halt_on_extremes)

    def _update_lower_node_errors(self, halt_on_extremes):
        """
        Distributes this node's error across its input connections.

        Each lower node's error grows by this node's error weighted by the
        connection weight; with halt_on_extremes set, a NaN result raises.

        """

        for connection in self.input_connections:
            lower = connection.lower_node
            lower.error += connection.get_weight() * self.error
            if halt_on_extremes and math.isnan(lower.error):
                raise ValueError("Error term has become Nan.")
164 |
165 |
class Node(ProtoNode):
    """
    This class implements the standard nodes used in the network.  The
    activation type must be one of ACTIVATION_SIGMOID, ACTIVATION_TANH,
    or ACTIVATION_LINEAR.

    """

    def __init__(self, node_type=None):
        """
        Initializes the node with an optional node type, such as
        NODE_INPUT, NODE_HIDDEN, or NODE_OUTPUT.
        """
        ProtoNode.__init__(self)
        self.node_type = node_type
        self._error_func = None

    def set_activation_type(self, activation_type):
        """
        This function sets the activation type for the node.  Currently
        available values are ACTIVATION_SIGMOID, ACTIVATION_TANH, and
        ACTIVATION_LINEAR.  The corresponding derivative used by the
        error calculations is assigned at the same time.

        Raises ValueError for an unknown activation type.

        """

        if activation_type == ACTIVATION_SIGMOID:
            self._activate = sigmoid
        elif activation_type == ACTIVATION_TANH:
            self._activate = tanh
        elif activation_type == ACTIVATION_LINEAR:
            self._activate = linear
        else:
            raise ValueError("invalid activation type: %s" % (activation_type))

        self._set_error_func(activation_type)
        self._activation_type = activation_type

    def _set_error_func(self, activation_type):
        """
        This function assigns the derivative function that matches the
        activation type.

        """

        if activation_type == ACTIVATION_SIGMOID:
            self._error_func = sigmoid_derivative
        elif activation_type == ACTIVATION_TANH:
            self._error_func = tanh_derivative
        elif activation_type == ACTIVATION_LINEAR:
            self._error_func = linear_derivative
        else:
            raise ValueError("Invalid activation function")

    def set_value(self, value):
        """
        Sets the raw (pre-activation) value of the node.  An explicit
        setter is used so that a bias node, whose value is fixed at 1.0,
        cannot be assigned to accidentally.

        """

        self._value = value

    def get_value(self):
        """
        This function returns the internal (pre-activation) value of the
        node.

        """

        return self._value

    def feed_forward(self):
        """
        This function walks the input connections, summing each lower
        node's activation value times the connection weight, and stores
        the total as this node's value.

        Raises ValueError if a lower node has not been initialized.

        """

        sum1 = 0.0
        for conn in self.input_connections:
            if conn.lower_node.get_value() is None:
                raise ValueError("Uninitialized node %s" % (
                    conn.lower_node.node_no))

            sum1 += conn.lower_node.activate() * conn.get_weight()

        self.set_value(sum1)

    def add_input_connection(self, conn):
        """
        This function adds an input connection, defined as a connection
        arriving from a lower-numbered (closer to the input) layer.

        A specific function is used rather than a bare append to avoid
        accidentally adding an input connection to a bias node.

        Raises ValueError unless the connection's upper node is this
        node.

        """
        if conn.upper_node == self:
            self.input_connections.append(conn)
        else:
            raise ValueError("The upper node is always current node.")

    def adjust_weights(self, learnrate, halt_on_extremes):
        """
        This function adjusts incoming weights as part of the back
        propagation process, taking into account the node error.  The
        learnrate moderates the degree of change applied to the weight.

        If halt_on_extremes is True, a weight that has become NaN raises
        ValueError.

        """

        for conn in self.input_connections:
            conn.add_weight(self._adjust_weight(
                learnrate,
                conn.lower_node.activate(),
                self.error))
            conn.weight_adjusted = True
            # The previous version read the weight back and re-set the
            # identical value (a no-op flagged "Fix this"); only the NaN
            # inspection is actually needed here.
            if halt_on_extremes:
                if math.isnan(conn.get_weight()):
                    raise ValueError("Weight term has become Nan.")

    @staticmethod
    def _adjust_weight(learnrate, activate_value, error):
        """
        This function accepts the learn rate, the activated value
        received from the node connected below, and the current error of
        the node, and returns their product: the adjustment to apply to
        the connection weight.

        """

        return learnrate * activate_value * error
304 |
305 |
class CopyNode(Node):
    """
    This class maintains the form used for copy nodes in recurrent
    networks.  After propagation, the value from a node in an upper
    layer (such as a hidden node) is copied into the CopyNode; the
    source_node defines where the value arrives from.

    An issue with using copy nodes is that a sequence must be adhered to
    when copying.  For example, if a copy node's value is the source for
    another copy node, the downstream nodes must be copied first.

    """

    def __init__(self):
        """
        Initializes the node and sets up initial values for the weights
        applied to values copied into it.
        """

        Node.__init__(self)
        self.node_type = NODE_COPY
        self._source_node = None
        self._source_type = None
        # Full weight on the incoming value, none on the existing value:
        # an Elman-style update by default.
        self._incoming_weight = 1.0
        self._existing_weight = 0.0

        self.set_activation_type(ACTIVATION_LINEAR)

    def set_source_node(self, node):
        """
        Sets the node that supplies previous recurrent values.

        """

        self._source_node = node

    def get_source_node(self):
        """
        Gets the node that supplies previous recurrent values.

        """

        return self._source_node

    def load_source_value(self):
        """
        Transfers the source node's value into this node, blending the
        existing value (times the existing weight) with the incoming
        value (times the incoming weight).

        Raises ValueError if the source type has not been configured.

        """
        if self._source_type == 'a':
            value = self._source_node.activate()
        elif self._source_type == 'v':
            value = self._source_node.get_value()
        else:
            raise ValueError("Invalid source type")

        self._value = self._value * self._existing_weight + \
            value * self._incoming_weight

    def get_source_type(self):
        """
        Gets the type of source value in use: either 'a' for the
        activation value or 'v' for the summed input value.

        """

        return self._source_type

    def get_incoming_weight(self):
        """
        Gets the weight multiplied times the incoming source value.

        """

        return self._incoming_weight

    def get_existing_weight(self):
        """
        Gets the weight multiplied times the existing value.

        """

        return self._existing_weight

    def source_update_config(self, source_type, incoming_weight,
            existing_weight):
        """
        Configures which source information is used and how the incoming
        and existing values are discounted.

        Source type can be either 'a' for the activation value or 'v'
        for the summed input value.  Both weights must be floats in the
        range 0.0 to 1.0.

        Setting the existing weight to zero and the incoming weight to
        1.0 gives an Elman-style update; setting the existing weight to
        a fraction such as .5 gives a Jordan-style update.

        """

        if source_type in ['a', 'v']:
            self._source_type = source_type
        else:
            # Bug fix: the %s placeholder was previously never filled in.
            raise ValueError(
                "Invalid source type, %s. Valid choices are 'a' or 'v'" % (
                    source_type))

        errmsg = """The incoming weight, %s must be a float value
            from 0.0 to 1.0""" % (incoming_weight)
        if not isinstance(incoming_weight, float):
            raise ValueError(errmsg)
        if not (0.0 <= incoming_weight <= 1.0):
            raise ValueError(errmsg)

        self._incoming_weight = incoming_weight

        errmsg = """The existing_weight, %s must be a float value
            from 0.0 to 1.0""" % (existing_weight)
        if not isinstance(existing_weight, float):
            raise ValueError(errmsg)
        if not (0.0 <= existing_weight <= 1.0):
            raise ValueError(errmsg)

        self._existing_weight = existing_weight
435 |
436 |
class BiasNode(ProtoNode):
    """
    A bias node contributes through its connections only; its value and
    activation are fixed at 1.0.

    """

    def __init__(self):
        """
        Initializes the node with the bias type and the constant value.
        """

        ProtoNode.__init__(self)
        self.node_type = NODE_BIAS
        # The value never changes; only the outgoing connections matter.
        self._value = 1.0
        self._activated = self._value

    @staticmethod
    def activate(value=None):
        """
        Always returns 1.0, regardless of input.

        """

        return 1.0

    @staticmethod
    def error_func(value=1.0):
        """
        Always returns 1.0, regardless of input.

        """

        return 1.0
474 |
475 |
class Connection(object):
    """
    Holds the weight linking two nodes, along with references to the
    lower (input-side) and upper (output-side) nodes it joins.

    """

    def __init__(self, lower_node, upper_node, weight=0.0):
        """
        Stores the two endpoints and the initial weight.  The lower node
        lives on a layer closer to the input layer; the upper node lives
        on a layer closer to the output layer.

        """

        self.lower_node = lower_node
        self.upper_node = upper_node
        self._weight = None
        self.set_weight(weight)

    def set_weight(self, weight):
        """
        Replaces the connection weight, which scales the impact a lower
        node's activation has on the upper node's value.

        Raises ValueError unless weight is a float.

        """

        err_msg = "The weight, %s, must be a float value" % (weight)
        if not isinstance(weight, float):
            raise ValueError(err_msg)
        self._weight = weight

    def add_weight(self, weight):
        """
        Adds an increment to the connection weight.

        Raises ValueError unless weight is a float.

        """

        err_msg = "The weight, %s, must be a float value" % (weight)
        if not isinstance(weight, float):
            raise ValueError(err_msg)
        self._weight += weight

    def get_weight(self):
        """
        Returns the current connection weight.

        """

        return self._weight
532 |
533 |
def sigmoid(value):
    """
    Returns the logistic sigmoid of value: 1 / (1 + e**-value).

    For very negative inputs math.exp(-value) overflows; the true
    sigmoid is vanishingly small there, so 0.0 is returned instead.

    """

    try:
        return 1.0 / (1.0 + math.exp(-value))
    except OverflowError:
        return 0.0
546 |
547 |
def sigmoid_derivative(value):
    """
    Returns the sigmoid derivative expressed in terms of the already
    activated value: f'(x) = f(x) * (1 - f(x)).

    """

    complement = 1.0 - value
    return value * complement
555 |
556 |
def tanh(value):
    """
    Returns the hyperbolic tangent of value; a thin wrapper so the
    activation can be assigned like the other activation functions.

    """

    return math.tanh(value)
564 |
565 |
def tanh_derivative(value):
    """
    Returns the derivative of tanh at value: 1 - tanh(value)**2.

    """

    return 1.0 - math.tanh(value) ** 2
573 |
574 |
def linear(value):
    """
    Identity activation: returns the value unchanged.

    """

    return value
582 |
583 |
def linear_derivative(value):
    """
    Returns the constant derivative, 1.0, of the linear activation.
    The incoming value is accepted for interface parity with the other
    derivative functions and is otherwise ignored.

    """

    value = 1.0
    return value
593 |
--------------------------------------------------------------------------------
/pyneurgen/genotypes.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright (C) 2012 Don Smiley ds@sidorof.com
4 |
5 | # This program is free software: you can redistribute it and/or modify
6 | # it under the terms of the GNU General Public License as published by
7 | # the Free Software Foundation, either version 3 of the License, or
8 | # (at your option) any later version.
9 |
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 |
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see .
17 |
18 | # See the LICENSE file included in this archive
19 | #
20 |
21 | """
22 | This module implements genotypes for grammatical evolution.
23 |
24 | """
25 | from datetime import datetime
26 | import logging
27 | import random
28 | import re
29 | import traceback
30 |
31 | from pyneurgen.utilities import base10tobase2, base2tobase10
32 |
33 | STOPLIST = ['runtime_resolve', 'set_bnf_variable']
34 | VARIABLE_FORMAT = '(\<([^\>|^\s]+)\>)'
35 | MUT_TYPE_M = 'm'
36 | MUT_TYPE_S = 's'
37 | BNF_PROGRAM = 'program'
38 |
39 | # Positions in _timeouts
40 | TIMEOUT_PROG_BUILD = 0
41 | TIMEOUT_PROG_EXECUTE = 1
42 |
43 | DEFAULT_LOG_FILE = 'pyneurgen.log'
44 | DEFAULT_LOG_LEVEL = logging.INFO
45 |
46 | logging.basicConfig(format='%(asctime)s %(message)s',
47 | filename=DEFAULT_LOG_FILE,
48 | level=DEFAULT_LOG_LEVEL)
49 |
50 |
51 | class Genotype(object):
52 | """
53 | The Genotype class holds the genetic material. It has the ability to run
54 | fitness functions and mutate. It is an internal object, and so few aspects
55 | of it would be regarded as public.
56 |
57 | The class takes many properties from the grammatical evolution class, and
58 | so in some ways it might seem to be unnecessarily duplicative. The reason
59 | for doing it this way is to make each genotype relatively complete on its
60 | own. That way, if the genotype is packed up and marshalled off to a remote
61 | device for processing, everything is there to handle the tasks.
62 |
63 | """
64 |
65 | def __init__(self, start_gene_length,
66 | max_gene_length,
67 | member_no):
68 | """
69 | This function initiates the genotype. It must open with the starting
70 | gene length and the maximum gene length. These lengths are the decimal
71 | lengths not the binary lengths. In addition, the member number is
72 | needed, since the genotype creation process is controlled by the
73 | grammatic evolution class.
74 |
75 | """
76 |
77 | self.member_no = member_no
78 | self.local_bnf = {}
79 | self._max_program_length = None
80 | self._fitness = None
81 | self._fitness_fail = None
82 | self._wrap = True
83 | self._extend_genotype = True
84 | self.starttime = None
85 | self._timeouts = (0, 0)
86 |
87 | self._gene_length = start_gene_length
88 | self._max_gene_length = max_gene_length
89 |
90 | self.binary_gene = None
91 | self.decimal_gene = None
92 | self._generate_binary_gene(self._gene_length)
93 | self.generate_decimal_gene()
94 |
95 | self._position = (0, 0)
96 |
97 | self.errors = []
98 |
99 | def _generate_binary_gene(self, length):
100 | """
101 | This function creates a random set of bits.
102 |
103 | """
104 |
105 | geno = []
106 | count = 0
107 | while count < length * 8:
108 | geno.append(str(random.randint(0, 1)))
109 | count += 1
110 | self.binary_gene = ''.join(geno)
111 |
112 | def set_binary_gene(self, binary_gene):
113 | """
114 | This function sets the value of the binary gene directly. This is
115 | used in the crossover and mutation functions. There is an automatic
116 | adjustment to trim the length to a multiple of 8.
117 |
118 | """
119 |
120 | length = len(binary_gene)
121 | trunc_binary_gene = binary_gene[:length - (length % 8)]
122 | self.binary_gene = trunc_binary_gene
123 | self._gene_length = len(self.binary_gene) / 8
124 |
125 | def generate_decimal_gene(self):
126 | """
127 | This function converts the binary gene to a usable decimal gene.
128 |
129 | """
130 |
131 | if self._gene_length == 0:
132 | raise ValueError("Invalid gene length")
133 | dec_geno = []
134 |
135 | for i in range(0, int(self._gene_length * 8), 8):
136 | item = self.binary_gene[i:i + 8]
137 | str_trans = base2tobase10(item)
138 | dec_geno.append(int(str_trans))
139 |
140 | self.decimal_gene = dec_geno
141 | self._position = (0, 0)
142 |
143 | @staticmethod
144 | def _dec2bin_gene(dec_gene):
145 | """
146 | This is a utility function that converts a decimal list to binary
147 | string.
148 |
149 | """
150 |
151 | bin_gene = []
152 | for item in dec_gene:
153 | bin_gene.append(base10tobase2(item, zfill=8))
154 | return ''.join(bin_gene)
155 |
    @staticmethod
    def _place_material(program, item, start_pos, end_pos):
        """
        Utility that replaces the span of ``program`` from start_pos
        through end_pos with ``item`` and returns the resulting string.

        Raises ValueError when the positions fall outside the program or
        are out of order.

        """

        if end_pos > len(program) - 1:
            raise ValueError("end_pos greater than len(program)")
        if start_pos < 0:
            raise ValueError("starting position cannot be less than 0")
        if start_pos > end_pos:
            raise ValueError("starting position > end postion")
        if start_pos == 0:
            if end_pos == len(program) - 1:
                # The whole string is replaced.
                program = item
            else:
                # Front replacement: the character at end_pos is dropped.
                program = item + program[end_pos + 1:]
        else:
            if end_pos == len(program) - 1:
                # Tail replacement.
                program = program[:start_pos] + item
            else:
                # NOTE(review): this branch keeps program[end_pos] while
                # the start_pos == 0 branch above drops it (end_pos + 1).
                # One of the two looks off by one -- confirm which
                # boundary is intended before relying on mid-string
                # replacement.
                program = program[:start_pos] + item + \
                        program[end_pos:]
        return program
182 |
183 | def runtime_resolve(self, item, return_type):
184 | """
185 | This function is callable by the generated program to enable
186 | additional values be pulled from genotype and BNF as the need arises
187 | during execution of the program.
188 |
189 | Usage is self.runtime_resolve('', return_type);
190 |
191 | The return type casts the result back to the format needed. Supported
192 | return types are: 'int', 'float', 'str', and 'bool'.
193 |
194 | """
195 |
196 | value = self._map_variables(item, False)
197 | value = self._fmt_resolved_vars(value, return_type)
198 | return value
199 |
200 | @staticmethod
201 | def _fmt_resolved_vars(value, return_type):
202 | """
203 | This method formats the result for a resolved variable for use
204 | during runtime so that the information can fit into the context of what
205 | is running.
206 |
207 | Note that if the execute code was to be subclassed to a parser to avoid
208 | the use of exec, then this funtion should also be done as well, since
209 | it uses eval.
210 |
211 | """
212 |
213 | return_types = ['int', 'float', 'str', 'bool']
214 |
215 | if return_type == 'str':
216 | return value
217 | elif return_type == 'int':
218 | return conv_int(value)
219 | elif return_type == 'float':
220 | try:
221 | value = float(value)
222 | except:
223 | # allow normal error message to bubble up
224 | value = eval(value)
225 | elif return_type == 'bool':
226 | if value in 'True':
227 | value = True
228 | elif value == 'False':
229 | value = False
230 | else:
231 | msg = "return_type must be either True or False: %s"
232 | raise ValueError(msg, value)
233 | else:
234 | msg = "return_type, %s must be in %s" % (value, return_types)
235 | raise ValueError(msg)
236 |
237 | return value
238 |
239 | def set_bnf_variable(self, variable_name, value):
240 | """
241 | This function adds a variable to the bnf. The format is the name,
242 | typically bounded by <>, such as "", and the parameters
243 | are in the form of a list. The items in the list will be converted to
244 | strings, if not already.
245 |
246 | """
247 |
248 | if isinstance(value, list):
249 | self.local_bnf[variable_name] = value
250 | else:
251 | self.local_bnf[variable_name] = [str(value)]
252 |
253 | def resolve_variable(self, variable):
254 | """
255 | This function receives a variable and using the variable as a key
256 | looks it up in the local_bnf. The list of possible values available
257 | are then used by the genotype via a codon to select a final value that
258 | would be used.
259 |
260 | """
261 |
262 | values = self.local_bnf[variable]
263 | #try:
264 | value = self._select_choice(self._get_codon(), values)
265 | #except:
266 | #raise ValueError("""
267 | #Failure to resolve variable: %s values: %s
268 | #""" % (variable, values))
269 |
270 | return str(value)
271 |
    def _map_variables(self, program, check_stoplist):
        """
        Repeatedly substitutes every '<variable>' token in program with
        a value resolved from the BNF.  If check_stoplist is True, a
        variable preceded by a run-time call (see STOPLIST) is left in
        place to be resolved later, during program execution.

        The process runs until all of the variables have been satisfied,
        or a time/size limit is hit, in which case an error is raised.
        NOTE(review): StandardError is Python-2-only; under Python 3 the
        raise itself fails with NameError instead.

        """

        def on_stoplist(item):
            """
            Returns True when item contains any runtime-resolution
            marker from STOPLIST.

            """

            status = False
            for stopitem in STOPLIST:
                if item.find(stopitem) > -1:
                    status = True

            return status

        self.errors = []
        incomplete = True
        prg_list = re.split(VARIABLE_FORMAT, program)
        while incomplete:
            position = 0
            continue_map = False
            while position < len(prg_list):
                item = prg_list[position]
                if item.strip() == '':
                    del(prg_list[position])
                else:
                    if item[0] == "<" and item[-1] == ">":
                        # check stop list
                        status = True
                        if check_stoplist and position > 0:
                            if on_stoplist(prg_list[position - 1]):
                                status = False
                        if status:
                            prg_list[position] = self.resolve_variable(item)
                            continue_map = True

                        # re.split emits a capture-group duplicate after
                        # each variable token; discard it.
                        del(prg_list[position + 1])
                    position += 1

            # Reassemble and re-split: resolved text may itself contain
            # new variables.
            program = ''.join(prg_list)
            prg_list = re.split(VARIABLE_FORMAT, program)
            elapsed = datetime.now() - self.starttime

            # Reasons to fail the process
            if check_stoplist:
                # Program already running
                if elapsed.seconds > self._timeouts[TIMEOUT_PROG_EXECUTE]:
                    msg = "elapsed time greater than program timeout"
                    logging.debug(msg)
                    self.errors.append(msg)
                    raise StandardError(msg)
                    #continue_map = False
            else:
                # Preprogram
                if elapsed.seconds > self._timeouts[TIMEOUT_PROG_BUILD]:
                    msg = "elapsed time greater than preprogram timeout"
                    logging.debug(msg)
                    self.errors.append(msg)
                    raise StandardError(msg)
                    #continue_map = False

            if len(program) > self._max_program_length:
                # Runaway process
                msg = "program length, %s is beyond max program length: %s" % (
                    len(program), self._max_program_length)
                logging.debug(msg)
                logging.debug("program follows:")
                #logging.debug(program)
                self.errors.append(msg)
                raise StandardError(msg)
                #continue_map = False

            # No substitutions occurred this pass: mapping is complete.
            if continue_map is False:
                return program
355 |
356 | def _get_codon(self):
357 | """
358 | This function gets the next decimal codon from the genotype.
359 |
360 | There are two counters for this function. One pointer is used to
361 | indicate the next location of the decimal code that is to be returned.
362 | The other pointer is the index of the codon that has been drawn
363 | regardless if process has wrapped around.
364 |
365 | If the end of the genotype is reached, and the wrap flag is True, then
366 | the position for the next codon is taken from the front again.
367 | Additionally, if wrapping has taken place and the extend_genotype flag
368 | is set, then the genotype will continue to grow in length until the
369 | max_gene_length is reached.
370 |
371 | If the wrap flag is not set, when the end of the genotype is
372 | reached, an error is raised.
373 |
374 | At the start of this function, the position has been already
375 | incremented to get the codon. Therefore, the position has to be
376 | checked to determine whether it is pointing past the end of of the
377 | maximum length of the gene. If it is, then the position is just
378 | reset back to the starting position.
379 |
380 | """
381 |
382 | # position is the location on the gene, sequence_no is the number of
383 | # codons used since the start
384 | position, sequence_no = self._position
385 | length = len(self.decimal_gene)
386 | wrap = self._wrap
387 |
388 | status = True
389 | while status:
390 | if not wrap:
391 | if sequence_no == self._max_gene_length:
392 | raise ValueError("Max length of genotype reached.")
393 | codon = self.decimal_gene[position]
394 | if self._extend_genotype:
395 | if sequence_no == length:
396 | # modify var directly
397 | self.decimal_gene.append(codon)
398 | self._gene_length = len(self.decimal_gene)
399 |
400 | position += 1
401 | sequence_no += 1
402 | if position == length:
403 | if wrap:
404 | position = 0
405 |
406 | self._position = (position, sequence_no)
407 | return codon
408 |
409 | def _reset_gene_position(self):
410 | """
411 | This function resets the next decimal gene that will be selected back
412 | to 0. The point of this is when reusing a gene that is already
413 | instantiated, you can regenerate your program using exactly the same
414 | characteristics as before.
415 |
416 | """
417 |
418 | self._position = (0, 0)
419 |
420 | def _update_genotype(self):
421 | """
422 | This function updates the binary genotype from the decimal gene if the
423 | genotype is extended.
424 |
425 | """
426 |
427 | self.set_binary_gene(self._dec2bin_gene(self.decimal_gene))
428 |
429 | def compute_fitness(self):
430 | """
431 | This function computes the fitness function. The process consists
432 | mapping the codon to the program variables and running the resulting
433 | program and computing the fitness value. In addition, the binary gene
434 | is updated if the decimal gene has been extended.
435 |
436 | """
437 |
438 | self._reset_gene_position()
439 | self._map_gene()
440 | if self._extend_genotype:
441 | logging.debug("updating genotype...")
442 | self._update_genotype()
443 | logging.debug("Finished updating genotype...")
444 |
445 | return self._fitness
446 |
    def _map_gene(self):
        """
        Applies the genotype to build and run a program.  Mapping the
        variables into the search space is an initial load; the created
        program can also pull values iteratively at runtime via
        runtime_resolve.

        If the mapping fails to create a viable program, or takes too
        long, the process is aborted and the fitness_fail value is used
        instead.  Progress and failures are written to the module log.

        NOTE(review): the empty-string BNF keys ('') below look like
        bracketed names (e.g. '<fitness>' / '<S>') stripped by the
        source extraction -- confirm against the upstream source.

        """

        # Pre-seed the fitness slot with the failure value; it is only
        # overwritten if the generated program completes.
        self.local_bnf[''] = [str(self._fitness_fail)]
        try:
            logging.debug("==================================================")
            logging.debug("mapping variables to program...")
            self.local_bnf[BNF_PROGRAM] = [
                'mapping variables into program failed']
            program = self._map_variables(self.local_bnf[''][0], True)
            logging.debug("finished mapping variables to program...")
            self.local_bnf[BNF_PROGRAM] = [program]
            #print program[program.find('def'):]
            logging.debug(program)
            self._execute_code(program)
            logging.debug("==================================================")
        except:
            # Deliberate best-effort: any failure in the generated
            # program leaves the pre-seeded fitness_fail value in place.
            #traceback.print_exc()
            #a = raw_input("waiting")
            logging.debug("program failed")
            # NOTE(review): _execute_code stores the program as a plain
            # string, so [0] here yields its first character, not the
            # program -- confirm intended.
            program = self.local_bnf['program'][0]
            logging.debug("errors: %s", (self.errors))
            logging.debug(program)
            #logging.debug(traceback.print_exc())
            logging.debug(traceback.format_exc())
            logging.debug("end of failure report")
            #a = raw_input("Program failed")
            #if a == "stop":
            #raise ValueError("Program halted")

        self._fitness = float(self.local_bnf[''][0])
492 |
493 | def _execute_code(self, program):
494 | """
495 | This function executes code that has been generated. This function
496 | would be subclassed if executing the code on a remote server, or
497 | swapping in a custom parser.
498 |
499 | """
500 |
501 | self.local_bnf['program'] = program
502 |
503 | # I'll revisit this again sometime.
504 | #print "compiling code..."
505 | #program_comp = compile(program, '', 'exec')
506 | #print "executing code..."
507 | #exec program_comp
508 | ns = locals()
509 | exec(program) in ns
510 |
511 | def mutate(self, mutation_rate, mutation_type):
512 | """
513 | This is function randomly mutates a binary genotype by changing 1 to 0
514 | and vice versa. It is not context-perserving at this level.
515 |
516 | """
517 |
518 | if mutation_type == MUT_TYPE_S:
519 | if random.random() < mutation_rate:
520 | self._single_mutate()
521 | elif mutation_type == MUT_TYPE_M:
522 | self._multiple_mutate(mutation_rate)
523 | else:
524 | raise ValueError("The mutation type must be either '%s' or '%s'",
525 | MUT_TYPE_S, MUT_TYPE_M)
526 |
527 | def _multiple_mutate(self, mutation_rate):
528 | """
529 | This function walks the gene and based upon the mutation rate will
530 | alter a bit.
531 |
532 | """
533 |
534 | if mutation_rate < 0.0:
535 | raise ValueError("The mutation rate must be >= 0.0")
536 | elif mutation_rate > 1.0:
537 | raise ValueError("The mutation rate must be <= 1.0")
538 | else:
539 | pass
540 |
541 | gene = self.binary_gene
542 | length = len(gene)
543 | for i in range(length):
544 | if random.random() < mutation_rate:
545 | gene = self._mutate(gene, i)
546 |
547 | self.set_binary_gene(''.join(gene))
548 | self.generate_decimal_gene()
549 |
550 | def get_binary_gene_length(self):
551 | """
552 | This function returns the length of the binary gene. Which is
553 | 8 times the length of the decimal gene.
554 |
555 | """
556 |
557 | return self._gene_length * 8
558 |
559 | def _single_mutate(self):
560 | """
561 | This function with a randomly selects a mutation point within the gene
562 | and changes a 1 to 0, or vice versa.
563 |
564 | """
565 |
566 | position = random.randint(0, self._gene_length * 8 - 1)
567 | gene = self.binary_gene
568 |
569 | self.binary_gene = self._mutate(gene, position)
570 | self.generate_decimal_gene()
571 |
572 | @staticmethod
573 | def _mutate(gene, position):
574 | """
575 | This function does the actual mutation of the gene at a specific
576 | position.
577 |
578 | """
579 |
580 | if gene[position] == '0':
581 | gene = ''.join([gene[:position], "1", gene[position + 1:]])
582 | else:
583 | gene = ''.join([gene[:position], "0", gene[position + 1:]])
584 |
585 | return gene
586 |
587 | @staticmethod
588 | def _select_choice(codon, selection):
589 | """
590 | This function, based upon the codon, makes a choice from the list.
591 | The determination is based upon the module of the codon to the length
592 | of the list of choices. For example, if the codon is 10 and the list
593 | is 7 choices long, then the selection would be from selection[3]. This
594 | ensures that for every codon for every selection there is some choice.
595 |
596 | """
597 |
598 | if isinstance(selection, list):
599 | return selection[codon % len(selection)]
600 | else:
601 | msg = "selection. %s, must be a list" % (selection)
602 | raise ValueError(msg)
603 |
604 | def get_program(self):
605 | """
606 | This function returns the program that has been generated. It is only
607 | valid after the gene has been mapped.
608 |
609 | """
610 |
611 | return self.local_bnf['program']
612 |
613 | def get_preprogram(self):
614 | """
615 | This function returns the prototype program to which the variables
616 | will be applied.
617 |
618 | """
619 |
620 | return self.local_bnf['']
621 |
622 | def get_fitness(self):
623 | """
624 | This function returns the fitness value that has been created as a
625 | result of running the fitness function.
626 |
627 | """
628 |
629 | return self._fitness
630 |
631 | def get_fitness_fail(self):
632 | """
633 | This function returns the fitness value that constitutes failure as
634 | assigned by the parent grammatical evolution.
635 |
636 | """
637 |
638 | return self._fitness_fail
639 |
640 |
def conv_int(str_value):
    """
    Attempt to convert ``str_value`` to an int.

    If a plain ``int()`` conversion fails, the string is evaluated as a
    Python expression (so "3 + 2" yields 5); any error from that
    evaluation bubbles up to the caller.

    This function used to live inside self._fmt_resolved_vars, but was
    taken out because it is easier to do unit testing this way.
    """

    try:
        return int(str_value)
    except (ValueError, TypeError):
        # Narrowed from a bare ``except:`` -- only the exceptions int()
        # raises on bad input should trigger the fallback.
        # Try the expensive eval -- if it fails, let the error bubble up.
        # NOTE(security): eval runs arbitrary code; acceptable only
        # because the value originates from the trusted BNF mapping.
        return eval(str_value)
656 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_genotypes.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from datetime import datetime, timedelta
4 |
5 | from pyneurgen.genotypes import Genotype, MUT_TYPE_M, MUT_TYPE_S, conv_int
6 |
7 |
class TestGenotype(unittest.TestCase):
    """
    The Genotype class holds the genetic material. It has the ability to run
    fitness functions and mutate. It is an internal object, and so few aspects
    of it would be regarded as public.

    """

    def setUp(self):

        # A fresh genotype for every test: a 10-codon starting gene that
        # may grow to at most 20 codons, identified as member number 1.
        start_gene_length = 10
        max_gene_length = 20
        member_no = 1

        self.g = Genotype(start_gene_length, max_gene_length, member_no)

    def test_class_init__(self):
        """Testing class init"""

        self.assertEqual(1, self.g.member_no)
        self.assertEqual(10, self.g._gene_length)
        self.assertEqual(20, self.g._max_gene_length)

        self.assertEqual({}, self.g.local_bnf)
        self.assertEqual(None, self.g._max_program_length)
        self.assertEqual(None, self.g._fitness)
        self.assertEqual(None, self.g._fitness_fail)
        self.assertEqual(True, self.g._wrap)
        self.assertEqual(True, self.g._extend_genotype)
        self.assertEqual(None, self.g.starttime)
        self.assertEqual((0, 0), self.g._timeouts)
        self.assertEqual((0, 0), self.g._position)
        # NOTE(review): the next two assertions duplicate the
        # _max_program_length check already made above.
        self.assertEqual(None, self.g._max_program_length)
        self.assertEqual(None, self.g._max_program_length)
        self.assertEqual([], self.g.errors)

        # Not tested here
        # self.binary_gene = None
        # self.decimal_gene = None
        # self._generate_binary_gene(self._gene_length)
        # self.generate_decimal_gene()

    def test__generate_binary_gene(self):
        """
        This function tests the generation process for a binary gene.

        Basically, all that is being tested on this is proper length, and
        whether it consists of ones or zeros. No test is made of rand_int.

        """

        self.g._generate_binary_gene(20)
        self.assertEqual(20 * 8, len(self.g.binary_gene))

        gene = self.g.binary_gene
        self.assertEqual(20 * 8, gene.count("0") + gene.count("1"))

    def test_set_binary_gene(self):
        """
        This function tests setting a binary gene.

        """

        # should truncate automatically if too long
        binary_gene = "0110100101"
        gene_result = "01101001"
        self.g.set_binary_gene(binary_gene)
        self.assertEqual(gene_result, self.g.binary_gene)

        # should set the gene length correctly
        self.assertEqual(1, self.g._gene_length)

    def test_generate_decimal_gene(self):
        """
        This function tests the generation of the decimal gene from a binary
        gene

        Tested:
            There must be decimal gene greater than length 0.
            The length of generated decimal gene will be 1/8 the size of the
                binary gene.
            A specific example of binary gene/decimal gene is tested.
            The position pointer for the next gene is reset back to (0, 0))

        """

        # There must be decimal gene greater than length 0.
        self.g.binary_gene = []
        self.assertRaises(ValueError, self.g.generate_decimal_gene)

        # The length of generated decimal gene will be 1/8 the size of the
        dec_gene = [2, 1, 3, 4]

        # The length of generated decimal gene will be 1/8 the size of the
        # binary gene.
        # Uses set binary gene to force the recalculation of the gene length
        self.g.set_binary_gene('00000010000000010000001100000100')
        length = len(self.g.binary_gene)
        self.g.generate_decimal_gene()
        self.assertEqual(length / 8, len(self.g.decimal_gene))


        # A specific example of binary gene/decimal gene is tested.
        self.assertEqual(dec_gene, self.g.decimal_gene)

        # The position pointer for the next gene is reset back to (0, 0))
        self.assertEqual((0, 0), self.g._position)

    def test__dec2bin_gene(self):
        """
        This function tests the process of computing a binary gene from the
        decimal gene.

        """

        dec_gene = [2, 1, 3, 4]
        binary_gene = '00000010000000010000001100000100'

        self.assertEqual(binary_gene, self.g._dec2bin_gene(dec_gene))

    def test__place_material(self):
        """
        This function tests whether the process of a string replacement takes
        place properly.

        Is the start position consistent with main string?
        Is the end position consistent with the main string?
        Does the replacement actually work?

        """

        main_string = "this is a test"
        item = " very big"

        # Valid start position- too big?
        start_pos = 100
        end_pos = 10
        self.assertRaises(ValueError, self.g._place_material,
            main_string, item, start_pos, end_pos)

        # Valid start position- too small?
        start_pos = -1
        self.assertRaises(ValueError, self.g._place_material,
            main_string, item, start_pos, end_pos)

        # Valid end position - too big
        start_pos = 10
        end_pos = 100
        self.assertRaises(ValueError, self.g._place_material,
            main_string, item, start_pos, end_pos)

        # Valid end position
        start_pos = 10
        end_pos = 5
        self.assertRaises(ValueError, self.g._place_material,
            main_string, item, start_pos, end_pos)

        # Valid insertion
        start_pos = 9
        end_pos = 9
        self.assertEqual("this is a very big test",
                    self.g._place_material(main_string, item,
                                            start_pos, end_pos))

        # Valid replacement
        start_pos = 10
        end_pos = 13
        item = "drill"
        self.assertEqual("this is a drill",
                    self.g._place_material(main_string, item,
                                            start_pos, end_pos))


        # testing the gene bit use
        binary_gene = "11010111"
        bit = "3"      # to make it really stand out
        start_pos = 5
        end_pos = 6
        self.assertEqual("11010311", self.g._place_material(
                                binary_gene, bit, start_pos, end_pos))

    def test_runtime_resolve(self):
        """
        This function tests the process that resolves a variable to value
        that can be used in a program as it executes.

        Note that starttime must be initialized.  This is to control for a
        runaway process during runtime.

        """

        self.g.set_bnf_variable("<test_variable>", ["this", "is", "test"])
        self.g.set_bnf_variable("<test_variable1>", [1, "is", "test"])
        self.g.decimal_gene = [3, 2, 5, 6]
        self.g._max_gene_length = 4
        self.g._position = (0, 0)
        self.g.starttime = datetime.now()
        self.g._max_program_length = 10000

        self.assertEqual("this", self.g.runtime_resolve("<test_variable>", 'str'))
        self.assertEqual("test", self.g.runtime_resolve("<test_variable>", 'str'))

    def test__fmt_resolved_vars(self):
        """
        This function tests the process of converting various kinds of
        variables into specific types for use in program lines.

        """

        # int tests
        self.assertEqual(10, self.g._fmt_resolved_vars(10.0, 'int'))

        #self.g._fmt_resolved_vars("ten", 'int')
        self.assertRaises(NameError, self.g._fmt_resolved_vars, "ten", 'int')
        self.assertEqual(10, self.g._fmt_resolved_vars("10", 'int'))
        self.assertEqual(8, self.g._fmt_resolved_vars("3 + 5", 'int'))

        # float tests
        self.assertEqual(10.0, self.g._fmt_resolved_vars(10, 'float'))
        self.assertRaises(NameError, self.g._fmt_resolved_vars, "ten", 'float')
        self.assertEqual(10.0, self.g._fmt_resolved_vars("10", 'float'))
        self.assertEqual(8.0, self.g._fmt_resolved_vars("3 + 5", 'float'))

        # bool tests
        self.assertEqual(True, self.g._fmt_resolved_vars('True', 'bool'))
        self.assertEqual(False, self.g._fmt_resolved_vars("False", 'bool'))
        self.assertRaises(ValueError, self.g._fmt_resolved_vars, 'not true',
                            'bool')

    def test_set_bnf_variable(self):
        """
        Test setting bnf variables
        """

        # new variable name
        self.g.set_bnf_variable("test_variable", "test_value")
        self.assertEqual(True, "test_variable" in self.g.local_bnf)
        self.assertEqual(["test_value"], self.g.local_bnf["test_variable"])

        # new value
        self.g.set_bnf_variable("test_variable", "new_value")
        self.assertEqual(["new_value"], self.g.local_bnf["test_variable"])

        # Does it modify a list?
        self.g.set_bnf_variable("test_variable", ["is", "a", "list"])
        self.assertEqual(["is", "a", "list"],
            self.g.local_bnf["test_variable"])

        # Does it convert a value to a string?
        self.g.set_bnf_variable("test_variable", 523.45)
        self.assertEqual(["523.45"], self.g.local_bnf["test_variable"])


    def test_resolve_variable(self):
        """
        This function tests the process of converting a variable received to a
        to a list of possible variables as found in the bnf.

        """

        self.g.set_bnf_variable("<test_variable>", ["this", "is", "test"])
        self.g.decimal_gene = [3, 2, 5, 6]
        self.g._max_gene_length = 4
        self.g._position = (0, 0)

        self.assertEqual("this", self.g.resolve_variable("<test_variable>"))
        self.assertEqual("test", self.g.resolve_variable("<test_variable>"))

    def test__map_variables(self):
        """
        This function test the process of mapping variables\

        Test:
            mapping of variables with check_stoplist
            mapping of variables without check_stoplist

        """

        self.g._position = (0, 0)
        self.g.starttime = datetime.now()
        self.g._max_program_length = 10000
        self.g.set_bnf_variable("<value1>", [-1, 2, 0])
        self.g.set_bnf_variable("<value2>", [1, 2, 3])

        program = ''.join([
            'a = <value1>\n',
            'b = <value2>\n',
            'fitness = a + b\n',
            'self.set_bnf_variable("<fitness>", fitness)'])

        completed_program = ''.join([
            'a = -1\n',
            'b = 2\n',
            'fitness = a + b\n',
            'self.set_bnf_variable("<fitness>", fitness)'])

        self.g.decimal_gene = [0, 1, 5, 6]
        self.g._max_gene_length = 4

        self.assertEqual(completed_program, self.g._map_variables(
                                                    program, True))

    def test_conv_int(self):
        """
        This function tests the process of converting a string value to an int.

        """

        self.assertEqual(23, conv_int("23"))
        self.assertEqual(23, conv_int(" 23 "))
        self.assertRaises(NameError, conv_int, "test")
        self.assertEqual(5, conv_int("3 + 2"))

    def test__get_codon(self):
        """
        This function tests getting a codon.

        Test:
            Testable conditions:
                Wrap True/False
                Extend Genotype True/False
                Beyond max gene length

            Both position and sequence_no are less than the length of the
                decimal gene.
            Gene is set to not wrap, and sequence_no is set beyond the length
                of the gene.
            Gene is set to wrap and sequence_no is greater than gene length.


        """

        self.g.decimal_gene = [3, 34, 5, 6]
        self.g._max_gene_length = 4
        self.g._wrap = False
        self.g._extend_genotype = False
        self.g._position = (0, 0)

        # no wrap -- gets to end of sequence, raises exception
        self.assertEqual(3, self.g._get_codon())
        self.assertEqual(34, self.g._get_codon())
        self.assertEqual(5, self.g._get_codon())
        self.assertEqual(6, self.g._get_codon())
        self.assertEqual((4, 4), self.g._position)
        self.assertRaises(ValueError, self.g._get_codon)

        # wrap -- no extend -- _max_gene_length = length of gene
        #   gets to end of sequence, wraps around to start,
        #   length not increased, raises error
        self.g._wrap = True
        self.g._max_gene_length = 5

        self.g._position = (0, 0)
        self.assertEqual(3, self.g._get_codon())
        self.assertEqual(34, self.g._get_codon())
        self.assertEqual(5, self.g._get_codon())
        self.assertEqual(6, self.g._get_codon())
        self.assertEqual((0, 4), self.g._position)
        self.assertEqual(3, self.g._get_codon())
        self.assertEqual((1, 5), self.g._position)
        self.assertEqual(4, len(self.g.decimal_gene))

        # wrap -- extend -- _max_gene_length > length of gene
        #   gets to end of sequence, wraps around to start,
        self.g._wrap = True
        self.g._extend_genotype = True
        self.g._position = (0, 0)
        self.assertEqual(3, self.g._get_codon())
        self.assertEqual(34, self.g._get_codon())
        self.assertEqual(5, self.g._get_codon())
        self.assertEqual(6, self.g._get_codon())
        self.assertEqual((0, 4), self.g._position)
        self.assertEqual(3, self.g._get_codon())
        self.assertEqual(5, len(self.g.decimal_gene))
        self.assertEqual((1, 5), self.g._position)

    def test__reset_gene_position(self):
        """
        This function tests whether the starting position is reset back to 0.

        """

        self.g._position = "something else"
        self.g._reset_gene_position()
        self.assertEqual((0, 0), self.g._position)

    def test__update_genotype(self):
        """
        This function tests whether the binary gene is properly updated with a
        new decimal gene.

        """

        self.g.binary_gene = '0000001000000001'

        # new dec_gene
        self.g.decimal_gene = [2, 1, 3, 4]

        self.g._update_genotype()
        self.assertEqual('00000010000000010000001100000100',
                            self.g.binary_gene)

    def test_compute_fitness(self):
        """
        This function tests the process of computing fitness.

        Because this process is an amalgamation of other processes, which are
        already tested, what will be tested here, is setting up a sample
        template program with variables, mapping the gene

        Test:
            Program that matches fitness
            Program that fails due to program error
            Program that fails due to time out.

        """

        # NOTE(review): _fitness_fail is assigned as a string here;
        # compute_fitness presumably coerces it to a number before the
        # comparisons below succeed -- confirm in genotypes.py.
        self.g._fitness_fail = "-999999"
        self.g.set_bnf_variable('<S>', ''.join([
            'a = <value1>\n',
            'b = <value2>\n',
            'fitness = a + b\n',
            'self.set_bnf_variable("<fitness>", fitness)']))


        self.g.set_bnf_variable('<fitness>', 0)

        self.g.set_bnf_variable("<value1>", [-1, 2, 0])
        self.g.set_bnf_variable("<value2>", [1, 2, 3])
        self.g.decimal_gene = [0, 1, 5, 6]
        self.g._max_gene_length = 4

        # intentionally incorrect position set to test reset
        self.g._position = (3, 3)

        self.g.starttime = datetime.now()
        self.g._max_program_length = 10000

        self.assertEqual(1, self.g.compute_fitness())
        self.assertEqual(1, self.g._fitness)

        # Faulty program -- incorrect variable
        self.g._fitness = "test"
        self.g.set_bnf_variable('<S>', ''.join([
            'logging.debug("Executing Example of a Faulty Program")\n',
            'a = <value1>\n',
            'b1 = <value2>\n',
            'fitness = a + b\n',
            'self.set_bnf_variable("<fitness>", fitness)']))

        self.assertEqual(-999999, self.g.compute_fitness())

        # Long running program
        self.g._fitness = -999999
        self.g.starttime = datetime.now() - timedelta(seconds=1000)
        self.g._timeouts = (1, 1)

        self.g.set_bnf_variable('<S>', ''.join([
            'logging.debug("Executing Example of a Long Program")\n',
            'a = <value1>\n',
            'b = <value2>\n',
            'fitness = a + b\n',
            'self.set_bnf_variable("<fitness>", fitness)']))

        self.assertEqual(-999999, self.g.compute_fitness())

        ## Long Program creation time
        #   not yet implemented

        # Program size
        #   not yet implemented

    def test__map_gene(self):
        """
        This function tests the production and execution of a program by
        mapping the gene to the template program.

        This is not implemented.  test_compute_fitness would not function if
        map_gene() did not work.  Nonetheless, at some point, a separate
        test for this should be written.

        """

        pass

    def test__execute_code(self):
        """
        This function tests whether code can be executed.
        In addition, prior to executing, the program is put into the local bnf.

        """

        test_program = "a = 3\nb=2\nc = a + b"

        self.g._execute_code(test_program)

        # NOTE(review): the assertion below is vacuous -- the test itself
        # assigns local_bnf['program'] on the previous line rather than
        # checking what _execute_code stored.
        self.g.local_bnf['program'] = test_program
        self.assertEqual("a = 3\nb=2\nc = a + b", self.g.local_bnf['program'])

    def test__mutate(self):
        """
        This function tests whether _mutate changes a bit in the appropriate
        spot.
        Note that it does not check for appropriateness of position value,
        because it has already been cleared by the calling routines.

        In these test functions, test__mutate and test_mutate are ambiguous.
        test__mutate tests self._mutate for altering a gene at a particular
        spot.
        test_mutate tests self.mutate which is an umbrella function for both
        single and multiple mutations.  It's possible that self._mutate should
        be remained.

        """

        gene = '1110101'

        position = 0
        self.assertEqual('0110101', self.g._mutate(gene, position))

        position = 4
        self.assertEqual('1110001', self.g._mutate(gene, position))

        position = 6
        self.assertEqual('1110100', self.g._mutate(gene, position))

    def test_mutate(self):
        """
        This function tests the routing to mutation type functions.
        Tested:
            Is an error generated if the mutation type is not 's' or 'm'.
            Is an error generated if the mutation rate outside of (0, 1).
        """

        mutation_rate = .05
        mutation_type = 'wrong'
        self.assertRaises(ValueError, self.g.mutate, mutation_rate,
                            mutation_type)

        # Invalid mutation rates
        mutation_rate = -0.5
        mutation_type = MUT_TYPE_M
        self.assertRaises(ValueError, self.g.mutate, mutation_rate,
                            mutation_type)

        mutation_rate = -1.5
        self.assertRaises(ValueError, self.g.mutate, mutation_rate,
                            mutation_type)

        # Edge values - Failure if ValueError raised in function
        mutation_rate = 0.0
        mutation_type = MUT_TYPE_M
        self.g.mutate(mutation_rate, mutation_type)

        mutation_rate = 1.0
        mutation_type = MUT_TYPE_M
        self.g.mutate(mutation_rate, mutation_type)

    def test__multiple_mutate(self):
        """
        This function tests multiple mutations.  Because the rate value is
        already tested in the upstream test, this will simply test to see if
        any mutations are made.  The looping process is currently not being
        tested until a good way of isolating without unnecessary complication
        can be found.

        """

        # A rate of 1.0 guarantees every bit flips, so the gene must
        # change while its length stays the same.
        mutation_rate = 1.0

        gene = self.g.binary_gene

        self.g._multiple_mutate(mutation_rate)

        self.assertEqual(len(gene), len(self.g.binary_gene))
        self.assertNotEqual(gene, self.g.binary_gene)

    def test_get_binary_gene_length(self):
        """
        This function tests getting the length of binary gene.

        """

        length = self.g._gene_length
        self.assertEqual(length * 8, self.g.get_binary_gene_length())

    def test_single_mutate(self):
        """
        Tests a single mutation.

        """

        length = len(self.g.binary_gene)

        original_gene = self.g.binary_gene
        self.g._single_mutate()

        # Exactly one position may differ after a single mutation.
        changes = 0
        for i in range(length):
            if original_gene[i] != self.g.binary_gene[i]:
                changes += 1
        self.assertEqual(1, changes)

    def test__select_choice(self):
        """
        This function tests the process of selecting an item from a list based
        on the codon.

        """

        self.assertRaises(ValueError, self.g._select_choice,
                                1, "no list")
        self.assertRaises(ValueError, self.g._select_choice,
                                1, 3)

        codon = 0
        selection = ["this", "is", "test"]
        self.assertEqual("this", self.g._select_choice(
                                        codon, selection))
        codon = 40
        selection = ["this", "is", "test"]
        self.assertEqual("is", self.g._select_choice(
                                        codon, selection))
        codon = 65
        selection = ["this", "is", "test"]
        self.assertEqual("test", self.g._select_choice(
                                        codon, selection))
    def test_get_program(self):
        """Test Get program"""
        self.g.local_bnf["program"] = "program here"
        self.assertEqual("program here", self.g.get_program())

    def test_get_preprogram(self):
        """Test Get preprogram"""
        self.g.local_bnf["<S>"] = "preprogram"
        self.assertEqual("preprogram", self.g.get_preprogram())

    def test_get_fitness(self):
        """Test Get Fitness"""
        self.g._fitness = "Fitness"
        self.assertEqual("Fitness", self.g.get_fitness())

    def test_get_fitness_fail(self):
        """Test Get Fitness Fail"""
        self.g._fitness_fail = "Fitness Fail"
        self.assertEqual("Fitness Fail", self.g.get_fitness_fail())
654 |
655 |
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
658 |
--------------------------------------------------------------------------------
/pyneurgen/test/test_fitness.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from pyneurgen.fitness import FitnessList, Fitness, Selection, Tournament
4 | from pyneurgen.fitness import FitnessProportionate, FitnessTournament
5 | from pyneurgen.fitness import FitnessElites, FitnessLinearRanking
6 | from pyneurgen.fitness import FitnessTruncationRanking
7 | from pyneurgen.fitness import Replacement, ReplacementDeleteWorst
8 | from pyneurgen.fitness import ReplacementTournament
9 | from pyneurgen.fitness import MAX, MIN, CENTER, FITNESS_TYPES
10 | from pyneurgen.fitness import SCALING_LINEAR, SCALING_TRUNC, SCALING_EXPONENTIAL
11 | from pyneurgen.fitness import SCALING_LOG, SCALING_TYPES
12 |
13 |
class TestFitnessList(unittest.TestCase):
    """
    This class tests the base class of fitness.
    """

    def setUp(self):
        # Five (fitness, member_no) pairs; MAX means higher is better.
        self.fitness_list = FitnessList(MAX)

        self.fitness_list.append([3.0, 0])
        self.fitness_list.append([2.0, 1])
        self.fitness_list.append([5.0, 2])
        self.fitness_list.append([4.0, 3])
        self.fitness_list.append([1.0, 4])

    def test_class_init_(self):

        fitness_list = FitnessList(MAX)

        # Is it a list?
        self.assertEqual(True, isinstance(fitness_list, list))

        # Does the fitness type get set?
        self.assertEqual(MAX, fitness_list._fitness_type)

        # Does the target_value get set?
        self.assertEqual(0.0, fitness_list._target_value)

        fitness_list = FitnessList(MAX, .5)
        self.assertAlmostEqual(0.5, fitness_list._target_value)

    def test_set_fitness_type(self):

        self.fitness_list.set_fitness_type(MIN)
        self.assertEqual(MIN, self.fitness_list._fitness_type)

    def test_get_fitness_type(self):

        self.assertEqual(MAX, self.fitness_list.get_fitness_type())

        self.fitness_list._fitness_type = MIN
        self.assertEqual(MIN, self.fitness_list.get_fitness_type())

    def test_set_target_value(self):

        self.fitness_list.set_target_value(0.3)
        self.assertEqual(0.3, self.fitness_list._target_value)

    def test_get_target_value(self):
        self.fitness_list._target_value = .45
        self.assertAlmostEqual(.45, self.fitness_list.get_target_value())

    def test_min_value(self):
        self.assertEqual(1.0, self.fitness_list.min_value())

    def test_max_value(self):
        self.assertEqual(5.0, self.fitness_list.max_value())

    def test_best_value(self):
        self.assertEqual(5.0, self.fitness_list.best_value())

        self.fitness_list.set_fitness_type(MIN)
        self.assertEqual(1.0, self.fitness_list.best_value())

        self.fitness_list.set_fitness_type(CENTER)
        self.fitness_list.set_target_value(3.0)
        self.assertEqual(3.0, self.fitness_list.best_value())

    def test_worst_value(self):
        self.assertEqual(1.0, self.fitness_list.worst_value())

        self.fitness_list.set_fitness_type(MIN)
        self.assertEqual(5.0, self.fitness_list.worst_value())

        self.fitness_list.set_fitness_type(CENTER)
        self.fitness_list.set_target_value(3.0)
        self.assertEqual(1.0, self.fitness_list.worst_value())

    def test_min_member(self):
        self.assertEqual(4, self.fitness_list.min_member())

    def test_max_member(self):
        self.assertEqual(2, self.fitness_list.max_member())

    def test_best_member(self):
        # max
        self.assertEqual(2, self.fitness_list.best_member())

        # min
        self.fitness_list.set_fitness_type(MIN)
        self.assertEqual(4, self.fitness_list.best_member())

        # center
        self.fitness_list.set_fitness_type(CENTER)
        self.assertEqual(4, self.fitness_list.best_member())

        # check absolute value on center
        self.fitness_list.append([-.5, 5])
        self.assertEqual(5, self.fitness_list.best_member())

    def test_worst_member(self):
        # max
        self.assertEqual(4, self.fitness_list.worst_member())

        # min
        self.fitness_list.set_fitness_type(MIN)
        self.assertEqual(2, self.fitness_list.worst_member())

        # center
        self.fitness_list.set_fitness_type(CENTER)
        self.assertEqual(2, self.fitness_list.worst_member())

        # check absolute value on center
        self.fitness_list.append([-5.0, 5])
        self.assertEqual(5, self.fitness_list.worst_member())

    def test_mean(self):

        mean = (3.0 + 2.0 + 5.0 + 4.0 + 1.0) / 5
        self.assertAlmostEqual(mean, self.fitness_list.mean())

    def test_median(self):

        self.assertAlmostEqual(3.0, self.fitness_list.median())

    def test_stddev(self):
        # 1.5811388301 == sqrt(2.5): the sample (n - 1) standard
        # deviation of the values 1.0 .. 5.0.
        self.assertAlmostEqual(1.5811388301, self.fitness_list.stddev())

    def test_sorted(self):

        # max
        sorted_list = self.fitness_list.sorted()
        self.assertAlmostEqual(5.0, sorted_list[0][0])
        self.assertEqual(2, sorted_list[0][1])
        self.assertAlmostEqual(4.0, sorted_list[1][0])
        self.assertEqual(3, sorted_list[1][1])
        self.assertAlmostEqual(3.0, sorted_list[2][0])
        self.assertEqual(0, sorted_list[2][1])
        self.assertAlmostEqual(2.0, sorted_list[3][0])
        self.assertEqual(1, sorted_list[3][1])
        self.assertAlmostEqual(1.0, sorted_list[4][0])
        self.assertEqual(4, sorted_list[4][1])

        # min
        self.fitness_list.set_fitness_type(MIN)
        sorted_list = self.fitness_list.sorted()

        self.assertAlmostEqual(1.0, sorted_list[0][0])
        self.assertEqual(4, sorted_list[0][1])
        self.assertAlmostEqual(2.0, sorted_list[1][0])
        self.assertEqual(1, sorted_list[1][1])
        self.assertAlmostEqual(3.0, sorted_list[2][0])
        self.assertEqual(0, sorted_list[2][1])
        self.assertAlmostEqual(4.0, sorted_list[3][0])
        self.assertEqual(3, sorted_list[3][1])
        self.assertAlmostEqual(5.0, sorted_list[4][0])
        self.assertEqual(2, sorted_list[4][1])

        # center
        self.fitness_list.set_fitness_type(CENTER)
        self.fitness_list.append([-0.5, 5])
        sorted_list = self.fitness_list.sorted()

        self.assertAlmostEqual(-0.5, sorted_list[0][0])
        self.assertEqual(5, sorted_list[0][1])
        self.assertAlmostEqual(1.0, sorted_list[1][0])
        self.assertEqual(4, sorted_list[1][1])
        self.assertAlmostEqual(2.0, sorted_list[2][0])
        self.assertEqual(1, sorted_list[2][1])
        self.assertAlmostEqual(3.0, sorted_list[3][0])
        self.assertEqual(0, sorted_list[3][1])
        self.assertAlmostEqual(4.0, sorted_list[4][0])
        self.assertEqual(3, sorted_list[4][1])
        self.assertAlmostEqual(5.0, sorted_list[5][0])
        self.assertEqual(2, sorted_list[5][1])
188 |
class TestSelection(unittest.TestCase):
    """Tests for the Selection base class."""

    def test_class_init_(self):

        sel_type = Selection([1.0, 2.0, 3.0, 4.0, 5.0])

        # check default selection type
        self.assertEqual(MAX, sel_type._selection_type)

        # check list
        self.assertEqual(True, isinstance(sel_type._selection_list, list))

        sel_type = Selection()
        self.assertEqual(None, sel_type._selection_list)

    def test_set_selection_list(self):

        sel_type = Selection()
        self.assertEqual(None, sel_type._selection_list)
        sel_type.set_selection_list([1.0, 2.0, 3.0, 4.0, 5.0])
        self.assertEqual(True, isinstance(sel_type._selection_list, list))

        # test for fitness_list
        fitl = FitnessList(MAX)
        self.assertRaises(ValueError, sel_type.set_selection_list, fitl)

    def test_set_selection_type(self):

        # NOTE(review): this body duplicates test_set_selection_list and
        # never actually calls set_selection_type -- looks like a
        # copy-paste oversight worth fixing.
        sel_type = Selection()
        self.assertEqual(None, sel_type._selection_list)
        sel_type.set_selection_list([1.0, 2.0, 3.0, 4.0, 5.0])
        self.assertEqual(True, isinstance(sel_type._selection_list, list))

    def test_roulette_wheel(self):

        slist = [0.06666666666666667, 0.13333333333333333, 0.2,
                    0.26666666666666666, 0.3333333333333333]
        sel_type = Selection(slist)

        for rand_position in sel_type._roulette_wheel(slist):
            self.assertGreaterEqual(5, rand_position)
            self.assertLessEqual(0, rand_position)

    def test_make_sort_list(self):

        slist = [0.06666666666666667, 0.13333333333333333, 0.2,
                    0.26666666666666666, 0.3333333333333333]
        sel_type = Selection(slist)

        sorted_list = sel_type._make_sort_list()

        self.assertAlmostEqual(0.06666666666666667, sorted_list[0][0])
        self.assertAlmostEqual(0.13333333333333333, sorted_list[1][0])
        self.assertAlmostEqual(0.2, sorted_list[2][0])
        self.assertAlmostEqual(0.26666666666666666, sorted_list[3][0])
        self.assertAlmostEqual(0.3333333333333333, sorted_list[4][0])
        self.assertAlmostEqual(0, sorted_list[0][1])
        self.assertAlmostEqual(1, sorted_list[1][1])
        self.assertAlmostEqual(2, sorted_list[2][1])
        self.assertAlmostEqual(3, sorted_list[3][1])
        self.assertAlmostEqual(4, sorted_list[4][1])
250 |
251 |
class TestTournament(unittest.TestCase):
    """Tests for tournament-based selection."""

    def setUp(self):
        scaled = [0.06666666666666667, 0.13333333333333333, 0.2,
                  0.26666666666666666, 0.3333333333333333]

        self.sel_type = Tournament(selection_list=scaled, tournament_size=2)

    def test_classinit_(self):
        self.assertEqual(2, self.sel_type._tournament_size)
        self.assertEqual(None, self.sel_type._minmax)

    def test_set_tournament_size(self):
        self.sel_type.set_tournament_size(3)
        self.assertEqual(3, self.sel_type._tournament_size)

        # Sizes larger than the population are rejected.
        for oversized in (10, 6):
            self.assertRaises(ValueError,
                              self.sel_type.set_tournament_size, oversized)

    def test_set_minmax(self):
        # Both valid objectives are accepted and stored.
        for minmax in (MIN, MAX):
            self.sel_type._set_minmax(minmax)
            self.assertEqual(minmax, self.sel_type._minmax)

        self.assertRaises(ValueError, self.sel_type._set_minmax, 'WRONG')

    def test_select(self):
        # TODO: revisit to prove that the selection pool really has
        # tournament_size entrants and that the best member is selected.

        self.sel_type._set_minmax(MIN)

        # One selection is yielded per population member.
        selected = [member for member in self.sel_type.select()]

        self.assertEqual(len(selected), len(self.sel_type._selection_list))
295 |
class TestFitness(unittest.TestCase):
    """Tests for the Fitness base class."""

    def test_classinit_(self):
        """
        Does it set the fitness_list?
        """

        fitness = FitnessList(MAX)
        fit = Fitness(fitness)

        self.assertEqual(fit._fitness_list, fitness)

    def test_set_fitness_list(self):
        # Covers both the plain MAX case and the CENTER case, where the
        # selection list is rebuilt as distances from the target value.

        # Do MAX
        fitness = FitnessList(MAX)
        fit = Fitness(fitness)
        fit._fitness_list = None
        self.assertEqual(fit._fitness_list, None)
        fit.set_fitness_list(fitness)
        self.assertEqual(fit._fitness_list, fitness)

        # Do CENTER selection_list converted to distance from target
        fitness = FitnessList(CENTER, .15)
        fitness.append([.5, 0])
        fitness.append([.25, 1])
        fitness.append([2.5, 2])
        fit = Fitness(fitness)
        fit._fitness_list = None
        self.assertEqual(fit._fitness_list, None)
        fit.set_fitness_list(fitness)
        # Each entry is abs(value - .15).
        self.assertEqual(fit._selection_list, [.35, .1, 2.35])

    def test_invert(self):
        # _invert returns the reciprocal, preserving sign.
        fitness = FitnessList(MAX)
        fit = Fitness(fitness)

        self.assertAlmostEqual(.25, fit._invert(4.0))
        self.assertAlmostEqual(-.25, fit._invert(-4.0))
        self.assertAlmostEqual(2.0, fit._invert(0.5))

    def test_scale_list(self):
        # Walks each (fitness type, selection type) pairing and checks
        # whether the selection list is inverted.  NOTE(review): the steps
        # below are order-dependent; each set_fitness_list call rebuilds
        # the selection list from the (possibly retyped) fitness list.

        #
        # fitness type MAX, test select type MAX
        #
        fitness = FitnessList(MAX)
        fitness.extend([[.5, 0], [.25, 1], [2.5, 2]])

        fit = Fitness(fitness)
        fit.set_selection_type(MAX)
        fit._scale_list()

        # not inverted
        self.assertAlmostEqual(.5, fit._selection_list[0])
        self.assertAlmostEqual(.25, fit._selection_list[1])
        self.assertAlmostEqual(2.5, fit._selection_list[2])

        #
        # fitness type MAX, test select type MIN
        #
        fit.set_fitness_list(fitness)
        fit.set_selection_type(MIN)
        fit._scale_list()

        # inverted
        self.assertAlmostEqual(2.0, fit._selection_list[0])
        self.assertAlmostEqual(4.0, fit._selection_list[1])
        self.assertAlmostEqual(0.4, fit._selection_list[2])

        #
        # fitness type MIN, test select type MAX
        #
        fitness.set_fitness_type(MIN)
        fit.set_fitness_list(fitness)
        fit.set_selection_type(MAX)
        fit._scale_list()

        # inverted
        self.assertAlmostEqual(2.0, fit._selection_list[0])
        self.assertAlmostEqual(4.0, fit._selection_list[1])
        self.assertAlmostEqual(0.4, fit._selection_list[2])

        #
        # fitness type MIN, test select type MIN
        #
        fit.set_fitness_list(fitness)
        fit.set_selection_type(MIN)
        fit._scale_list()

        # not inverted
        self.assertAlmostEqual(.5, fit._selection_list[0])
        self.assertAlmostEqual(.25, fit._selection_list[1])
        self.assertAlmostEqual(2.5, fit._selection_list[2])

        #
        # fitness type CENTER, test select type MAX
        #
        fitness.set_fitness_type(CENTER)
        fitness.set_target_value(.75)
        fit.set_fitness_list(fitness)
        fit.set_selection_type(MAX)
        fit._scale_list()

        # inverted (values are first distances from the .75 target)
        self.assertAlmostEqual(4.0, fit._selection_list[0])
        self.assertAlmostEqual(2.0, fit._selection_list[1])
        self.assertAlmostEqual(0.5714285714, fit._selection_list[2])

        #
        # fitness type CENTER, test select type MIN
        #
        fit.set_fitness_list(fitness)
        fit.set_selection_type(MIN)
        fit._scale_list()

        # not inverted
        self.assertAlmostEqual(.25, fit._selection_list[0])
        self.assertAlmostEqual(.5, fit._selection_list[1])
        self.assertAlmostEqual(1.75, fit._selection_list[2])

    def test_make_prob_list(self):
        # The scaled list must total 1.0 with shares proportional to input.
        prob_list = Fitness._make_prob_list([.5, .25, 2.5])

        self.assertAlmostEqual(1.0, sum(prob_list))
        self.assertAlmostEqual(0.153846153846, prob_list[0])
        self.assertAlmostEqual(0.0769230769231, prob_list[1])
        self.assertAlmostEqual(0.769230769231, prob_list[2])
425 |
class TestFitnessProportionate(unittest.TestCase):
    """Tests for fitness-proportionate (roulette wheel) selection."""

    def setUp(self):
        self.fitness = FitnessList(MAX)
        self.fitness.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

    def test_class_init_(self):
        # The scaling type is recorded on construction.
        fit = FitnessProportionate(self.fitness, SCALING_LINEAR)
        self.assertNotEqual(None, fit._scaling_type)

        # Mixing in negative fitness values is rejected.
        self.fitness.extend([[-.5, 3], [-.25, 4], [2.5, 2]])
        self.assertRaises(ValueError, FitnessProportionate, self.fitness,
                          SCALING_LINEAR)

    def test_set_scaling_type(self):
        fit = FitnessProportionate(self.fitness, SCALING_LINEAR)
        self.assertEqual(SCALING_LINEAR, fit._scaling_type)

    def test_check_minmax(self):
        # Sign-mixed fitness values cannot form a probability map.
        self.fitness.extend([[.5, 3], [.25, 1], [-2.5, 2]])
        self.assertRaises(ValueError, FitnessProportionate, self.fitness,
                          SCALING_LINEAR)

    def test_select(self):
        fit = FitnessProportionate(self.fitness, SCALING_LINEAR)

        self.assertEqual(3, len([member for member in fit.select()]))

    def _check_scaling(self, scaling_type, param, expected):
        """Scale the fitness list and compare against expected shares."""
        fit = FitnessProportionate(self.fitness, scaling_type)
        scaling_list = fit._apply_prop_scaling(param)

        # A probability list always totals 1.0.
        self.assertAlmostEqual(1.0, sum(scaling_list))
        for expected_value, actual in zip(expected, scaling_list):
            self.assertAlmostEqual(expected_value, actual)

    def test_apply_prop_scaling(self):
        # Check linear
        self._check_scaling(SCALING_LINEAR, None,
                            [0.09375, 0.15625, 0.75])

        # Check exponential, default exponent
        self._check_scaling(SCALING_EXPONENTIAL, None,
                            [0.014754098360655738, 0.040983606557377046,
                             0.94426229508196724])

        # Check exponential, explicit exponent
        self._check_scaling(SCALING_EXPONENTIAL, 1.5,
                            [0.038791152234464166, 0.083465270324597968,
                             0.87774357744093778])

        # Check log
        self._check_scaling(SCALING_LOG, None,
                            [0.10651459360996007, 0.24070711137026513,
                             0.6527782950197748])

        # Check truncation
        self._check_scaling(SCALING_TRUNC, 2.0,
                            [0.0, 0.17241379310344829, 0.82758620689655171])
504 |
505 |
class TestFitnessTournament(unittest.TestCase):
    """Tests for tournament selection driven by a fitness list."""

    def test_classinit_(self):
        fitness_list = FitnessList(MAX)
        fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

        # The tournament size defaults to 2.
        fit = FitnessTournament(fitness_list)
        self.assertEqual(2, fit._tournament_size)

        # An explicit size within the population is accepted.
        fit = FitnessTournament(fitness_list, tournament_size=3)
        self.assertEqual(3, fit._tournament_size)

        # A size beyond the population is rejected.
        self.assertRaises(ValueError, FitnessTournament, fitness_list,
                          tournament_size=4)
521 |
522 |
class TestFitnessElites(unittest.TestCase):
    """Tests for elite selection by rate."""

    def setUp(self):
        self.fitness_list = FitnessList(MAX)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

    def test_classinit_(self):
        # Rates outside the valid range are rejected.
        for bad_rate in (1.1, -1.1, 0.0):
            self.assertRaises(ValueError, FitnessElites, self.fitness_list,
                              rate=bad_rate)

        fit = FitnessElites(self.fitness_list, rate=0.1)
        self.assertAlmostEqual(0.1, fit._rate)
        # Fixed: _selection_type is a string constant, so it must be
        # compared with assertEqual, not assertAlmostEqual (which only
        # passed by short-circuiting on equality).
        self.assertEqual(MIN, fit._selection_type)

    def test_set_rate(self):
        fit = FitnessElites(self.fitness_list, rate=0.1)
        self.assertAlmostEqual(0.1, fit._rate)

    def test_select(self):
        # A rate of ~1/3 over three members yields exactly one elite.
        fit = FitnessElites(self.fitness_list, rate=0.3333)
        count = len([member for member in fit.select()])
        self.assertEqual(1, count)
549 |
class TestFitnessLinearRanking(unittest.TestCase):
    """Tests for linear-ranking selection."""

    def setUp(self):
        self.fitness_list = FitnessList(MAX)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

    def test_classinit_(self):
        fit = FitnessLinearRanking(self.fitness_list, .6)
        self.assertAlmostEqual(0.6, fit._worstfactor)

    def test_set_worstfactor(self):
        fit = FitnessLinearRanking(self.fitness_list, .6)
        self.assertAlmostEqual(0.6, fit._worstfactor)
        fit.set_worstfactor(.7)
        self.assertAlmostEqual(.7, fit._worstfactor)

        # Factors outside the accepted range are rejected.
        self.assertRaises(ValueError, fit.set_worstfactor, -.1)
        self.assertRaises(ValueError, fit.set_worstfactor, 2.1)

    def test_select(self):
        fit = FitnessLinearRanking(self.fitness_list, .6)
        count = len([member for member in fit.select()])
        self.assertEqual(3, count)

    def _check_prob_list(self, prob_list, expected):
        """Check values, monotonic ordering, and that the list sums to 1."""
        for expected_value, actual in zip(expected, prob_list):
            self.assertAlmostEqual(expected_value, actual)

        self.assertLessEqual(prob_list[0], prob_list[1])
        self.assertLessEqual(prob_list[1], prob_list[2])

        self.assertAlmostEqual(1.0, sum(prob_list))

    def test_linear_ranking(self):
        # Fixed: removed a stray 'pass' statement that preceded the
        # assertions (dead code that made the test look disabled).
        fit = FitnessLinearRanking(self.fitness_list, .6)

        self._check_prob_list(fit._linear_ranking(3, .6),
                              [0.199999999, 0.333333333, 0.466666666])

        self._check_prob_list(fit._linear_ranking(3, .2),
                              [0.06666667, 0.33333333, 0.6])
599 |
class TestFitnessTruncationRanking(unittest.TestCase):
    """Tests for truncation-ranking selection."""

    def setUp(self):
        self.fitness_list = FitnessList(MAX)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

    def test_classinit_(self):
        fit = FitnessTruncationRanking(self.fitness_list, .5)
        self.assertAlmostEqual(.5, fit._trunc_rate)

    def test_set_trunc_rate(self):
        fit = FitnessTruncationRanking(self.fitness_list, .5)

        # Out-of-range rates are rejected, whether int or float.
        for bad_rate in (5, 5.0, -5, -5.0, 1.0):
            self.assertRaises(ValueError, fit.set_trunc_rate, bad_rate)

        fit.set_trunc_rate(.4)
        self.assertAlmostEqual(.4, fit._trunc_rate)

    def test_calc_prob(self):
        fit = FitnessTruncationRanking(self.fitness_list, .5)
        self.assertAlmostEqual(.125, fit._calc_prob(10, 2))

    def test_select(self):
        fit = FitnessTruncationRanking(self.fitness_list, .5)
        selected = [member for member in fit.select()]
        self.assertEqual(3, len(selected))
632 |
633 |
class TestReplacement(unittest.TestCase):
    """Tests for the Replacement base class."""

    def setUp(self):
        self.fitness_list = FitnessList(MAX)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])
        self.repl = Replacement(self.fitness_list)

    def test_classinit_(self):
        # No members are scheduled for replacement by default.
        self.assertEqual(0, self.repl._replacement_count)
643 |
644 |
class TestReplacementDeleteWorst(unittest.TestCase):
    """Tests for replacement that removes the worst members."""

    def setUp(self):
        self.fitness_list = FitnessList(MAX)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

        self.repl = ReplacementDeleteWorst(self.fitness_list,
                            replacement_count=1)

    def test_classinit_(self):
        self.assertEqual(1, self.repl._replacement_count)
        self.assertEqual(MIN, self.repl._selection_type)

    def test_set_replacement_count(self):
        self.repl.set_replacement_count(2)
        self.assertEqual(2, self.repl._replacement_count)

        self.repl.set_replacement_count(3)
        self.assertEqual(3, self.repl._replacement_count)

        # Cannot replace more members than the population holds.
        self.assertRaises(ValueError, self.repl.set_replacement_count, 4)

    def test_select(self):
        # Maximizing: removal candidates arrive worst (lowest) first.
        self.repl.set_replacement_count(3)
        values = [i for i in self.repl.select()]

        self.assertLessEqual(self.repl._fitness_list[values[0]][0],
                             self.repl._fitness_list[values[1]][0])
        self.assertLessEqual(self.repl._fitness_list[values[1]][0],
                             self.repl._fitness_list[values[2]][0])

        # Minimizing: removal candidates arrive worst (highest) first.
        # Fixed: the original reassigned self.fitness_list to an *empty*
        # MIN list without wiring it into the replacement object, so the
        # MAX list was silently re-tested.  Build a populated MIN list and
        # a replacement object that actually uses it.
        self.fitness_list = FitnessList(MIN)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])
        self.repl = ReplacementDeleteWorst(self.fitness_list,
                            replacement_count=3)
        values = [i for i in self.repl.select()]

        self.assertGreaterEqual(self.repl._fitness_list[values[0]][0],
                                self.repl._fitness_list[values[1]][0])
        self.assertGreaterEqual(self.repl._fitness_list[values[1]][0],
                                self.repl._fitness_list[values[2]][0])
687 |
class TestReplacementTournament(unittest.TestCase):
    """Tests for tournament-based replacement."""

    def setUp(self):
        self.fitness_list = FitnessList(MAX)
        self.fitness_list.extend([[1.5, 0], [2.5, 1], [12.0, 2]])

        self.repl = ReplacementTournament(self.fitness_list,
                                          tournament_size=1)

    def test_classinit_(self):
        self.assertEqual(1, self.repl._tournament_size)
700 |
# Allows running this test module directly.
if __name__ == '__main__':
    unittest.main()
703 |
--------------------------------------------------------------------------------
/pyneurgen/fitness.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | Copyright (C) 2012 Don Smiley ds@sidorof.com
4 |
5 | This program is free software: you can redistribute it and/or modify
6 | it under the terms of the GNU General Public License as published by
7 | the Free Software Foundation, either version 3 of the License, or
8 | (at your option) any later version.
9 |
10 | This program is distributed in the hope that it will be useful,
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | GNU General Public License for more details.
14 |
15 | You should have received a copy of the GNU General Public License
16 | along with this program. If not, see <http://www.gnu.org/licenses/>.
17 |
18 | See the LICENSE file included in this archive
19 |
20 |
21 |
22 | This module implements the fitness components for grammatical evolution.
23 |
24 | """
25 |
26 | from random import random, randint
27 | import math
28 |
# Fitness objective types: maximize, minimize, or converge on a target.
MAX = 'max'
MIN = 'min'
CENTER = 'center'
FITNESS_TYPES = [MAX, MIN, CENTER]

# Scaling strategies available to fitness-proportionate selection.
SCALING_LINEAR = 'linear'
SCALING_TRUNC = 'truncation'
SCALING_EXPONENTIAL = 'exponential'
SCALING_LOG = 'logarithmic'

SCALING_TYPES = [SCALING_LINEAR, SCALING_TRUNC, SCALING_EXPONENTIAL,
                 SCALING_LOG]
41 |
42 |
class FitnessList(list):
    """
    This class maintains a list of fitness values per generation. It is a
    subclassed list to maintain information regarding whether fitness values
    should be maximized, minimized or centered around zero. By holding that
    information, when the fitness list is given to a fitness evaluation or
    replacement object, it can configure itself automatically to conform to
    the appropriate characteristics for the class.

    Each item in the list is a [fitness_value, member_no] pair.

    """

    def __init__(self, fitness_type, target_value=0.0):
        """
        This function initializes and accepts the fitness type. Optionally it
        accepts the target value. The target value is the value where
        execution of the evolutionary process halts upon attaining the goal.

        """

        list.__init__(self)
        self._fitness_type = None
        self._target_value = target_value
        self.set_fitness_type(fitness_type)
        self.set_target_value(target_value)

    def set_fitness_type(self, fitness_type):
        """
        This function sets the fitness type.

        Accepted fitness types are 'min', 'max', or 'center'.

        """

        if fitness_type not in FITNESS_TYPES:
            raise ValueError("""
                Fitness type must be either min, max, or
                center, not %s""" % (fitness_type))

        self._fitness_type = fitness_type

    def get_fitness_type(self):
        """
        This function returns the fitness type, such as 'min',
        'max', or 'center'.

        """

        return self._fitness_type

    def set_target_value(self, target_value):
        """
        This function sets the target value.

        Raises ValueError if the target value is not a float.

        """

        if not isinstance(target_value, float):
            raise ValueError("The target value must be a float")
        self._target_value = target_value

    def get_target_value(self):
        """
        This function returns the target value.

        """

        return self._target_value

    def min_value(self):
        """
        This function returns the minimum fitness value in the list.

        """

        # Pairs compare by fitness value first, so min/max work directly.
        return min(self)[0]

    def max_value(self):
        """
        This function returns the maximum fitness value in the list.

        """

        return max(self)[0]

    def best_value(self):
        """
        This function returns the best value in the list on the basis of the
        objective of the fitness list. For example, when trying to maximize a
        fitness value, it would return the maximum value.

        """

        if self._fitness_type == MIN:
            return self.min_value()
        elif self._fitness_type == MAX:
            return self.max_value()
        elif self._fitness_type == CENTER:
            # sorted() orders CENTER lists by distance from the target.
            sortlist = self.sorted()
            return sortlist[0][0]

    def worst_value(self):
        """
        This function returns the worst value in the list on the basis of the
        objective of the fitness list. For example, when trying to maximize a
        fitness value, it would return the minimum value.

        """

        if self._fitness_type == MIN:
            return self.max_value()
        elif self._fitness_type == MAX:
            return self.min_value()
        elif self._fitness_type == CENTER:
            sortlist = self.sorted()
            return sortlist[-1][0]

    def min_member(self):
        """
        This function returns the member number holding the minimum value.

        """

        if self._fitness_type == MIN:
            return self.sorted()[0][1]
        elif self._fitness_type == MAX:
            # MAX lists sort in descending order, so the minimum is last.
            return self.sorted()[-1][1]
        elif self._fitness_type == CENTER:
            return self.sorted()[0][1]

    def max_member(self):
        """
        This function returns the member number holding the maximum value.

        """

        if self._fitness_type == MIN:
            return self.sorted()[-1][1]
        elif self._fitness_type == MAX:
            return self.sorted()[0][1]
        elif self._fitness_type == CENTER:
            return self.sorted()[-1][1]

    def best_member(self):
        """
        This function returns the member with the best value based upon the
        criteria of the fitness type.

        """

        if self._fitness_type == MIN:
            return self.min_member()
        elif self._fitness_type == MAX:
            return self.max_member()
        elif self._fitness_type == CENTER:
            # For CENTER, sorted() puts the closest-to-target member first,
            # which min_member returns.
            return self.min_member()

    def worst_member(self):
        """
        This function returns the member with the worst value based upon the
        criteria of the fitness type.

        """

        if self._fitness_type == MIN:
            return self.max_member()
        elif self._fitness_type == MAX:
            return self.min_member()
        elif self._fitness_type == CENTER:
            return self.max_member()

    def mean(self):
        """
        This function returns the mean fitness value.

        """

        return sum(item[0] for item in self) / float(len(self))

    def median(self):
        """
        This function returns the median fitness value.

        """

        sort_list = self.sorted()
        length = len(self)
        half = int(length / 2)
        # Fixed: the original parity test (half - length % 2 == 0) averaged
        # the wrong pair for odd lengths and returned a single element for
        # even lengths.  A standard even/odd split is used instead.
        if length % 2 == 0:
            # Even count:  average the two middle values.
            return (sort_list[half - 1][0] + sort_list[half][0]) / 2.0
        else:
            # Odd count:  take the middle value.
            return sort_list[half][0]

    def stddev(self):
        """
        This function returns the sample standard deviation of the fitness
        values.

        """

        mean = self.mean()
        total = sum(pow(item[0] - mean, 2.0) for item in self)
        return pow(total / (float(len(self)) - 1.0), .5)

    def sorted(self):
        """
        This function returns the fitness list sorted in fitness order
        according to the fitness type: ascending for MIN, descending for
        MAX, and by absolute distance from the target value for CENTER.

        """

        if self._fitness_type == MIN:
            new_list = [i for i in self]
            new_list.sort()
        elif self._fitness_type == MAX:
            new_list = [i for i in self]
            new_list.sort(reverse=True)
        elif self._fitness_type == CENTER:
            # Sort by distance from the target, then map the distances back
            # to the members' original fitness values.
            new_list = [[abs(i[0] - self._target_value), i[1]] for i in self]
            new_list.sort()
            new_list = [[self[i[1]][0], i[1]] for i in new_list]
        return new_list
269 |
270 |
class Selection(object):
    """
    This is the base class for methods appropriate for assessing the fitness
    landscape. _selection_type refers to the technique associated with the
    selection method that is subclassed.

    The selection_list used in this class is an ordinary list of values, not a
    fitness list, the fitness list being a list of tuples.

    It is the responsibility of the derived classes to adjust the selection
    list values as needed.

    """

    def __init__(self, selection_list=None):
        # Subclasses adjust the selection type; MAX is the default.
        self._selection_type = MAX
        if selection_list:
            self.set_selection_list(selection_list)
        else:
            self._selection_list = None

    def set_selection_list(self, selection_list):
        """
        This function accepts the selection list. This is the list of fitness
        values that may have been transformed for the selection process.

        """

        if not isinstance(selection_list, list):
            raise ValueError("Selection list is not a list")
        if isinstance(selection_list, FitnessList):
            raise ValueError("Selection list should not be a Fitness List")
        self._selection_list = selection_list

    def set_selection_type(self, selection_type):
        """
        This function accepts the selection type, which must be either 'min'
        or 'max'. The selection type is used by the subclass to know how to
        manipulate the list to achieve the fitness goals.

        """

        if selection_type not in (MIN, MAX):
            raise ValueError("""
                The selection type must be either '%s' or '%s', not '%s'.
                """ % (MIN, MAX, selection_type))
        self._selection_type = selection_type

    @staticmethod
    def _roulette_wheel(scale_list):
        """
        This function receives a list that has been scaled so that the sum
        total of the list is 1.0. This enables a fair use of probability.
        This is a generator that yields a random selection from the list,
        one position per list element.

        """

        if round(sum(scale_list), 10) != 1.0:
            raise ValueError(
                "The scaled list received does not total 1.0: %s" % (
                    sum(scale_list)))
        cumu = [0.0]
        length = len(scale_list)
        for i in range(length):
            cumu.append(scale_list[i] + cumu[-1])

        # Rewrite for binary search sometime
        for i in range(length):
            rand_val = random()
            position = 0
            while position < length:
                if cumu[position + 1] > rand_val:
                    yield position
                    break
                position += 1
            else:
                # Fixed: floating-point accumulation can leave the final
                # cumulative value fractionally below rand_val, in which
                # case the original silently yielded nothing for this draw.
                # Fall back to the last position instead.
                yield length - 1

    def _make_sort_list(self):
        """
        This function pairs each selection-list value with its member
        position, making it similar to the original fitness list except
        that the fitness values have been adjusted.

        """

        return [[value, member]
                for member, value in enumerate(self._selection_list)]
360 |
361 |
class Tournament(Selection):
    """
    Selects random tuples and returns either the minimum or maximum. To speed
    up the rate of selection, use larger tournament sizes, and smaller to slow
    down the process.

    """

    def __init__(self, selection_list=None, tournament_size=None):
        Selection.__init__(self, selection_list)
        self._tournament_size = None
        self.set_tournament_size(tournament_size)
        self._minmax = None

    def set_tournament_size(self, tournament_size):
        """
        This function accepts the tournament size, which is the number of
        members that will be selected per tournament.

        """

        if tournament_size is not None:
            if not isinstance(tournament_size, int):
                raise ValueError("Tournament size, %s must be an int." % (
                    tournament_size))
            if self._selection_list:
                if tournament_size > len(self._selection_list):
                    raise ValueError("""The tournament size, %s, cannot
                        be larger than the population, %s.""" % (
                        tournament_size, len(self._selection_list)))
        self._tournament_size = tournament_size

    def _set_minmax(self, minmax):
        """
        This function sets whether the selection should minimize or maximize.
        """

        if minmax not in [MIN, MAX]:
            raise ValueError("Must be either '%s' or '%s'" % (MIN, MAX))
        self._minmax = minmax

    def select(self):
        """
        The select function provides all the members based upon the class
        algorithm.

        """
        population_size = len(self._selection_list)
        for _ in range(population_size):
            # Draw a random pool of entrants for this tournament.
            tourn_list = FitnessList(self._minmax)
            for _ in range(self._tournament_size):
                entrant = randint(0, population_size - 1)
                tourn_list.append([self._selection_list[entrant], entrant])

            # The pool's best member wins this round.
            yield tourn_list.best_member()
            del tourn_list
427 |
428 |
class Fitness(Selection):
    """
    This class is the prototype for the fitness functions. The primary job of
    a fitness function is to deal with the list of [fitness value, member no]
    pairs that are generated as a result of a run.

    It also scales and configures the fitness values to be consistent with
    the fitness function's characteristics.  When the fitness goal and the
    selection goal point in opposite directions (for example, minimizing
    fitness while selecting by maximum), the values are replaced with their
    reciprocals.  When the fitness type centers on a value, such as zero,
    the values are first converted to the absolute distance from that value.

    """

    def __init__(self, fitness_list):
        Selection.__init__(self)
        self._fitness_list = None
        self.set_fitness_list(fitness_list)

    def set_fitness_list(self, fitness_list):
        """
        This function accepts the fitness list. It is the list of fitness
        values by member number for a population.

        """

        if not isinstance(fitness_list, FitnessList):
            raise ValueError("Fitness_list is not a list")

        self._fitness_list = fitness_list

        if fitness_list.get_fitness_type() == CENTER:
            # Work with distances from the target rather than raw values.
            target_value = fitness_list.get_target_value()
            self._selection_list = [abs(item[0] - target_value)
                                    for item in fitness_list]
        else:
            self._selection_list = [item[0] for item in fitness_list]

    @staticmethod
    def _invert(value):
        """
        This method returns the reciprocal of the value, mapping zero to
        zero rather than raising a ZeroDivisionError.

        """

        return 0.0 if value == 0.0 else 1.0 / value

    def _scale_list(self):
        """
        This function scales the list to convert for example min to max where
        the selection type warrants it.

        """

        # These (selection type, fitness type) pairings pull in opposite
        # directions and therefore need the values inverted.
        inverted_pairs = ((MAX, MIN), (MIN, MAX), (MAX, CENTER))

        pair = (self._selection_type,
                self._fitness_list.get_fitness_type())

        if pair in inverted_pairs:
            self._selection_list = [self._invert(value)
                                    for value in self._selection_list]

    @staticmethod
    def _make_prob_list(selection_list):
        """
        This function aids in calculating probability lists. It scales the
        values to add up to 1.0.

        """

        total = sum(selection_list)
        if total != 0.0:
            return [value / total for value in selection_list]
        else:
            return selection_list
527 |
528 |
class FitnessProportionate(Fitness):
    """
    The probability of selection is based upon the fitness value of the
    individual relative to the rest of the population.

        Pr(G_i) = f(G_i)/ sum(1 to pop) f(G_i)

    This is the total probability.  Roulette wheel selects individuals
    randomly, highest fitness more likely to be included.

    Note that inherent in this approach is the assumption that the fitness
    value should be as large as possible.  Consequently, if the fitness type
    is MIN or CENTER, a modification is made.  In the case of MIN, inverse of
    the fitness values are used.  For the CENTER fitness type, the inverse of
    the absolute difference between the target value and the fitness values
    are used.

    The available scaling methods are:

        * SCALING_LINEAR = 'linear'
        * SCALING_TRUNC = 'truncation'
        * SCALING_EXPONENTIAL = 'exponential'
        * SCALING_LOG = 'logarithmic'

    Truncation scaling requires a cutoff value passed via
    self.select(param=<cutoff>).  Upon truncation, the fitness values are
    linearly scaled.

    The default value for exponential is 2.0, but that is adjustable via:
        ex: self.select(param=1.5)

    Also, note that because a probability map is derived from the scaled
    numbers, the fitness values must all be positive or all negative for this
    approach to be valid.  In addition, the logarithmic scaling also requires
    that the fitness values must be 1 or above, prior to scaling.  This is
    because the log below 1 is a negative number, which then interferes with
    the calculation of the probabilities.

    """

    def __init__(self, fitness_list, scaling_type):
        """
        Stores the fitness list, validates the scaling type, and sanity
        checks the selection values against that scaling type.

        """
        Fitness.__init__(self, fitness_list)
        self._scaling_type = None
        self.set_scaling_type(scaling_type)
        self._check_minmax()

    def set_scaling_type(self, scaling_type):
        """
        This function accepts the type of scaling that will be performed on
        the data in preparation of building a probability list for the
        roulette selection.

        Raises ValueError for an unknown scaling type.

        """
        if scaling_type not in SCALING_TYPES:
            raise ValueError(
                "Invalid scaling type: %s, valid scaling types are %s" % (
                    scaling_type, SCALING_TYPES))
        self._scaling_type = scaling_type

    def _check_minmax(self):
        """
        If selection of fitness is with proportions, then they must all be
        the same sign.  Also, values below 1.0 do not mix with logs.

        """

        if min(self._selection_list) < 0.0 < max(self._selection_list):
            raise ValueError("Inconsistent signs in selection list")

        # Fix: use the SCALING_LOG constant rather than the string literal
        # 'logarithmic', for consistency with _apply_prop_scaling.  Per
        # the class docstring, log scaling needs every value >= 1.0: the
        # log of a value below 1 is negative (and log(0) is undefined),
        # which would corrupt the probability list.  The previous check
        # only rejected negatives.
        if self._scaling_type == SCALING_LOG and \
                min(self._selection_list) < 1.0:
            raise ValueError(
                "Log scaling requires fitness values of 1.0 or above.")

    def select(self, param=None):
        """
        The select function provides all the members based upon the class
        algorithm.  param carries the exponent for exponential scaling or
        the cutoff value for truncation scaling.

        """

        self._scale_list()
        # Now apply to the roulette wheel
        return self._roulette_wheel(self._apply_prop_scaling(param))

    def _apply_prop_scaling(self, param):
        """
        This function scales the values according to the scale type and
        returns the resulting probability list.

        Raises ValueError when the selection list is empty, or when
        truncation scaling is requested without a cutoff value.

        """

        if not self._selection_list:
            raise ValueError("No fitness list to scale")

        if self._scaling_type == SCALING_LINEAR:
            self._selection_list = self._make_prob_list(self._selection_list)

        elif self._scaling_type == SCALING_EXPONENTIAL:
            # Default exponent is 2.0 unless supplied by the caller.
            exponent = 2.0 if param is None else param
            self._selection_list = [pow(item, exponent)
                                    for item in self._selection_list]
            self._selection_list = self._make_prob_list(self._selection_list)

        elif self._scaling_type == SCALING_LOG:
            self._selection_list = [math.log(item)
                                    for item in self._selection_list]
            self._selection_list = self._make_prob_list(self._selection_list)

        elif self._scaling_type == SCALING_TRUNC:
            # Fix: test `param is None` rather than truthiness, so that an
            # explicit cutoff of 0.0 is accepted instead of raising.
            if param is None:
                raise ValueError("""
                Truncation scaling requires a truncation value""")
            trunc = param
            # Zero out members below the cutoff, then scale linearly.
            for i in range(len(self._selection_list)):
                if self._selection_list[i] < trunc:
                    self._selection_list[i] = 0.0

            self._selection_list = self._make_prob_list(self._selection_list)
        else:
            # set_scaling_type validates the type, so this is unreachable.
            pass

        return self._selection_list
656 |
657 |
class FitnessTournament(Fitness, Tournament):
    """
    Tournament-based fitness selection.  The tournament size defaults to
    2 but may range up to the size of the population.

    """

    def __init__(self, fitness_list, tournament_size=2):

        Tournament.__init__(self)
        Fitness.__init__(self, fitness_list)
        # CENTER fitness is treated the same as MIN for tournament play.
        fitness_type = self._fitness_list.get_fitness_type()
        self._set_minmax(MAX if fitness_type == MAX else MIN)
        self.set_tournament_size(tournament_size)
676 |
677 |
class FitnessElites(Fitness):
    """
    This class selects the highest or lowest fitness depending upon what is
    desired.  The fitness list is put into the selection list in this case so
    that once the sorting for rank takes place, the member numbers are still
    available.

    """

    def __init__(self, fitness_list, rate):
        Fitness.__init__(self, fitness_list)
        self._rate = None
        self.set_rate(rate)
        self.set_selection_type(MIN)

    def set_rate(self, rate):
        """
        This function accepts a value greater than 0 and less than or equal
        to 1.0.  It is the percentage of members from a list sorted by best
        values.

        Raises ValueError for non-numeric values or values outside (0, 1].

        """

        # Generalized: accept ints as well as floats (e.g. rate=1), since
        # any real number in (0, 1] is a valid rate.
        if not (isinstance(rate, (int, float)) and (0.0 < rate <= 1.0)):
            raise ValueError(
                "The rate, %s, should be between 0.0 and 1.0" % (
                    rate))
        self._rate = float(rate)

    def select(self):
        """
        The select function is a generator that yields the member numbers
        of the elite portion of the population, best first.

        """

        self._scale_list()
        sort_list = self._make_sort_list()
        sort_list.sort()
        # Round the rate to a whole number of elite members.
        elites = int(round(self._rate * float(len(sort_list))))
        for item in sort_list[:elites]:
            yield item[1]
720 |
721 |
class FitnessLinearRanking(Fitness):
    """
    This class selects fitness on the basis of rank with other members.  Only
    the position in the ranking matters rather than the fitness value.  The
    probability curve is calculated on that basis.  Then, roulette selection
    takes place.

    This uses the formula for the probability curve of:
        Probability = 1 / population * (worstfactor + (best -
            worstfactor) * (rank(Member) - 1) / (population - 1))

    where best = 2.0 - worstfactor

    the best individual produces up to twice the children as the average.

    To make sense of this:
        Suppose that the worst factor is .6 and therefore the best factor is
        1.4.  That will generate a probability list where the most fit
        members will be weighted more highly than less fit members.

        If the worst factor is 1.0 and therefore the best factor is 1.0 too,
        the slope of the probability line will be flat.

        And finally, if the worst factor is 1.6 and the best factor is .4,
        the slope of the probability line will cause the less fit members to
        have a higher probability of being selected than the more fit
        members.

    """

    def __init__(self, fitness_list, worstfactor):
        Fitness.__init__(self, fitness_list)
        self._worstfactor = None
        self.set_worstfactor(worstfactor)

    def set_worstfactor(self, worstfactor):
        """
        This function sets the worst factor.  See the class description for
        more.

        Raises ValueError unless worstfactor is a float in [0.0, 2.0].

        """
        if not isinstance(worstfactor, float):
            raise ValueError(
                "Worstfactor must be a float value between 0 and 2.0.")
        if (worstfactor < 0) or (worstfactor > 2.0):
            raise ValueError(
                "Worstfactor must be a float value between 0 and 2.0.")
        self._worstfactor = worstfactor

    def select(self):
        """
        The select function provides all the members based upon the class
        algorithm.

        """

        self._scale_list()

        sort_list = self._make_sort_list()
        sort_list.sort()

        prob_list = self._linear_ranking(
            len(sort_list), self._worstfactor)

        # The probabilities line up positionally with the sorted member
        # list, so they feed straight into the roulette wheel.  (The
        # previous version paired them with member numbers only to discard
        # the member numbers again.)
        return self._roulette_wheel(prob_list)

    @staticmethod
    def _linear_ranking(count, worst):
        """
        This applies the best and worst factors and assigns the selection
        probability to each rank.

        This returns a list of those probabilities.

        """

        if count == 1:
            # Fix: a single member previously raised ZeroDivisionError via
            # the (count - 1.0) denominator; it is selected with certainty.
            return [1.0]

        best = 2.0 - worst
        scale_list = []
        i = 1.0
        count = float(count)
        while i < count + 1.0:
            value = (worst + (best - worst) * (i - 1.0) / (count - 1.0))
            value /= count
            scale_list.append(value)
            i += 1.0
        return scale_list
812 |
813 |
class FitnessTruncationRanking(Fitness):
    """
    This class selects fitness on the basis of rank with other members if
    above a certain rank.  Once above that rank, any member can be selected
    with an equal probability.  The truncation value is entered as a rate and
    converted to a ranking value.  For example, if a population has 100 members
    and a truncation value of .2, the truncated ranking will be converted to a
    rank of 20.

    """

    def __init__(self, fitness_list, trunc_rate):
        # Fitness.__init__ stores the fitness list and builds the
        # selection list.
        Fitness.__init__(self, fitness_list)
        self._trunc_rate = None
        self.set_trunc_rate(trunc_rate)
        self.set_selection_type(MIN)

    def set_trunc_rate(self, trunc_rate):
        """
        This function sets the rate, between 0 and 1 that is a hurdle for
        selection.

        Raises ValueError unless trunc_rate is a float in [0.0, 1.0).

        """

        if not isinstance(trunc_rate, float):
            raise ValueError(
                "Trunc_rate, %s, should between 0.0 and 1.0" % (trunc_rate))
        if (trunc_rate < 0) or (trunc_rate >= 1.0):
            raise ValueError(
                "Trunc_rate, %s, should between 0.0 and 1.0" % (trunc_rate))
        self._trunc_rate = trunc_rate

    def select(self):
        """
        The select function provides all the members based upon the class
        algorithm.

        """

        self._scale_list()
        sort_list = self._make_sort_list()
        # reverse=True puts the highest selection values first, so the
        # best-ranked members occupy the low indexes below.
        sort_list.sort(reverse=True)

        length = len(sort_list)
        # Convert the truncation rate into a rank cutoff.
        cutoff_rank = int(round(self._trunc_rate * length))
        prob_list = []
        prob = self._calc_prob(length, cutoff_rank)

        for i in range(length):
            member_no = sort_list[i][1]
            # NOTE(review): `i < cutoff_rank - 1` admits only
            # cutoff_rank - 1 members, not cutoff_rank as the class
            # docstring example implies -- looks like an off-by-one;
            # confirm before changing.
            if i < cutoff_rank - 1:
                prob_list.append([member_no, prob])
            else:
                prob_list.append([member_no, 0.0])

        return self._roulette_wheel([item[1] for item in prob_list])

    @staticmethod
    def _calc_prob(length, cutoff_rank):
        """
        This function calculates the probability that a member will be selected
        if it falls within the cutoff rank.

        NOTE(review): the denominator is the count of members *outside* the
        cutoff (length - cutoff_rank), so the per-member probabilities do
        not sum to 1.0; presumably 1.0 / cutoff_rank was intended -- verify
        against _roulette_wheel's normalization before changing.  Also
        raises ZeroDivisionError when cutoff_rank == length (trunc_rate is
        validated to be < 1.0, so that should not occur in practice).
        """

        return 1.0 / float(length - cutoff_rank)
879 |
880 |
class Replacement(Fitness):
    """
    Base class for the selection strategies that choose members for
    replacement.  It behaves just like a Fitness selection, except that
    subclasses aim to identify the worst members rather than the best.

    """

    def __init__(self, fitness_list):
        Fitness.__init__(self, fitness_list)
        # Subclasses set this to the number of members replaced per cycle.
        self._replacement_count = 0
892 |
893 |
class ReplacementDeleteWorst(Replacement):
    """
    This class is the mirror image of FitnessElite.  The worst members are
    returned.

    """

    def __init__(self, fitness_list, replacement_count):
        """
        This function initializes Replacement and then adds a replacement
        count for handling the number of members that will be replaced each
        time.

        """

        Replacement.__init__(self, fitness_list)

        self.set_replacement_count(replacement_count)
        self.set_selection_type(MIN)

    def set_replacement_count(self, replacement_count):
        """
        This function accepts the number of members to be replaced.

        Raises ValueError unless replacement_count is an int in
        (0, population size].

        """
        length = len(self._selection_list)

        # Fix: error messages previously read "should an int" and
        # "should between" -- missing the word "be".
        if not isinstance(replacement_count, int):
            raise ValueError(
                "Replacement count, %s should be an int between 0 and %s" % (
                    replacement_count, length))
        if not (0 < replacement_count <= length):
            raise ValueError(
                "Replacement count, %s should be between 0 and %s" % (
                    replacement_count, length))
        self._replacement_count = replacement_count

    def select(self):
        """
        select is a generator that yields the members for
        replacement sorted from worst to best.  It halts when the
        replacement count has been reached.

        """
        self._scale_list()
        sort_list = self._make_sort_list()
        # reverse=True places the worst members first so the slice below
        # takes exactly the members to replace.
        sort_list.sort(reverse=True)
        for item in sort_list[:self._replacement_count]:
            yield item[1]
943 |
944 |
class ReplacementTournament(Replacement, Tournament):
    """
    This class selects the fitness based on a tournament.

    """

    def __init__(self, fitness_list, tournament_size):
        """
        This class combines Replacement and Tournament.  It inits Replacement
        first, because both have a select function and the Tournament select
        function is the one that matters.

        """

        Replacement.__init__(self, fitness_list)
        # NOTE(review): Tournament.__init__ is skipped (commented out) --
        # confirm that set_tournament_size and _set_minmax do not rely on
        # state that Tournament.__init__ would otherwise establish.
        #Tournament.__init__(self)
        self.set_tournament_size(tournament_size)
        # NOTE(review): Replacement.__init__ above already stored the
        # fitness list; this second call looks redundant -- verify.
        self.set_fitness_list(fitness_list)
        if self._fitness_list.get_fitness_type() == MAX:
            # Replacement seeks the worst members, so the min/max sense is
            # inverted relative to the fitness type.
            minmax = MIN
        else:
            minmax = MAX
        self._set_minmax(minmax)
        self._scale_list()
969 |
--------------------------------------------------------------------------------